req               141 arch/arm/common/locomo.c 	int req, i;
req               147 arch/arm/common/locomo.c 	req = locomo_readl(lchip->base + LOCOMO_ICR) & 0x0f00;
req               149 arch/arm/common/locomo.c 	if (req) {
req               155 arch/arm/common/locomo.c 			if (req & (0x0100 << i)) {
req               177 arch/arm/crypto/aes-ce-glue.c static int ecb_encrypt(struct skcipher_request *req)
req               179 arch/arm/crypto/aes-ce-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               185 arch/arm/crypto/aes-ce-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               197 arch/arm/crypto/aes-ce-glue.c static int ecb_decrypt(struct skcipher_request *req)
req               199 arch/arm/crypto/aes-ce-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               205 arch/arm/crypto/aes-ce-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               217 arch/arm/crypto/aes-ce-glue.c static int cbc_encrypt_walk(struct skcipher_request *req,
req               220 arch/arm/crypto/aes-ce-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               236 arch/arm/crypto/aes-ce-glue.c static int cbc_encrypt(struct skcipher_request *req)
req               241 arch/arm/crypto/aes-ce-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               244 arch/arm/crypto/aes-ce-glue.c 	return cbc_encrypt_walk(req, &walk);
req               247 arch/arm/crypto/aes-ce-glue.c static int cbc_decrypt_walk(struct skcipher_request *req,
req               250 arch/arm/crypto/aes-ce-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               266 arch/arm/crypto/aes-ce-glue.c static int cbc_decrypt(struct skcipher_request *req)
req               271 arch/arm/crypto/aes-ce-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               274 arch/arm/crypto/aes-ce-glue.c 	return cbc_decrypt_walk(req, &walk);
req               277 arch/arm/crypto/aes-ce-glue.c static int cts_cbc_encrypt(struct skcipher_request *req)
req               279 arch/arm/crypto/aes-ce-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               281 arch/arm/crypto/aes-ce-glue.c 	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
req               282 arch/arm/crypto/aes-ce-glue.c 	struct scatterlist *src = req->src, *dst = req->dst;
req               289 arch/arm/crypto/aes-ce-glue.c 	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
req               292 arch/arm/crypto/aes-ce-glue.c 	if (req->cryptlen <= AES_BLOCK_SIZE) {
req               293 arch/arm/crypto/aes-ce-glue.c 		if (req->cryptlen < AES_BLOCK_SIZE)
req               299 arch/arm/crypto/aes-ce-glue.c 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
req               301 arch/arm/crypto/aes-ce-glue.c 					   req->iv);
req               308 arch/arm/crypto/aes-ce-glue.c 		if (req->cryptlen == AES_BLOCK_SIZE)
req               311 arch/arm/crypto/aes-ce-glue.c 		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
req               312 arch/arm/crypto/aes-ce-glue.c 		if (req->dst != req->src)
req               313 arch/arm/crypto/aes-ce-glue.c 			dst = scatterwalk_ffwd(sg_dst, req->dst,
req               319 arch/arm/crypto/aes-ce-glue.c 				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
req               320 arch/arm/crypto/aes-ce-glue.c 				   req->iv);
req               335 arch/arm/crypto/aes-ce-glue.c static int cts_cbc_decrypt(struct skcipher_request *req)
req               337 arch/arm/crypto/aes-ce-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               339 arch/arm/crypto/aes-ce-glue.c 	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
req               340 arch/arm/crypto/aes-ce-glue.c 	struct scatterlist *src = req->src, *dst = req->dst;
req               347 arch/arm/crypto/aes-ce-glue.c 	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
req               350 arch/arm/crypto/aes-ce-glue.c 	if (req->cryptlen <= AES_BLOCK_SIZE) {
req               351 arch/arm/crypto/aes-ce-glue.c 		if (req->cryptlen < AES_BLOCK_SIZE)
req               357 arch/arm/crypto/aes-ce-glue.c 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
req               359 arch/arm/crypto/aes-ce-glue.c 					   req->iv);
req               366 arch/arm/crypto/aes-ce-glue.c 		if (req->cryptlen == AES_BLOCK_SIZE)
req               369 arch/arm/crypto/aes-ce-glue.c 		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
req               370 arch/arm/crypto/aes-ce-glue.c 		if (req->dst != req->src)
req               371 arch/arm/crypto/aes-ce-glue.c 			dst = scatterwalk_ffwd(sg_dst, req->dst,
req               377 arch/arm/crypto/aes-ce-glue.c 				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
req               378 arch/arm/crypto/aes-ce-glue.c 				   req->iv);
req               393 arch/arm/crypto/aes-ce-glue.c static int ctr_encrypt(struct skcipher_request *req)
req               395 arch/arm/crypto/aes-ce-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               400 arch/arm/crypto/aes-ce-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               446 arch/arm/crypto/aes-ce-glue.c static int ctr_encrypt_sync(struct skcipher_request *req)
req               449 arch/arm/crypto/aes-ce-glue.c 		return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);
req               451 arch/arm/crypto/aes-ce-glue.c 	return ctr_encrypt(req);
req               454 arch/arm/crypto/aes-ce-glue.c static int xts_encrypt(struct skcipher_request *req)
req               456 arch/arm/crypto/aes-ce-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               459 arch/arm/crypto/aes-ce-glue.c 	int tail = req->cryptlen % AES_BLOCK_SIZE;
req               465 arch/arm/crypto/aes-ce-glue.c 	if (req->cryptlen < AES_BLOCK_SIZE)
req               468 arch/arm/crypto/aes-ce-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               471 arch/arm/crypto/aes-ce-glue.c 		int xts_blocks = DIV_ROUND_UP(req->cryptlen,
req               478 arch/arm/crypto/aes-ce-glue.c 					      skcipher_request_flags(req),
req               480 arch/arm/crypto/aes-ce-glue.c 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
req               482 arch/arm/crypto/aes-ce-glue.c 					   req->iv);
req               483 arch/arm/crypto/aes-ce-glue.c 		req = &subreq;
req               484 arch/arm/crypto/aes-ce-glue.c 		err = skcipher_walk_virt(&walk, req, false);
req               506 arch/arm/crypto/aes-ce-glue.c 	dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
req               507 arch/arm/crypto/aes-ce-glue.c 	if (req->dst != req->src)
req               508 arch/arm/crypto/aes-ce-glue.c 		dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
req               510 arch/arm/crypto/aes-ce-glue.c 	skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
req               511 arch/arm/crypto/aes-ce-glue.c 				   req->iv);
req               513 arch/arm/crypto/aes-ce-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               526 arch/arm/crypto/aes-ce-glue.c static int xts_decrypt(struct skcipher_request *req)
req               528 arch/arm/crypto/aes-ce-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               531 arch/arm/crypto/aes-ce-glue.c 	int tail = req->cryptlen % AES_BLOCK_SIZE;
req               537 arch/arm/crypto/aes-ce-glue.c 	if (req->cryptlen < AES_BLOCK_SIZE)
req               540 arch/arm/crypto/aes-ce-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               543 arch/arm/crypto/aes-ce-glue.c 		int xts_blocks = DIV_ROUND_UP(req->cryptlen,
req               550 arch/arm/crypto/aes-ce-glue.c 					      skcipher_request_flags(req),
req               552 arch/arm/crypto/aes-ce-glue.c 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
req               554 arch/arm/crypto/aes-ce-glue.c 					   req->iv);
req               555 arch/arm/crypto/aes-ce-glue.c 		req = &subreq;
req               556 arch/arm/crypto/aes-ce-glue.c 		err = skcipher_walk_virt(&walk, req, false);
req               578 arch/arm/crypto/aes-ce-glue.c 	dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
req               579 arch/arm/crypto/aes-ce-glue.c 	if (req->dst != req->src)
req               580 arch/arm/crypto/aes-ce-glue.c 		dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
req               582 arch/arm/crypto/aes-ce-glue.c 	skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
req               583 arch/arm/crypto/aes-ce-glue.c 				   req->iv);
req               585 arch/arm/crypto/aes-ce-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req                86 arch/arm/crypto/aes-neonbs-glue.c static int __ecb_crypt(struct skcipher_request *req,
req                90 arch/arm/crypto/aes-neonbs-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req                95 arch/arm/crypto/aes-neonbs-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               115 arch/arm/crypto/aes-neonbs-glue.c static int ecb_encrypt(struct skcipher_request *req)
req               117 arch/arm/crypto/aes-neonbs-glue.c 	return __ecb_crypt(req, aesbs_ecb_encrypt);
req               120 arch/arm/crypto/aes-neonbs-glue.c static int ecb_decrypt(struct skcipher_request *req)
req               122 arch/arm/crypto/aes-neonbs-glue.c 	return __ecb_crypt(req, aesbs_ecb_decrypt);
req               152 arch/arm/crypto/aes-neonbs-glue.c static int cbc_encrypt(struct skcipher_request *req)
req               154 arch/arm/crypto/aes-neonbs-glue.c 	return crypto_cbc_encrypt_walk(req, cbc_encrypt_one);
req               157 arch/arm/crypto/aes-neonbs-glue.c static int cbc_decrypt(struct skcipher_request *req)
req               159 arch/arm/crypto/aes-neonbs-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               164 arch/arm/crypto/aes-neonbs-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               220 arch/arm/crypto/aes-neonbs-glue.c static int ctr_encrypt(struct skcipher_request *req)
req               222 arch/arm/crypto/aes-neonbs-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               228 arch/arm/crypto/aes-neonbs-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               277 arch/arm/crypto/aes-neonbs-glue.c static int ctr_encrypt_sync(struct skcipher_request *req)
req               280 arch/arm/crypto/aes-neonbs-glue.c 		return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);
req               282 arch/arm/crypto/aes-neonbs-glue.c 	return ctr_encrypt(req);
req               329 arch/arm/crypto/aes-neonbs-glue.c static int __xts_crypt(struct skcipher_request *req, bool encrypt,
req               333 arch/arm/crypto/aes-neonbs-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               335 arch/arm/crypto/aes-neonbs-glue.c 	int tail = req->cryptlen % AES_BLOCK_SIZE;
req               341 arch/arm/crypto/aes-neonbs-glue.c 	if (req->cryptlen < AES_BLOCK_SIZE)
req               347 arch/arm/crypto/aes-neonbs-glue.c 					      skcipher_request_flags(req),
req               349 arch/arm/crypto/aes-neonbs-glue.c 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
req               350 arch/arm/crypto/aes-neonbs-glue.c 					   req->cryptlen - tail, req->iv);
req               351 arch/arm/crypto/aes-neonbs-glue.c 		req = &subreq;
req               354 arch/arm/crypto/aes-neonbs-glue.c 	err = skcipher_walk_virt(&walk, req, true);
req               382 arch/arm/crypto/aes-neonbs-glue.c 	scatterwalk_map_and_copy(buf, req->dst, req->cryptlen - AES_BLOCK_SIZE,
req               385 arch/arm/crypto/aes-neonbs-glue.c 	scatterwalk_map_and_copy(buf, req->src, req->cryptlen, tail, 0);
req               387 arch/arm/crypto/aes-neonbs-glue.c 	crypto_xor(buf, req->iv, AES_BLOCK_SIZE);
req               394 arch/arm/crypto/aes-neonbs-glue.c 	crypto_xor(buf, req->iv, AES_BLOCK_SIZE);
req               396 arch/arm/crypto/aes-neonbs-glue.c 	scatterwalk_map_and_copy(buf, req->dst, req->cryptlen - AES_BLOCK_SIZE,
req               401 arch/arm/crypto/aes-neonbs-glue.c static int xts_encrypt(struct skcipher_request *req)
req               403 arch/arm/crypto/aes-neonbs-glue.c 	return __xts_crypt(req, true, aesbs_xts_encrypt);
req               406 arch/arm/crypto/aes-neonbs-glue.c static int xts_decrypt(struct skcipher_request *req)
req               408 arch/arm/crypto/aes-neonbs-glue.c 	return __xts_crypt(req, false, aesbs_xts_decrypt);
req                65 arch/arm/crypto/chacha-neon-glue.c static int chacha_neon_stream_xor(struct skcipher_request *req,
req                72 arch/arm/crypto/chacha-neon-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req                92 arch/arm/crypto/chacha-neon-glue.c static int chacha_neon(struct skcipher_request *req)
req                94 arch/arm/crypto/chacha-neon-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req                97 arch/arm/crypto/chacha-neon-glue.c 	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
req                98 arch/arm/crypto/chacha-neon-glue.c 		return crypto_chacha_crypt(req);
req               100 arch/arm/crypto/chacha-neon-glue.c 	return chacha_neon_stream_xor(req, ctx, req->iv);
req               103 arch/arm/crypto/chacha-neon-glue.c static int xchacha_neon(struct skcipher_request *req)
req               105 arch/arm/crypto/chacha-neon-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               111 arch/arm/crypto/chacha-neon-glue.c 	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
req               112 arch/arm/crypto/chacha-neon-glue.c 		return crypto_xchacha_crypt(req);
req               114 arch/arm/crypto/chacha-neon-glue.c 	crypto_chacha_init(state, ctx, req->iv);
req               121 arch/arm/crypto/chacha-neon-glue.c 	memcpy(&real_iv[0], req->iv + 24, 8);
req               122 arch/arm/crypto/chacha-neon-glue.c 	memcpy(&real_iv[8], req->iv + 16, 8);
req               123 arch/arm/crypto/chacha-neon-glue.c 	return chacha_neon_stream_xor(req, &subctx, real_iv);
req               204 arch/arm/crypto/ghash-ce-glue.c static int ghash_async_init(struct ahash_request *req)
req               206 arch/arm/crypto/ghash-ce-glue.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               208 arch/arm/crypto/ghash-ce-glue.c 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
req               217 arch/arm/crypto/ghash-ce-glue.c static int ghash_async_update(struct ahash_request *req)
req               219 arch/arm/crypto/ghash-ce-glue.c 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
req               220 arch/arm/crypto/ghash-ce-glue.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               226 arch/arm/crypto/ghash-ce-glue.c 		memcpy(cryptd_req, req, sizeof(*req));
req               231 arch/arm/crypto/ghash-ce-glue.c 		return shash_ahash_update(req, desc);
req               235 arch/arm/crypto/ghash-ce-glue.c static int ghash_async_final(struct ahash_request *req)
req               237 arch/arm/crypto/ghash-ce-glue.c 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
req               238 arch/arm/crypto/ghash-ce-glue.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               244 arch/arm/crypto/ghash-ce-glue.c 		memcpy(cryptd_req, req, sizeof(*req));
req               249 arch/arm/crypto/ghash-ce-glue.c 		return crypto_shash_final(desc, req->result);
req               253 arch/arm/crypto/ghash-ce-glue.c static int ghash_async_digest(struct ahash_request *req)
req               255 arch/arm/crypto/ghash-ce-glue.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               257 arch/arm/crypto/ghash-ce-glue.c 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
req               262 arch/arm/crypto/ghash-ce-glue.c 		memcpy(cryptd_req, req, sizeof(*req));
req               270 arch/arm/crypto/ghash-ce-glue.c 		return shash_ahash_digest(req, desc);
req               274 arch/arm/crypto/ghash-ce-glue.c static int ghash_async_import(struct ahash_request *req, const void *in)
req               276 arch/arm/crypto/ghash-ce-glue.c 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
req               277 arch/arm/crypto/ghash-ce-glue.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               286 arch/arm/crypto/ghash-ce-glue.c static int ghash_async_export(struct ahash_request *req, void *out)
req               288 arch/arm/crypto/ghash-ce-glue.c 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
req               343 arch/arm/include/asm/assembler.h .macro safe_svcmode_maskall reg:req
req               442 arch/arm/include/asm/assembler.h 	.macro	string name:req, string
req                19 arch/arm/include/asm/uaccess-asm.h 	.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
req                31 arch/arm/include/asm/uaccess-asm.h 	.macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
req               119 arch/arm/mach-rpc/ecard.c static void ecard_task_reset(struct ecard_request *req)
req               121 arch/arm/mach-rpc/ecard.c 	struct expansion_card *ec = req->ec;
req               133 arch/arm/mach-rpc/ecard.c static void ecard_task_readbytes(struct ecard_request *req)
req               135 arch/arm/mach-rpc/ecard.c 	struct expansion_card *ec = req->ec;
req               136 arch/arm/mach-rpc/ecard.c 	unsigned char *buf = req->buffer;
req               137 arch/arm/mach-rpc/ecard.c 	unsigned int len = req->length;
req               138 arch/arm/mach-rpc/ecard.c 	unsigned int off = req->address;
req               186 arch/arm/mach-rpc/ecard.c 		if (!req->use_loader || !ec->loader) {
req               274 arch/arm/mach-rpc/ecard.c 		struct ecard_request *req;
req               278 arch/arm/mach-rpc/ecard.c 		req = xchg(&ecard_req, NULL);
req               279 arch/arm/mach-rpc/ecard.c 		if (req != NULL) {
req               280 arch/arm/mach-rpc/ecard.c 			req->fn(req);
req               281 arch/arm/mach-rpc/ecard.c 			complete(req->complete);
req               292 arch/arm/mach-rpc/ecard.c static void ecard_call(struct ecard_request *req)
req               296 arch/arm/mach-rpc/ecard.c 	req->complete = &completion;
req               299 arch/arm/mach-rpc/ecard.c 	ecard_req = req;
req               314 arch/arm/mach-rpc/ecard.c 	struct ecard_request req;
req               316 arch/arm/mach-rpc/ecard.c 	req.fn		= ecard_task_readbytes;
req               317 arch/arm/mach-rpc/ecard.c 	req.ec		= ec;
req               318 arch/arm/mach-rpc/ecard.c 	req.address	= off;
req               319 arch/arm/mach-rpc/ecard.c 	req.length	= len;
req               320 arch/arm/mach-rpc/ecard.c 	req.use_loader	= useld;
req               321 arch/arm/mach-rpc/ecard.c 	req.buffer	= addr;
req               323 arch/arm/mach-rpc/ecard.c 	ecard_call(&req);
req              1084 arch/arm/mach-rpc/ecard.c 	struct ecard_request req;
req              1096 arch/arm/mach-rpc/ecard.c 		req.fn = ecard_task_reset;
req              1097 arch/arm/mach-rpc/ecard.c 		req.ec = ec;
req              1098 arch/arm/mach-rpc/ecard.c 		ecard_call(&req);
req               154 arch/arm/plat-omap/dma.c static inline void set_gdma_dev(int req, int dev)
req               156 arch/arm/plat-omap/dma.c 	u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
req               157 arch/arm/plat-omap/dma.c 	int shift = ((req - 1) % 5) * 6;
req               166 arch/arm/plat-omap/dma.c #define set_gdma_dev(req, dev)	do {} while (0)
req                67 arch/arm64/crypto/aes-ce-ccm-glue.c static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
req                69 arch/arm64/crypto/aes-ce-ccm-glue.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req                71 arch/arm64/crypto/aes-ce-ccm-glue.c 	u32 l = req->iv[0] + 1;
req                88 arch/arm64/crypto/aes-ce-ccm-glue.c 	memcpy(maciv, req->iv, AES_BLOCK_SIZE - l);
req                98 arch/arm64/crypto/aes-ce-ccm-glue.c 	if (req->assoclen)
req               101 arch/arm64/crypto/aes-ce-ccm-glue.c 	memset(&req->iv[AES_BLOCK_SIZE - l], 0, l);
req               140 arch/arm64/crypto/aes-ce-ccm-glue.c static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
req               142 arch/arm64/crypto/aes-ce-ccm-glue.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               146 arch/arm64/crypto/aes-ce-ccm-glue.c 	u32 len = req->assoclen;
req               160 arch/arm64/crypto/aes-ce-ccm-glue.c 	scatterwalk_start(&walk, req->src);
req               228 arch/arm64/crypto/aes-ce-ccm-glue.c static int ccm_encrypt(struct aead_request *req)
req               230 arch/arm64/crypto/aes-ce-ccm-glue.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               235 arch/arm64/crypto/aes-ce-ccm-glue.c 	u32 len = req->cryptlen;
req               238 arch/arm64/crypto/aes-ce-ccm-glue.c 	err = ccm_init_mac(req, mac, len);
req               242 arch/arm64/crypto/aes-ce-ccm-glue.c 	if (req->assoclen)
req               243 arch/arm64/crypto/aes-ce-ccm-glue.c 		ccm_calculate_auth_mac(req, mac);
req               246 arch/arm64/crypto/aes-ce-ccm-glue.c 	memcpy(buf, req->iv, AES_BLOCK_SIZE);
req               248 arch/arm64/crypto/aes-ce-ccm-glue.c 	err = skcipher_walk_aead_encrypt(&walk, req, false);
req               279 arch/arm64/crypto/aes-ce-ccm-glue.c 	scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
req               285 arch/arm64/crypto/aes-ce-ccm-glue.c static int ccm_decrypt(struct aead_request *req)
req               287 arch/arm64/crypto/aes-ce-ccm-glue.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               293 arch/arm64/crypto/aes-ce-ccm-glue.c 	u32 len = req->cryptlen - authsize;
req               296 arch/arm64/crypto/aes-ce-ccm-glue.c 	err = ccm_init_mac(req, mac, len);
req               300 arch/arm64/crypto/aes-ce-ccm-glue.c 	if (req->assoclen)
req               301 arch/arm64/crypto/aes-ce-ccm-glue.c 		ccm_calculate_auth_mac(req, mac);
req               304 arch/arm64/crypto/aes-ce-ccm-glue.c 	memcpy(buf, req->iv, AES_BLOCK_SIZE);
req               306 arch/arm64/crypto/aes-ce-ccm-glue.c 	err = skcipher_walk_aead_decrypt(&walk, req, false);
req               338 arch/arm64/crypto/aes-ce-ccm-glue.c 	scatterwalk_map_and_copy(buf, req->src,
req               339 arch/arm64/crypto/aes-ce-ccm-glue.c 				 req->assoclen + req->cryptlen - authsize,
req               191 arch/arm64/crypto/aes-glue.c static int __maybe_unused ecb_encrypt(struct skcipher_request *req)
req               193 arch/arm64/crypto/aes-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               199 arch/arm64/crypto/aes-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               211 arch/arm64/crypto/aes-glue.c static int __maybe_unused ecb_decrypt(struct skcipher_request *req)
req               213 arch/arm64/crypto/aes-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               219 arch/arm64/crypto/aes-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               231 arch/arm64/crypto/aes-glue.c static int cbc_encrypt_walk(struct skcipher_request *req,
req               234 arch/arm64/crypto/aes-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               249 arch/arm64/crypto/aes-glue.c static int __maybe_unused cbc_encrypt(struct skcipher_request *req)
req               254 arch/arm64/crypto/aes-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               257 arch/arm64/crypto/aes-glue.c 	return cbc_encrypt_walk(req, &walk);
req               260 arch/arm64/crypto/aes-glue.c static int cbc_decrypt_walk(struct skcipher_request *req,
req               263 arch/arm64/crypto/aes-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               278 arch/arm64/crypto/aes-glue.c static int __maybe_unused cbc_decrypt(struct skcipher_request *req)
req               283 arch/arm64/crypto/aes-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               286 arch/arm64/crypto/aes-glue.c 	return cbc_decrypt_walk(req, &walk);
req               289 arch/arm64/crypto/aes-glue.c static int cts_cbc_encrypt(struct skcipher_request *req)
req               291 arch/arm64/crypto/aes-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               294 arch/arm64/crypto/aes-glue.c 	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
req               295 arch/arm64/crypto/aes-glue.c 	struct scatterlist *src = req->src, *dst = req->dst;
req               301 arch/arm64/crypto/aes-glue.c 	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
req               304 arch/arm64/crypto/aes-glue.c 	if (req->cryptlen <= AES_BLOCK_SIZE) {
req               305 arch/arm64/crypto/aes-glue.c 		if (req->cryptlen < AES_BLOCK_SIZE)
req               311 arch/arm64/crypto/aes-glue.c 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
req               313 arch/arm64/crypto/aes-glue.c 					   req->iv);
req               320 arch/arm64/crypto/aes-glue.c 		if (req->cryptlen == AES_BLOCK_SIZE)
req               323 arch/arm64/crypto/aes-glue.c 		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
req               324 arch/arm64/crypto/aes-glue.c 		if (req->dst != req->src)
req               325 arch/arm64/crypto/aes-glue.c 			dst = scatterwalk_ffwd(sg_dst, req->dst,
req               331 arch/arm64/crypto/aes-glue.c 				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
req               332 arch/arm64/crypto/aes-glue.c 				   req->iv);
req               346 arch/arm64/crypto/aes-glue.c static int cts_cbc_decrypt(struct skcipher_request *req)
req               348 arch/arm64/crypto/aes-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               351 arch/arm64/crypto/aes-glue.c 	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
req               352 arch/arm64/crypto/aes-glue.c 	struct scatterlist *src = req->src, *dst = req->dst;
req               358 arch/arm64/crypto/aes-glue.c 	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
req               361 arch/arm64/crypto/aes-glue.c 	if (req->cryptlen <= AES_BLOCK_SIZE) {
req               362 arch/arm64/crypto/aes-glue.c 		if (req->cryptlen < AES_BLOCK_SIZE)
req               368 arch/arm64/crypto/aes-glue.c 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
req               370 arch/arm64/crypto/aes-glue.c 					   req->iv);
req               377 arch/arm64/crypto/aes-glue.c 		if (req->cryptlen == AES_BLOCK_SIZE)
req               380 arch/arm64/crypto/aes-glue.c 		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
req               381 arch/arm64/crypto/aes-glue.c 		if (req->dst != req->src)
req               382 arch/arm64/crypto/aes-glue.c 			dst = scatterwalk_ffwd(sg_dst, req->dst,
req               388 arch/arm64/crypto/aes-glue.c 				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
req               389 arch/arm64/crypto/aes-glue.c 				   req->iv);
req               419 arch/arm64/crypto/aes-glue.c static int __maybe_unused essiv_cbc_encrypt(struct skcipher_request *req)
req               421 arch/arm64/crypto/aes-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               427 arch/arm64/crypto/aes-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               434 arch/arm64/crypto/aes-glue.c 				      req->iv, ctx->key2.key_enc);
req               438 arch/arm64/crypto/aes-glue.c 	return err ?: cbc_encrypt_walk(req, &walk);
req               441 arch/arm64/crypto/aes-glue.c static int __maybe_unused essiv_cbc_decrypt(struct skcipher_request *req)
req               443 arch/arm64/crypto/aes-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               449 arch/arm64/crypto/aes-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               456 arch/arm64/crypto/aes-glue.c 				      req->iv, ctx->key2.key_enc);
req               460 arch/arm64/crypto/aes-glue.c 	return err ?: cbc_decrypt_walk(req, &walk);
req               463 arch/arm64/crypto/aes-glue.c static int ctr_encrypt(struct skcipher_request *req)
req               465 arch/arm64/crypto/aes-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               471 arch/arm64/crypto/aes-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               517 arch/arm64/crypto/aes-glue.c static int __maybe_unused ctr_encrypt_sync(struct skcipher_request *req)
req               520 arch/arm64/crypto/aes-glue.c 		return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);
req               522 arch/arm64/crypto/aes-glue.c 	return ctr_encrypt(req);
req               525 arch/arm64/crypto/aes-glue.c static int __maybe_unused xts_encrypt(struct skcipher_request *req)
req               527 arch/arm64/crypto/aes-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               530 arch/arm64/crypto/aes-glue.c 	int tail = req->cryptlen % AES_BLOCK_SIZE;
req               536 arch/arm64/crypto/aes-glue.c 	if (req->cryptlen < AES_BLOCK_SIZE)
req               539 arch/arm64/crypto/aes-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               542 arch/arm64/crypto/aes-glue.c 		int xts_blocks = DIV_ROUND_UP(req->cryptlen,
req               549 arch/arm64/crypto/aes-glue.c 					      skcipher_request_flags(req),
req               551 arch/arm64/crypto/aes-glue.c 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
req               553 arch/arm64/crypto/aes-glue.c 					   req->iv);
req               554 arch/arm64/crypto/aes-glue.c 		req = &subreq;
req               555 arch/arm64/crypto/aes-glue.c 		err = skcipher_walk_virt(&walk, req, false);
req               577 arch/arm64/crypto/aes-glue.c 	dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
req               578 arch/arm64/crypto/aes-glue.c 	if (req->dst != req->src)
req               579 arch/arm64/crypto/aes-glue.c 		dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
req               581 arch/arm64/crypto/aes-glue.c 	skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
req               582 arch/arm64/crypto/aes-glue.c 				   req->iv);
req               597 arch/arm64/crypto/aes-glue.c static int __maybe_unused xts_decrypt(struct skcipher_request *req)
req               599 arch/arm64/crypto/aes-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               602 arch/arm64/crypto/aes-glue.c 	int tail = req->cryptlen % AES_BLOCK_SIZE;
req               608 arch/arm64/crypto/aes-glue.c 	if (req->cryptlen < AES_BLOCK_SIZE)
req               611 arch/arm64/crypto/aes-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               614 arch/arm64/crypto/aes-glue.c 		int xts_blocks = DIV_ROUND_UP(req->cryptlen,
req               621 arch/arm64/crypto/aes-glue.c 					      skcipher_request_flags(req),
req               623 arch/arm64/crypto/aes-glue.c 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
req               625 arch/arm64/crypto/aes-glue.c 					   req->iv);
req               626 arch/arm64/crypto/aes-glue.c 		req = &subreq;
req               627 arch/arm64/crypto/aes-glue.c 		err = skcipher_walk_virt(&walk, req, false);
req               649 arch/arm64/crypto/aes-glue.c 	dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
req               650 arch/arm64/crypto/aes-glue.c 	if (req->dst != req->src)
req               651 arch/arm64/crypto/aes-glue.c 		dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
req               653 arch/arm64/crypto/aes-glue.c 	skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
req               654 arch/arm64/crypto/aes-glue.c 				   req->iv);
req                97 arch/arm64/crypto/aes-neonbs-glue.c static int __ecb_crypt(struct skcipher_request *req,
req               101 arch/arm64/crypto/aes-neonbs-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               106 arch/arm64/crypto/aes-neonbs-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               126 arch/arm64/crypto/aes-neonbs-glue.c static int ecb_encrypt(struct skcipher_request *req)
req               128 arch/arm64/crypto/aes-neonbs-glue.c 	return __ecb_crypt(req, aesbs_ecb_encrypt);
req               131 arch/arm64/crypto/aes-neonbs-glue.c static int ecb_decrypt(struct skcipher_request *req)
req               133 arch/arm64/crypto/aes-neonbs-glue.c 	return __ecb_crypt(req, aesbs_ecb_decrypt);
req               158 arch/arm64/crypto/aes-neonbs-glue.c static int cbc_encrypt(struct skcipher_request *req)
req               160 arch/arm64/crypto/aes-neonbs-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               165 arch/arm64/crypto/aes-neonbs-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               181 arch/arm64/crypto/aes-neonbs-glue.c static int cbc_decrypt(struct skcipher_request *req)
req               183 arch/arm64/crypto/aes-neonbs-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               188 arch/arm64/crypto/aes-neonbs-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               228 arch/arm64/crypto/aes-neonbs-glue.c static int ctr_encrypt(struct skcipher_request *req)
req               230 arch/arm64/crypto/aes-neonbs-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               236 arch/arm64/crypto/aes-neonbs-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               309 arch/arm64/crypto/aes-neonbs-glue.c static int ctr_encrypt_sync(struct skcipher_request *req)
req               312 arch/arm64/crypto/aes-neonbs-glue.c 		return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);
req               314 arch/arm64/crypto/aes-neonbs-glue.c 	return ctr_encrypt(req);
req               317 arch/arm64/crypto/aes-neonbs-glue.c static int __xts_crypt(struct skcipher_request *req, bool encrypt,
req               321 arch/arm64/crypto/aes-neonbs-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               323 arch/arm64/crypto/aes-neonbs-glue.c 	int tail = req->cryptlen % (8 * AES_BLOCK_SIZE);
req               332 arch/arm64/crypto/aes-neonbs-glue.c 	if (req->cryptlen < AES_BLOCK_SIZE)
req               337 arch/arm64/crypto/aes-neonbs-glue.c 		int xts_blocks = DIV_ROUND_UP(req->cryptlen,
req               342 arch/arm64/crypto/aes-neonbs-glue.c 					      skcipher_request_flags(req),
req               344 arch/arm64/crypto/aes-neonbs-glue.c 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
req               346 arch/arm64/crypto/aes-neonbs-glue.c 					   req->iv);
req               347 arch/arm64/crypto/aes-neonbs-glue.c 		req = &subreq;
req               352 arch/arm64/crypto/aes-neonbs-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               394 arch/arm64/crypto/aes-neonbs-glue.c 	dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
req               395 arch/arm64/crypto/aes-neonbs-glue.c 	if (req->dst != req->src)
req               396 arch/arm64/crypto/aes-neonbs-glue.c 		dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
req               398 arch/arm64/crypto/aes-neonbs-glue.c 	skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
req               399 arch/arm64/crypto/aes-neonbs-glue.c 				   req->iv);
req               401 arch/arm64/crypto/aes-neonbs-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               422 arch/arm64/crypto/aes-neonbs-glue.c static int xts_encrypt(struct skcipher_request *req)
req               424 arch/arm64/crypto/aes-neonbs-glue.c 	return __xts_crypt(req, true, aesbs_xts_encrypt);
req               427 arch/arm64/crypto/aes-neonbs-glue.c static int xts_decrypt(struct skcipher_request *req)
req               429 arch/arm64/crypto/aes-neonbs-glue.c 	return __xts_crypt(req, false, aesbs_xts_decrypt);
req                62 arch/arm64/crypto/chacha-neon-glue.c static int chacha_neon_stream_xor(struct skcipher_request *req,
req                69 arch/arm64/crypto/chacha-neon-glue.c 	err = skcipher_walk_virt(&walk, req, false);
req                89 arch/arm64/crypto/chacha-neon-glue.c static int chacha_neon(struct skcipher_request *req)
req                91 arch/arm64/crypto/chacha-neon-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req                94 arch/arm64/crypto/chacha-neon-glue.c 	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
req                95 arch/arm64/crypto/chacha-neon-glue.c 		return crypto_chacha_crypt(req);
req                97 arch/arm64/crypto/chacha-neon-glue.c 	return chacha_neon_stream_xor(req, ctx, req->iv);
req               100 arch/arm64/crypto/chacha-neon-glue.c static int xchacha_neon(struct skcipher_request *req)
req               102 arch/arm64/crypto/chacha-neon-glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               108 arch/arm64/crypto/chacha-neon-glue.c 	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
req               109 arch/arm64/crypto/chacha-neon-glue.c 		return crypto_xchacha_crypt(req);
req               111 arch/arm64/crypto/chacha-neon-glue.c 	crypto_chacha_init(state, ctx, req->iv);
req               118 arch/arm64/crypto/chacha-neon-glue.c 	memcpy(&real_iv[0], req->iv + 24, 8);
req               119 arch/arm64/crypto/chacha-neon-glue.c 	memcpy(&real_iv[8], req->iv + 16, 8);
req               120 arch/arm64/crypto/chacha-neon-glue.c 	return chacha_neon_stream_xor(req, &subctx, real_iv);
req               365 arch/arm64/crypto/ghash-ce-glue.c static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[])
req               367 arch/arm64/crypto/ghash-ce-glue.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               371 arch/arm64/crypto/ghash-ce-glue.c 	u32 len = req->assoclen;
req               374 arch/arm64/crypto/ghash-ce-glue.c 	scatterwalk_start(&walk, req->src);
req               401 arch/arm64/crypto/ghash-ce-glue.c static void gcm_final(struct aead_request *req, struct gcm_aes_ctx *ctx,
req               407 arch/arm64/crypto/ghash-ce-glue.c 	lengths.a = cpu_to_be64(req->assoclen * 8);
req               419 arch/arm64/crypto/ghash-ce-glue.c static int gcm_encrypt(struct aead_request *req)
req               421 arch/arm64/crypto/ghash-ce-glue.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               431 arch/arm64/crypto/ghash-ce-glue.c 	if (req->assoclen)
req               432 arch/arm64/crypto/ghash-ce-glue.c 		gcm_calculate_auth_mac(req, dg);
req               434 arch/arm64/crypto/ghash-ce-glue.c 	memcpy(iv, req->iv, GCM_IV_SIZE);
req               437 arch/arm64/crypto/ghash-ce-glue.c 	err = skcipher_walk_aead_encrypt(&walk, req, false);
req               529 arch/arm64/crypto/ghash-ce-glue.c 	gcm_final(req, ctx, dg, tag, req->cryptlen);
req               532 arch/arm64/crypto/ghash-ce-glue.c 	scatterwalk_map_and_copy(tag, req->dst, req->assoclen + req->cryptlen,
req               538 arch/arm64/crypto/ghash-ce-glue.c static int gcm_decrypt(struct aead_request *req)
req               540 arch/arm64/crypto/ghash-ce-glue.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               551 arch/arm64/crypto/ghash-ce-glue.c 	if (req->assoclen)
req               552 arch/arm64/crypto/ghash-ce-glue.c 		gcm_calculate_auth_mac(req, dg);
req               554 arch/arm64/crypto/ghash-ce-glue.c 	memcpy(iv, req->iv, GCM_IV_SIZE);
req               557 arch/arm64/crypto/ghash-ce-glue.c 	err = skcipher_walk_aead_decrypt(&walk, req, false);
req               664 arch/arm64/crypto/ghash-ce-glue.c 	gcm_final(req, ctx, dg, tag, req->cryptlen - authsize);
req               667 arch/arm64/crypto/ghash-ce-glue.c 	scatterwalk_map_and_copy(buf, req->src,
req               668 arch/arm64/crypto/ghash-ce-glue.c 				 req->assoclen + req->cryptlen - authsize,
req                39 arch/arm64/include/asm/assembler.h 	.macro	restore_daif, flags:req
req                44 arch/arm64/include/asm/assembler.h 	.macro	inherit_daif, pstate:req, tmp:req
req               152 arch/arm64/include/asm/assembler.h lr	.req	x30		// link register
req               449 arch/arm64/include/asm/assembler.h 	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
req               632 arch/arm64/include/asm/assembler.h 	.macro		frame_push, regcount:req, extra
req               167 arch/arm64/include/asm/fpsimdmacros.h .macro __for from:req, to:req
req               176 arch/arm64/include/asm/fpsimdmacros.h .macro _for var:req, from:req, to:req, insn:vararg
req               177 arch/arm64/include/asm/fpsimdmacros.h 	.macro _for__body \var:req
req                87 arch/ia64/include/asm/perfmon.h extern int pfm_mod_read_pmds(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
req                88 arch/ia64/include/asm/perfmon.h extern int pfm_mod_write_pmcs(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
req                89 arch/ia64/include/asm/perfmon.h extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
req                90 arch/ia64/include/asm/perfmon.h extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
req              2481 arch/ia64/kernel/perfmon.c 	pfarg_context_t *req = (pfarg_context_t *)arg;
req              2486 arch/ia64/kernel/perfmon.c 	if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;
req              2488 arch/ia64/kernel/perfmon.c 	fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
req              2584 arch/ia64/kernel/perfmon.c 	pfarg_context_t *req = (pfarg_context_t *)arg;
req              2592 arch/ia64/kernel/perfmon.c 	ret = pfarg_is_sane(current, req);
req              2596 arch/ia64/kernel/perfmon.c 	ctx_flags = req->ctx_flags;
req              2614 arch/ia64/kernel/perfmon.c 	req->ctx_fd = ctx->ctx_fd = fd;
req              2619 arch/ia64/kernel/perfmon.c 	if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
req              2620 arch/ia64/kernel/perfmon.c 		ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
req              2769 arch/ia64/kernel/perfmon.c 	pfarg_reg_t *req = (pfarg_reg_t *)arg;
req              2801 arch/ia64/kernel/perfmon.c 	for (i = 0; i < count; i++, req++) {
req              2803 arch/ia64/kernel/perfmon.c 		cnum       = req->reg_num;
req              2804 arch/ia64/kernel/perfmon.c 		reg_flags  = req->reg_flags;
req              2805 arch/ia64/kernel/perfmon.c 		value      = req->reg_value;
req              2806 arch/ia64/kernel/perfmon.c 		smpl_pmds  = req->reg_smpl_pmds[0];
req              2807 arch/ia64/kernel/perfmon.c 		reset_pmds = req->reg_reset_pmds[0];
req              2888 arch/ia64/kernel/perfmon.c 		PFM_REG_RETFLAG_SET(req->reg_flags, 0);
req              2905 arch/ia64/kernel/perfmon.c 			ctx->ctx_pmds[cnum].eventid       = req->reg_smpl_eventid;
req              3001 arch/ia64/kernel/perfmon.c 	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
req              3009 arch/ia64/kernel/perfmon.c 	pfarg_reg_t *req = (pfarg_reg_t *)arg;
req              3044 arch/ia64/kernel/perfmon.c 	for (i = 0; i < count; i++, req++) {
req              3046 arch/ia64/kernel/perfmon.c 		cnum  = req->reg_num;
req              3047 arch/ia64/kernel/perfmon.c 		value = req->reg_value;
req              3072 arch/ia64/kernel/perfmon.c 		PFM_REG_RETFLAG_SET(req->reg_flags, 0);
req              3099 arch/ia64/kernel/perfmon.c 		ctx->ctx_pmds[cnum].long_reset  = req->reg_long_reset;
req              3100 arch/ia64/kernel/perfmon.c 		ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;
req              3105 arch/ia64/kernel/perfmon.c 		ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
req              3106 arch/ia64/kernel/perfmon.c 		ctx->ctx_pmds[cnum].mask = req->reg_random_mask;
req              3188 arch/ia64/kernel/perfmon.c 	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
req              3206 arch/ia64/kernel/perfmon.c 	pfarg_reg_t *req = (pfarg_reg_t *)arg;
req              3255 arch/ia64/kernel/perfmon.c 	for (i = 0; i < count; i++, req++) {
req              3257 arch/ia64/kernel/perfmon.c 		cnum        = req->reg_num;
req              3258 arch/ia64/kernel/perfmon.c 		reg_flags   = req->reg_flags;
req              3320 arch/ia64/kernel/perfmon.c 		req->reg_value            = val;
req              3321 arch/ia64/kernel/perfmon.c 		req->reg_flags            = reg_flags;
req              3322 arch/ia64/kernel/perfmon.c 		req->reg_last_reset_val   = lval;
req              3328 arch/ia64/kernel/perfmon.c 	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
req              3333 arch/ia64/kernel/perfmon.c pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
req              3337 arch/ia64/kernel/perfmon.c 	if (req == NULL) return -EINVAL;
req              3349 arch/ia64/kernel/perfmon.c 	return pfm_write_pmcs(ctx, req, nreq, regs);
req              3354 arch/ia64/kernel/perfmon.c pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
req              3358 arch/ia64/kernel/perfmon.c 	if (req == NULL) return -EINVAL;
req              3370 arch/ia64/kernel/perfmon.c 	return pfm_read_pmds(ctx, req, nreq, regs);
req              3631 arch/ia64/kernel/perfmon.c 	pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
req              3737 arch/ia64/kernel/perfmon.c 	for (i = 0; i < count; i++, req++) {
req              3739 arch/ia64/kernel/perfmon.c 		rnum      = req->dbreg_num;
req              3740 arch/ia64/kernel/perfmon.c 		dbreg.val = req->dbreg_value;
req              3761 arch/ia64/kernel/perfmon.c 		PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);
req              3816 arch/ia64/kernel/perfmon.c 	PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);
req              3834 arch/ia64/kernel/perfmon.c pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
req              3838 arch/ia64/kernel/perfmon.c 	if (req == NULL) return -EINVAL;
req              3850 arch/ia64/kernel/perfmon.c 	return pfm_write_ibrs(ctx, req, nreq, regs);
req              3855 arch/ia64/kernel/perfmon.c pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
req              3859 arch/ia64/kernel/perfmon.c 	if (req == NULL) return -EINVAL;
req              3871 arch/ia64/kernel/perfmon.c 	return pfm_write_dbrs(ctx, req, nreq, regs);
req              3879 arch/ia64/kernel/perfmon.c 	pfarg_features_t *req = (pfarg_features_t *)arg;
req              3881 arch/ia64/kernel/perfmon.c 	req->ft_version = PFM_VERSION;
req              4058 arch/ia64/kernel/perfmon.c 	pfarg_reg_t *req = (pfarg_reg_t *)arg;
req              4063 arch/ia64/kernel/perfmon.c 	for (i = 0; i < count; i++, req++) {
req              4065 arch/ia64/kernel/perfmon.c 		cnum = req->reg_num;
req              4069 arch/ia64/kernel/perfmon.c 		req->reg_value = PMC_DFL_VAL(cnum);
req              4071 arch/ia64/kernel/perfmon.c 		PFM_REG_RETFLAG_SET(req->reg_flags, 0);
req              4073 arch/ia64/kernel/perfmon.c 		DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
req              4078 arch/ia64/kernel/perfmon.c 	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
req              4114 arch/ia64/kernel/perfmon.c 	pfarg_load_t *req = (pfarg_load_t *)arg;
req              4127 arch/ia64/kernel/perfmon.c 			req->load_pid,
req              4132 arch/ia64/kernel/perfmon.c 	DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
req              4134 arch/ia64/kernel/perfmon.c 	if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
req              4139 arch/ia64/kernel/perfmon.c 	ret = pfm_get_task(ctx, req->load_pid, &task);
req              4141 arch/ia64/kernel/perfmon.c 		DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
req              4152 arch/ia64/kernel/perfmon.c 			req->load_pid));
req              4166 arch/ia64/kernel/perfmon.c 			DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
req              4227 arch/ia64/kernel/perfmon.c 		DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
req                43 arch/m68k/mac/misc.c 	struct adb_request req;
req                45 arch/m68k/mac/misc.c 	if (cuda_request(&req, NULL, 4, CUDA_PACKET, CUDA_GET_PRAM,
req                48 arch/m68k/mac/misc.c 	while (!req.complete)
req                50 arch/m68k/mac/misc.c 	return req.reply[3];
req                55 arch/m68k/mac/misc.c 	struct adb_request req;
req                57 arch/m68k/mac/misc.c 	if (cuda_request(&req, NULL, 5, CUDA_PACKET, CUDA_SET_PRAM,
req                60 arch/m68k/mac/misc.c 	while (!req.complete)
req                68 arch/m68k/mac/misc.c 	struct adb_request req;
req                70 arch/m68k/mac/misc.c 	if (pmu_request(&req, NULL, 3, PMU_READ_XPRAM,
req                73 arch/m68k/mac/misc.c 	pmu_wait_complete(&req);
req                75 arch/m68k/mac/misc.c 	return req.reply[0];
req                80 arch/m68k/mac/misc.c 	struct adb_request req;
req                82 arch/m68k/mac/misc.c 	if (pmu_request(&req, NULL, 4, PMU_WRITE_XPRAM,
req                85 arch/m68k/mac/misc.c 	pmu_wait_complete(&req);
req               342 arch/m68k/mac/misc.c 	struct adb_request req;
req               344 arch/m68k/mac/misc.c 	if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_RESET_SYSTEM) < 0)
req               346 arch/m68k/mac/misc.c 	while (!req.complete)
req               352 arch/m68k/mac/misc.c 	struct adb_request req;
req               354 arch/m68k/mac/misc.c 	if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN) < 0)
req               367 arch/m68k/mac/misc.c 	while (!req.complete)
req               395 arch/mips/alchemy/common/clock.c 				 struct clk_rate_request *req,
req               426 arch/mips/alchemy/common/clock.c 		if (pr < req->rate)
req               430 arch/mips/alchemy/common/clock.c 		tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, NULL);
req               432 arch/mips/alchemy/common/clock.c 		diff = req->rate - nr;
req               433 arch/mips/alchemy/common/clock.c 		if (nr > req->rate)
req               452 arch/mips/alchemy/common/clock.c 			tpr = req->rate * j;
req               457 arch/mips/alchemy/common/clock.c 			tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv,
req               460 arch/mips/alchemy/common/clock.c 			diff = req->rate - nr;
req               461 arch/mips/alchemy/common/clock.c 			if (nr > req->rate)
req               477 arch/mips/alchemy/common/clock.c 	req->best_parent_rate = bpr;
req               478 arch/mips/alchemy/common/clock.c 	req->best_parent_hw = bpc;
req               479 arch/mips/alchemy/common/clock.c 	req->rate = br;
req               573 arch/mips/alchemy/common/clock.c 				 struct clk_rate_request *req)
req               575 arch/mips/alchemy/common/clock.c 	return alchemy_clk_fgcs_detr(hw, req, 2, 512);
req               703 arch/mips/alchemy/common/clock.c 				 struct clk_rate_request *req)
req               716 arch/mips/alchemy/common/clock.c 	return alchemy_clk_fgcs_detr(hw, req, scale, maxdiv);
req               920 arch/mips/alchemy/common/clock.c 				 struct clk_rate_request *req)
req               925 arch/mips/alchemy/common/clock.c 	return alchemy_clk_fgcs_detr(hw, req, scale, 4);
req               444 arch/mips/txx9/generic/setup.c 	struct uart_port req;
req               446 arch/mips/txx9/generic/setup.c 	memset(&req, 0, sizeof(req));
req               447 arch/mips/txx9/generic/setup.c 	req.line = line;
req               448 arch/mips/txx9/generic/setup.c 	req.iotype = UPIO_MEM;
req               449 arch/mips/txx9/generic/setup.c 	req.membase = ioremap(baseaddr, 0x24);
req               450 arch/mips/txx9/generic/setup.c 	req.mapbase = baseaddr;
req               451 arch/mips/txx9/generic/setup.c 	req.irq = irq;
req               453 arch/mips/txx9/generic/setup.c 		req.flags |= UPF_BUGGY_UART /*HAVE_CTS_LINE*/;
req               455 arch/mips/txx9/generic/setup.c 		req.flags |= UPF_MAGIC_MULTIPLIER /*USE_SCLK*/;
req               456 arch/mips/txx9/generic/setup.c 		req.uartclk = sclk;
req               458 arch/mips/txx9/generic/setup.c 		req.uartclk = TXX9_IMCLK;
req               459 arch/mips/txx9/generic/setup.c 	early_serial_txx9_setup(&req);
req               118 arch/powerpc/include/asm/mpc5121.h int mpc512x_lpbfifo_submit(struct mpc512x_lpbfifo_request *req);
req               324 arch/powerpc/include/asm/mpc52xx.h extern int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req);
req               325 arch/powerpc/include/asm/mpc52xx.h extern void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req);
req               327 arch/powerpc/include/asm/mpc52xx.h extern int mpc52xx_lpbfifo_start_xfer(struct mpc52xx_lpbfifo_request *req);
req               911 arch/powerpc/kvm/book3s_hv.c 	unsigned long req = kvmppc_get_gpr(vcpu, 3);
req               917 arch/powerpc/kvm/book3s_hv.c 	if (req <= MAX_HCALL_OPCODE &&
req               918 arch/powerpc/kvm/book3s_hv.c 	    !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
req               921 arch/powerpc/kvm/book3s_hv.c 	switch (req) {
req               999 arch/powerpc/kvm/book3s_hv.c 			ret = kvmppc_xics_hcall(vcpu, req);
req               574 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	unsigned long flags, req, pte_index, rcbits;
req               590 arch/powerpc/kvm/book3s_hv_rm_mmu.c 			req = flags >> 6;
req               592 arch/powerpc/kvm/book3s_hv_rm_mmu.c 			if (req == 3) {		/* no more requests */
req               596 arch/powerpc/kvm/book3s_hv_rm_mmu.c 			if (req != 1 || flags == 3 ||
req               875 arch/powerpc/kvm/book3s_xics.c int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
req               886 arch/powerpc/kvm/book3s_xics.c 	switch (req) {
req               899 arch/powerpc/kvm/book3s_xics.c 		return kvmppc_xics_rm_complete(vcpu, req);
req               901 arch/powerpc/kvm/book3s_xics.c 	switch (req) {
req               339 arch/powerpc/kvm/trace_hv.h 		__field(unsigned long,	req)
req               348 arch/powerpc/kvm/trace_hv.h 		__entry->req   = kvmppc_get_gpr(vcpu, 3);
req               357 arch/powerpc/kvm/trace_hv.h 		   __print_symbolic(__entry->req, kvm_trace_symbol_hcall),
req              1096 arch/powerpc/perf/hv-24x7.c 		struct hv_24x7_request *req;
req              1098 arch/powerpc/perf/hv-24x7.c 		req = request_buffer->requests;
req              1100 arch/powerpc/perf/hv-24x7.c 				      req->performance_domain, req->data_offset,
req              1101 arch/powerpc/perf/hv-24x7.c 				      req->starting_ix, req->starting_lpar_ix,
req              1123 arch/powerpc/perf/hv-24x7.c 	struct hv_24x7_request *req;
req              1146 arch/powerpc/perf/hv-24x7.c 	req = (void *) request_buffer->requests + i * req_size;
req              1148 arch/powerpc/perf/hv-24x7.c 	req->performance_domain = event_get_domain(event);
req              1149 arch/powerpc/perf/hv-24x7.c 	req->data_size = cpu_to_be16(8);
req              1150 arch/powerpc/perf/hv-24x7.c 	req->data_offset = cpu_to_be32(event_get_offset(event));
req              1151 arch/powerpc/perf/hv-24x7.c 	req->starting_lpar_ix = cpu_to_be16(event_get_lpar(event));
req              1152 arch/powerpc/perf/hv-24x7.c 	req->max_num_lpars = cpu_to_be16(1);
req              1153 arch/powerpc/perf/hv-24x7.c 	req->starting_ix = cpu_to_be16(idx);
req              1154 arch/powerpc/perf/hv-24x7.c 	req->max_ix = cpu_to_be16(1);
req              1157 arch/powerpc/perf/hv-24x7.c 		if (domain_needs_aggregation(req->performance_domain))
req              1158 arch/powerpc/perf/hv-24x7.c 			req->max_num_thread_groups = -1;
req              1159 arch/powerpc/perf/hv-24x7.c 		else if (req->performance_domain != HV_PERF_DOMAIN_PHYS_CHIP) {
req              1160 arch/powerpc/perf/hv-24x7.c 			req->starting_thread_group_ix = idx % 2;
req              1161 arch/powerpc/perf/hv-24x7.c 			req->max_num_thread_groups = 1;
req               137 arch/powerpc/perf/hv-gpci.c static unsigned long single_gpci_request(u32 req, u32 starting_index,
req               149 arch/powerpc/perf/hv-gpci.c 	arg->params.counter_request = cpu_to_be32(req);
req                14 arch/powerpc/perf/req-gen/_begin.h #define REQ_GEN_PREFIX req-gen
req                42 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 	struct mpc512x_lpbfifo_request *req;
req                66 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 	struct mpc512x_lpbfifo_request *req = NULL;
req                75 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 	req = lpbfifo.req;
req                76 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 	if (!req || req->dir == MPC512X_LPBFIFO_REQ_DIR_READ) {
req                97 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 	lpbfifo.req = NULL;
req               101 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 	if (req->callback)
req               102 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 		req->callback(req);
req               118 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 	struct mpc512x_lpbfifo_request *req = NULL;
req               128 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 	req = lpbfifo.req;
req               129 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 	if (!req) {
req               136 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 	if (req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE)
req               141 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 			lpbfifo.ram_bus_addr, req->size, dir);
req               147 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 		lpbfifo.req = NULL;
req               151 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 		if (req->callback)
req               152 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 			req->callback(req);
req               189 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 	if (lpbfifo.req->size == 0 || !IS_ALIGNED(lpbfifo.req->size, 4))
req               192 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 	if (lpbfifo.req->portsize != LPB_DEV_PORTSIZE_UNDEFINED) {
req               193 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 		bpt = lpbfifo.req->portsize;
req               198 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 		if (IS_ALIGNED(lpbfifo.req->dev_phys_addr, min(bpt, 0x8u)) &&
req               199 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 					IS_ALIGNED(lpbfifo.req->size, bpt)) {
req               214 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 		phys_addr_t access_start = lpbfifo.req->dev_phys_addr;
req               215 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 		phys_addr_t access_end = access_start + lpbfifo.req->size;
req               228 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 	if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE) {
req               251 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 			lpbfifo.req->ram_virt_addr, lpbfifo.req->size, dir);
req               257 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 	sg_dma_len(&sg) = lpbfifo.req->size;
req               287 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 	out_be32(&lpbfifo.regs->start_addr, lpbfifo.req->dev_phys_addr);
req               294 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 	if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_READ)
req               303 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 	if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE)
req               311 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 	bits = lpbfifo.req->size | MPC512X_SCLPC_START;
req               328 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 						lpbfifo.req->size, dir);
req               332 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c static int mpc512x_lpbfifo_submit_locked(struct mpc512x_lpbfifo_request *req)
req               340 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 	if (lpbfifo.req)
req               345 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 	lpbfifo.req = req;
req               349 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 		lpbfifo.req = NULL; /* Set the FIFO as idle */
req               354 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c int mpc512x_lpbfifo_submit(struct mpc512x_lpbfifo_request *req)
req               360 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c 	ret = mpc512x_lpbfifo_submit_locked(req);
req                52 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	struct mpc52xx_lpbfifo_request *req;
req                62 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req)
req                64 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	size_t transfer_size = req->size - req->pos;
req                70 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
req                71 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
req                72 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	int poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;
req                95 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 			data = req->data + req->pos;
req               149 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 		bd->data[0] = req->data_phys + req->pos;
req               163 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 		 req->offset + req->pos);
req               166 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	bit_fields = req->cs << 24 | 0x000008;
req               172 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	if (!lpbfifo.req->defer_xfer_start)
req               221 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	struct mpc52xx_lpbfifo_request *req;
req               234 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	req = lpbfifo.req;
req               235 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	if (!req) {
req               241 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
req               242 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
req               243 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;
req               269 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 		data = req->data + req->pos;
req               275 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	req->pos += count;
req               278 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	if (req->size - req->pos)
req               279 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 		mpc52xx_lpbfifo_kick(req); /* more work to do */
req               300 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	req->last_byte = ((u8 *)req->data)[req->size - 1];
req               305 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 		lpbfifo.req = NULL;
req               308 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 		req->irq_count++;
req               310 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	req->irq_ticks += get_tbl() - ts;
req               314 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	if (do_callback && req->callback)
req               315 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 		req->callback(req);
req               327 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	struct mpc52xx_lpbfifo_request *req;
req               335 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	req = lpbfifo.req;
req               336 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	if (!req || (req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA)) {
req               342 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 		req->irq_count++;
req               347 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 		req->buffer_not_done_cnt++;
req               348 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 		if ((req->buffer_not_done_cnt % 1000) == 0)
req               356 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	req->last_byte = ((u8 *)req->data)[req->size - 1];
req               358 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	req->pos = status & 0x00ffffff;
req               361 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	lpbfifo.req = NULL;
req               364 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	req->irq_ticks += get_tbl() - ts;
req               367 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	if (req->callback)
req               368 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 		req->callback(req);
req               378 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	struct mpc52xx_lpbfifo_request *req = lpbfifo.req;
req               379 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
req               380 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
req               396 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req)
req               406 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	if (lpbfifo.req) {
req               412 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	lpbfifo.req = req;
req               413 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	req->irq_count = 0;
req               414 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	req->irq_ticks = 0;
req               415 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	req->buffer_not_done_cnt = 0;
req               416 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	req->pos = 0;
req               418 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	mpc52xx_lpbfifo_kick(req);
req               424 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c int mpc52xx_lpbfifo_start_xfer(struct mpc52xx_lpbfifo_request *req)
req               437 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	if (lpbfifo.req && !lpbfifo.req->defer_xfer_start) {
req               446 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	if (lpbfifo.req && lpbfifo.req == req &&
req               447 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	    lpbfifo.req->defer_xfer_start) {
req               456 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req)
req               461 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 	if (lpbfifo.req == req) {
req               466 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c 		lpbfifo.req = NULL;
req               667 arch/powerpc/platforms/powermac/low_i2c.c static void pmu_i2c_complete(struct adb_request *req)
req               669 arch/powerpc/platforms/powermac/low_i2c.c 	complete(req->arg);
req               675 arch/powerpc/platforms/powermac/low_i2c.c 	struct adb_request *req = bus->hostdata;
req               676 arch/powerpc/platforms/powermac/low_i2c.c 	struct pmu_i2c_hdr *hdr = (struct pmu_i2c_hdr *)&req->data[1];
req               689 arch/powerpc/platforms/powermac/low_i2c.c 		memset(req, 0, sizeof(struct adb_request));
req               717 arch/powerpc/platforms/powermac/low_i2c.c 		req->data[0] = PMU_I2C_CMD;
req               718 arch/powerpc/platforms/powermac/low_i2c.c 		req->reply[0] = 0xff;
req               719 arch/powerpc/platforms/powermac/low_i2c.c 		req->nbytes = sizeof(struct pmu_i2c_hdr) + 1;
req               720 arch/powerpc/platforms/powermac/low_i2c.c 		req->done = pmu_i2c_complete;
req               721 arch/powerpc/platforms/powermac/low_i2c.c 		req->arg = &comp;
req               724 arch/powerpc/platforms/powermac/low_i2c.c 			req->nbytes += len;
req               726 arch/powerpc/platforms/powermac/low_i2c.c 		rc = pmu_queue_request(req);
req               730 arch/powerpc/platforms/powermac/low_i2c.c 		if (req->reply[0] == PMU_I2C_STATUS_OK)
req               734 arch/powerpc/platforms/powermac/low_i2c.c 	if (req->reply[0] != PMU_I2C_STATUS_OK)
req               738 arch/powerpc/platforms/powermac/low_i2c.c 		memset(req, 0, sizeof(struct adb_request));
req               748 arch/powerpc/platforms/powermac/low_i2c.c 		req->data[0] = PMU_I2C_CMD;
req               749 arch/powerpc/platforms/powermac/low_i2c.c 		req->reply[0] = 0xff;
req               750 arch/powerpc/platforms/powermac/low_i2c.c 		req->nbytes = 2;
req               751 arch/powerpc/platforms/powermac/low_i2c.c 		req->done = pmu_i2c_complete;
req               752 arch/powerpc/platforms/powermac/low_i2c.c 		req->arg = &comp;
req               753 arch/powerpc/platforms/powermac/low_i2c.c 		rc = pmu_queue_request(req);
req               758 arch/powerpc/platforms/powermac/low_i2c.c 		if (req->reply[0] == PMU_I2C_STATUS_OK && !read)
req               760 arch/powerpc/platforms/powermac/low_i2c.c 		if (req->reply[0] == PMU_I2C_STATUS_DATAREAD && read) {
req               761 arch/powerpc/platforms/powermac/low_i2c.c 			int rlen = req->reply_len - 1;
req               769 arch/powerpc/platforms/powermac/low_i2c.c 				memcpy(data, &req->reply[1], len);
req               188 arch/powerpc/platforms/powermac/nvram.c static void pmu_nvram_complete(struct adb_request *req)
req               190 arch/powerpc/platforms/powermac/nvram.c 	if (req->arg)
req               191 arch/powerpc/platforms/powermac/nvram.c 		complete((struct completion *)req->arg);
req               196 arch/powerpc/platforms/powermac/nvram.c 	struct adb_request req;
req               199 arch/powerpc/platforms/powermac/nvram.c 	req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
req               200 arch/powerpc/platforms/powermac/nvram.c 	if (pmu_request(&req, pmu_nvram_complete, 3, PMU_READ_NVRAM,
req               205 arch/powerpc/platforms/powermac/nvram.c 	while (!req.complete)
req               207 arch/powerpc/platforms/powermac/nvram.c 	return req.reply[0];
req               212 arch/powerpc/platforms/powermac/nvram.c 	struct adb_request req;
req               215 arch/powerpc/platforms/powermac/nvram.c 	req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
req               216 arch/powerpc/platforms/powermac/nvram.c 	if (pmu_request(&req, pmu_nvram_complete, 4, PMU_WRITE_NVRAM,
req               221 arch/powerpc/platforms/powermac/nvram.c 	while (!req.complete)
req               377 arch/powerpc/platforms/powermac/setup.c 	struct adb_request req;
req               379 arch/powerpc/platforms/powermac/setup.c 	cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_RESET_SYSTEM);
req               386 arch/powerpc/platforms/powermac/setup.c 	struct adb_request req;
req               388 arch/powerpc/platforms/powermac/setup.c 	cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN);
req               249 arch/powerpc/platforms/pseries/msi.c 	int req;
req               252 arch/powerpc/platforms/pseries/msi.c 		req = counts->request;
req               256 arch/powerpc/platforms/pseries/msi.c 		req = 0;
req               259 arch/powerpc/platforms/pseries/msi.c 			req = be32_to_cpup(p);
req               263 arch/powerpc/platforms/pseries/msi.c 			req = max(req, (int)be32_to_cpup(p));
req               266 arch/powerpc/platforms/pseries/msi.c 	if (req < counts->quota)
req               267 arch/powerpc/platforms/pseries/msi.c 		counts->spare += counts->quota - req;
req               268 arch/powerpc/platforms/pseries/msi.c 	else if (req > counts->quota)
req                38 arch/s390/boot/startup.c int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode);
req               208 arch/s390/crypto/aes_s390.c 	SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
req               210 arch/s390/crypto/aes_s390.c 	skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
req               211 arch/s390/crypto/aes_s390.c 	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
req               212 arch/s390/crypto/aes_s390.c 	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
req               214 arch/s390/crypto/aes_s390.c 	ret = crypto_skcipher_decrypt(req);
req               216 arch/s390/crypto/aes_s390.c 	skcipher_request_zero(req);
req               227 arch/s390/crypto/aes_s390.c 	SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
req               229 arch/s390/crypto/aes_s390.c 	skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
req               230 arch/s390/crypto/aes_s390.c 	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
req               231 arch/s390/crypto/aes_s390.c 	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
req               233 arch/s390/crypto/aes_s390.c 	ret = crypto_skcipher_encrypt(req);
req               476 arch/s390/crypto/aes_s390.c 	SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
req               479 arch/s390/crypto/aes_s390.c 	skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
req               480 arch/s390/crypto/aes_s390.c 	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
req               481 arch/s390/crypto/aes_s390.c 	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
req               483 arch/s390/crypto/aes_s390.c 	ret = crypto_skcipher_decrypt(req);
req               485 arch/s390/crypto/aes_s390.c 	skcipher_request_zero(req);
req               495 arch/s390/crypto/aes_s390.c 	SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
req               498 arch/s390/crypto/aes_s390.c 	skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
req               499 arch/s390/crypto/aes_s390.c 	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
req               500 arch/s390/crypto/aes_s390.c 	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
req               502 arch/s390/crypto/aes_s390.c 	ret = crypto_skcipher_encrypt(req);
req               504 arch/s390/crypto/aes_s390.c 	skcipher_request_zero(req);
req               988 arch/s390/crypto/aes_s390.c static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
req               990 arch/s390/crypto/aes_s390.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               994 arch/s390/crypto/aes_s390.c 	unsigned int aadlen = req->assoclen;
req               995 arch/s390/crypto/aes_s390.c 	unsigned int pclen = req->cryptlen;
req              1031 arch/s390/crypto/aes_s390.c 	memcpy(param.j0, req->iv, ivsize);
req              1035 arch/s390/crypto/aes_s390.c 	gcm_walk_start(&gw_in, req->src, len);
req              1036 arch/s390/crypto/aes_s390.c 	gcm_walk_start(&gw_out, req->dst, len);
req              1079 arch/s390/crypto/aes_s390.c 		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
req              1083 arch/s390/crypto/aes_s390.c 		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);
req              1089 arch/s390/crypto/aes_s390.c static int gcm_aes_encrypt(struct aead_request *req)
req              1091 arch/s390/crypto/aes_s390.c 	return gcm_aes_crypt(req, CPACF_ENCRYPT);
req              1094 arch/s390/crypto/aes_s390.c static int gcm_aes_decrypt(struct aead_request *req)
req              1096 arch/s390/crypto/aes_s390.c 	return gcm_aes_crypt(req, CPACF_DECRYPT);
req               256 arch/s390/include/asm/cpu_mf.h static inline int lsctl(struct hws_lsctl_request_block *req)
req               267 arch/s390/include/asm/cpu_mf.h 		: "+d" (cc), "+a" (req)
req               268 arch/s390/include/asm/cpu_mf.h 		: "m" (*req)
req               310 arch/s390/include/asm/diag.h int diag26c(void *req, void *resp, enum diag26c_sc subcode);
req               316 arch/s390/include/asm/diag.h 	int (*diag26c)(void *req, void *resp, enum diag26c_sc subcode);
req               129 arch/s390/include/asm/pci_insn.h u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status);
req               131 arch/s390/include/asm/pci_insn.h int __zpci_load(u64 *data, u64 req, u64 offset);
req               133 arch/s390/include/asm/pci_insn.h int __zpci_store(u64 data, u64 req, u64 offset);
req               135 arch/s390/include/asm/pci_insn.h int __zpci_store_block(const u64 *data, u64 req, u64 offset);
req               105 arch/s390/include/uapi/asm/chsc.h 	} req;
req               116 arch/s390/include/uapi/asm/chsc.h 	} req;
req               204 arch/s390/include/uapi/asm/zcrypt.h 	__u64		req;
req               217 arch/s390/kernel/diag.c int diag26c(void *req, void *resp, enum diag26c_sc subcode)
req               220 arch/s390/kernel/diag.c 	return diag_dma_ops.diag26c(req, resp, subcode);
req              2444 arch/s390/kvm/interrupt.c 	struct kvm_s390_io_adapter_req req;
req              2448 arch/s390/kvm/interrupt.c 	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
req              2451 arch/s390/kvm/interrupt.c 	adapter = get_io_adapter(dev->kvm, req.id);
req              2454 arch/s390/kvm/interrupt.c 	switch (req.type) {
req              2456 arch/s390/kvm/interrupt.c 		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
req              2461 arch/s390/kvm/interrupt.c 		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
req              2464 arch/s390/kvm/interrupt.c 		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
req              2499 arch/s390/kvm/interrupt.c 	struct kvm_s390_ais_req req;
req              2505 arch/s390/kvm/interrupt.c 	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
req              2508 arch/s390/kvm/interrupt.c 	if (req.isc > MAX_ISC)
req              2511 arch/s390/kvm/interrupt.c 	trace_kvm_s390_modify_ais_mode(req.isc,
req              2512 arch/s390/kvm/interrupt.c 				       (fi->simm & AIS_MODE_MASK(req.isc)) ?
req              2513 arch/s390/kvm/interrupt.c 				       (fi->nimm & AIS_MODE_MASK(req.isc)) ?
req              2515 arch/s390/kvm/interrupt.c 				       KVM_S390_AIS_MODE_ALL, req.mode);
req              2518 arch/s390/kvm/interrupt.c 	switch (req.mode) {
req              2520 arch/s390/kvm/interrupt.c 		fi->simm &= ~AIS_MODE_MASK(req.isc);
req              2521 arch/s390/kvm/interrupt.c 		fi->nimm &= ~AIS_MODE_MASK(req.isc);
req              2524 arch/s390/kvm/interrupt.c 		fi->simm |= AIS_MODE_MASK(req.isc);
req              2525 arch/s390/kvm/interrupt.c 		fi->nimm &= ~AIS_MODE_MASK(req.isc);
req               987 arch/s390/kvm/kvm-s390.c static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
req               993 arch/s390/kvm/kvm-s390.c 		kvm_s390_sync_request(req, vcpu);
req              3144 arch/s390/kvm/kvm-s390.c void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
req              3146 arch/s390/kvm/kvm-s390.c 	kvm_make_request(req, vcpu);
req               295 arch/s390/kvm/kvm-s390.h void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
req               113 arch/s390/pci/pci.c 	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
req               121 arch/s390/pci/pci.c 	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
req               127 arch/s390/pci/pci.c 	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
req               131 arch/s390/pci/pci.c 	cc = zpci_mod_fc(req, &fib, &status);
req               140 arch/s390/pci/pci.c 	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
req               158 arch/s390/pci/pci.c 	cc = zpci_mod_fc(req, &fib, &status);
req               169 arch/s390/pci/pci.c 	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
req               177 arch/s390/pci/pci.c 	cc = zpci_mod_fc(req, &fib, &status);
req               190 arch/s390/pci/pci.c 	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
req               194 arch/s390/pci/pci.c 	rc = __zpci_load(&data, req, offset);
req               206 arch/s390/pci/pci.c 	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
req               212 arch/s390/pci/pci.c 	rc = __zpci_store(data, req, offset);
req                71 arch/s390/pci/pci_clp.c 	struct { u8 _[CLP_BLK_SIZE]; } *req = data;
req                81 arch/s390/pci/pci_clp.c 		: [cc] "+d" (cc), [ign] "=d" (ignored), "+m" (*req)
req                82 arch/s390/pci/pci_clp.c 		: [req] "a" (req), [lps] "i" (lps)
req               473 arch/s390/pci/pci_clp.c static int clp_base_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
req               483 arch/s390/pci/pci_clp.c static int clp_base_command(struct clp_req *req, struct clp_req_hdr *lpcb)
req               487 arch/s390/pci/pci_clp.c 		return clp_base_slpc(req, (void *) lpcb);
req               493 arch/s390/pci/pci_clp.c static int clp_pci_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
req               503 arch/s390/pci/pci_clp.c static int clp_pci_list(struct clp_req *req, struct clp_req_rsp_list_pci *lpcb)
req               515 arch/s390/pci/pci_clp.c static int clp_pci_query(struct clp_req *req,
req               528 arch/s390/pci/pci_clp.c static int clp_pci_query_grp(struct clp_req *req,
req               542 arch/s390/pci/pci_clp.c static int clp_pci_command(struct clp_req *req, struct clp_req_hdr *lpcb)
req               546 arch/s390/pci/pci_clp.c 		return clp_pci_slpc(req, (void *) lpcb);
req               548 arch/s390/pci/pci_clp.c 		return clp_pci_list(req, (void *) lpcb);
req               550 arch/s390/pci/pci_clp.c 		return clp_pci_query(req, (void *) lpcb);
req               552 arch/s390/pci/pci_clp.c 		return clp_pci_query_grp(req, (void *) lpcb);
req               558 arch/s390/pci/pci_clp.c static int clp_normal_command(struct clp_req *req)
req               565 arch/s390/pci/pci_clp.c 	if (req->lps != 0 && req->lps != 2)
req               574 arch/s390/pci/pci_clp.c 	uptr = (void __force __user *)(unsigned long) req->data_p;
req               582 arch/s390/pci/pci_clp.c 	switch (req->lps) {
req               584 arch/s390/pci/pci_clp.c 		rc = clp_base_command(req, lpcb);
req               587 arch/s390/pci/pci_clp.c 		rc = clp_pci_command(req, lpcb);
req               605 arch/s390/pci/pci_clp.c static int clp_immediate_command(struct clp_req *req)
req               611 arch/s390/pci/pci_clp.c 	if (req->cmd > 1 || clp_get_ilp(&ilp) != 0)
req               614 arch/s390/pci/pci_clp.c 	uptr = (void __force __user *)(unsigned long) req->data_p;
req               615 arch/s390/pci/pci_clp.c 	if (req->cmd == 0) {
req               617 arch/s390/pci/pci_clp.c 		exists = test_bit_inv(req->lps, &ilp);
req               627 arch/s390/pci/pci_clp.c 	struct clp_req req;
req               634 arch/s390/pci/pci_clp.c 	if (copy_from_user(&req, argp, sizeof(req)))
req               636 arch/s390/pci/pci_clp.c 	if (req.r != 0)
req               638 arch/s390/pci/pci_clp.c 	return req.c ? clp_immediate_command(&req) : clp_normal_command(&req);
req                20 arch/s390/pci/pci_insn.c static inline void zpci_err_insn(u8 cc, u8 status, u64 req, u64 offset)
req                23 arch/s390/pci/pci_insn.c 		u64 req;
req                27 arch/s390/pci/pci_insn.c 	} __packed data = {req, offset, cc, status};
req                33 arch/s390/pci/pci_insn.c static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
req                41 arch/s390/pci/pci_insn.c 		: [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
req                43 arch/s390/pci/pci_insn.c 	*status = req >> 24 & 0xff;
req                47 arch/s390/pci/pci_insn.c u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status)
req                52 arch/s390/pci/pci_insn.c 		cc = __mpcifc(req, fib, status);
req                58 arch/s390/pci/pci_insn.c 		zpci_err_insn(cc, *status, req, 0);
req               114 arch/s390/pci/pci_insn.c static inline int ____pcilg(u64 *data, u64 req, u64 offset, u8 *status)
req               116 arch/s390/pci/pci_insn.c 	register u64 __req asm("2") = req;
req               127 arch/s390/pci/pci_insn.c 		: [cc] "+d" (cc), [data] "=d" (__data), [req] "+d" (__req)
req               135 arch/s390/pci/pci_insn.c static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
req               140 arch/s390/pci/pci_insn.c 	cc = ____pcilg(&__data, req, offset, status);
req               147 arch/s390/pci/pci_insn.c int __zpci_load(u64 *data, u64 req, u64 offset)
req               153 arch/s390/pci/pci_insn.c 		cc = __pcilg(data, req, offset, &status);
req               159 arch/s390/pci/pci_insn.c 		zpci_err_insn(cc, status, req, offset);
req               169 arch/s390/pci/pci_insn.c 	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
req               171 arch/s390/pci/pci_insn.c 	return __zpci_load(data, req, ZPCI_OFFSET(addr));
req               212 arch/s390/pci/pci_insn.c static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
req               214 arch/s390/pci/pci_insn.c 	register u64 __req asm("2") = req;
req               224 arch/s390/pci/pci_insn.c 		: [cc] "+d" (cc), [req] "+d" (__req)
req               231 arch/s390/pci/pci_insn.c int __zpci_store(u64 data, u64 req, u64 offset)
req               237 arch/s390/pci/pci_insn.c 		cc = __pcistg(data, req, offset, &status);
req               243 arch/s390/pci/pci_insn.c 		zpci_err_insn(cc, status, req, offset);
req               253 arch/s390/pci/pci_insn.c 	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
req               255 arch/s390/pci/pci_insn.c 	return __zpci_store(data, req, ZPCI_OFFSET(addr));
req               294 arch/s390/pci/pci_insn.c static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
req               304 arch/s390/pci/pci_insn.c 		: [cc] "+d" (cc), [req] "+d" (req)
req               307 arch/s390/pci/pci_insn.c 	*status = req >> 24 & 0xff;
req               311 arch/s390/pci/pci_insn.c int __zpci_store_block(const u64 *data, u64 req, u64 offset)
req               317 arch/s390/pci/pci_insn.c 		cc = __pcistb(data, req, offset, &status);
req               323 arch/s390/pci/pci_insn.c 		zpci_err_insn(cc, status, req, offset);
req               333 arch/s390/pci/pci_insn.c 	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
req               336 arch/s390/pci/pci_insn.c 	return __zpci_store_block(src, req, offset);
req                41 arch/s390/pci/pci_irq.c 	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
req                53 arch/s390/pci/pci_irq.c 	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
req                59 arch/s390/pci/pci_irq.c 	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT);
req                63 arch/s390/pci/pci_irq.c 	cc = zpci_mod_fc(req, &fib, &status);
req                74 arch/s390/pci/pci_irq.c 	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT_D);
req                82 arch/s390/pci/pci_irq.c 	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
req                88 arch/s390/pci/pci_irq.c 	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT_D);
req                93 arch/s390/pci/pci_irq.c 	cc = zpci_mod_fc(req, &fib, &status);
req               891 arch/sparc/kernel/ds.c 			struct ds_reg_req req;
req               908 arch/sparc/kernel/ds.c 		pbuf.req.tag.type = DS_REG_REQ;
req               909 arch/sparc/kernel/ds.c 		pbuf.req.tag.len = (msg_len - sizeof(struct ds_msg_tag));
req               910 arch/sparc/kernel/ds.c 		pbuf.req.handle = cp->handle;
req               911 arch/sparc/kernel/ds.c 		pbuf.req.major = 1;
req               912 arch/sparc/kernel/ds.c 		pbuf.req.minor = 0;
req               992 arch/sparc/kernel/ds.c 	u64				req[0];
req              1006 arch/sparc/kernel/ds.c 		struct ds_data *dpkt = (struct ds_data *) qp->req;
req              1057 arch/sparc/kernel/ds.c 		memcpy(&qp->req, pkt, len);
req              1067 arch/sparc/kernel/ds.c 	struct ds_ver_req req;
req              1070 arch/sparc/kernel/ds.c 	req.tag.type = DS_INIT_REQ;
req              1071 arch/sparc/kernel/ds.c 	req.tag.len = sizeof(req) - sizeof(struct ds_msg_tag);
req              1072 arch/sparc/kernel/ds.c 	req.ver.major = 1;
req              1073 arch/sparc/kernel/ds.c 	req.ver.minor = 0;
req              1075 arch/sparc/kernel/ds.c 	err = __ds_send(lp, &req, sizeof(req));
req                52 arch/um/drivers/daemon_user.c 	struct request_v3 req;
req                93 arch/um/drivers/daemon_user.c 	req.magic = SWITCH_MAGIC;
req                94 arch/um/drivers/daemon_user.c 	req.version = SWITCH_VERSION;
req                95 arch/um/drivers/daemon_user.c 	req.type = REQ_NEW_CONTROL;
req                96 arch/um/drivers/daemon_user.c 	req.sock = *local_addr;
req                97 arch/um/drivers/daemon_user.c 	n = write(pri->control, &req, sizeof(req));
req                98 arch/um/drivers/daemon_user.c 	if (n != sizeof(req)) {
req                51 arch/um/drivers/mconsole.h 	void (*handler)(struct mc_request *req);
req                72 arch/um/drivers/mconsole.h extern int mconsole_reply_len(struct mc_request *req, const char *reply,
req                74 arch/um/drivers/mconsole.h extern int mconsole_reply(struct mc_request *req, const char *str, int err,
req                77 arch/um/drivers/mconsole.h extern void mconsole_version(struct mc_request *req);
req                78 arch/um/drivers/mconsole.h extern void mconsole_help(struct mc_request *req);
req                79 arch/um/drivers/mconsole.h extern void mconsole_halt(struct mc_request *req);
req                80 arch/um/drivers/mconsole.h extern void mconsole_reboot(struct mc_request *req);
req                81 arch/um/drivers/mconsole.h extern void mconsole_config(struct mc_request *req);
req                82 arch/um/drivers/mconsole.h extern void mconsole_remove(struct mc_request *req);
req                83 arch/um/drivers/mconsole.h extern void mconsole_sysrq(struct mc_request *req);
req                84 arch/um/drivers/mconsole.h extern void mconsole_cad(struct mc_request *req);
req                85 arch/um/drivers/mconsole.h extern void mconsole_stop(struct mc_request *req);
req                86 arch/um/drivers/mconsole.h extern void mconsole_go(struct mc_request *req);
req                87 arch/um/drivers/mconsole.h extern void mconsole_log(struct mc_request *req);
req                88 arch/um/drivers/mconsole.h extern void mconsole_proc(struct mc_request *req);
req                89 arch/um/drivers/mconsole.h extern void mconsole_stack(struct mc_request *req);
req                91 arch/um/drivers/mconsole.h extern int mconsole_get_request(int fd, struct mc_request *req);
req                60 arch/um/drivers/mconsole_kern.c 	struct mconsole_entry *req;
req                65 arch/um/drivers/mconsole_kern.c 		req = list_entry(mc_requests.next, struct mconsole_entry, list);
req                66 arch/um/drivers/mconsole_kern.c 		list_del(&req->list);
req                68 arch/um/drivers/mconsole_kern.c 		req->request.cmd->handler(&req->request);
req                69 arch/um/drivers/mconsole_kern.c 		kfree(req);
req                80 arch/um/drivers/mconsole_kern.c 	static struct mc_request req;	/* that's OK */
req                83 arch/um/drivers/mconsole_kern.c 	while (mconsole_get_request(fd, &req)) {
req                84 arch/um/drivers/mconsole_kern.c 		if (req.cmd->context == MCONSOLE_INTR)
req                85 arch/um/drivers/mconsole_kern.c 			(*req.cmd->handler)(&req);
req                89 arch/um/drivers/mconsole_kern.c 				mconsole_reply(&req, "Out of memory", 1, 0);
req                91 arch/um/drivers/mconsole_kern.c 				new->request = req;
req               102 arch/um/drivers/mconsole_kern.c void mconsole_version(struct mc_request *req)
req               109 arch/um/drivers/mconsole_kern.c 	mconsole_reply(req, version, 0, 0);
req               112 arch/um/drivers/mconsole_kern.c void mconsole_log(struct mc_request *req)
req               115 arch/um/drivers/mconsole_kern.c 	char *ptr = req->request.data;
req               119 arch/um/drivers/mconsole_kern.c 	len = req->len - (ptr - req->request.data);
req               121 arch/um/drivers/mconsole_kern.c 	mconsole_reply(req, "", 0, 0);
req               124 arch/um/drivers/mconsole_kern.c void mconsole_proc(struct mc_request *req)
req               131 arch/um/drivers/mconsole_kern.c 	char *ptr = req->request.data;
req               139 arch/um/drivers/mconsole_kern.c 		mconsole_reply(req, "Failed to open file", 1, 0);
req               146 arch/um/drivers/mconsole_kern.c 		mconsole_reply(req, "Failed to allocate buffer", 1, 0);
req               153 arch/um/drivers/mconsole_kern.c 			mconsole_reply(req, "Read of file failed", 1, 0);
req               158 arch/um/drivers/mconsole_kern.c 			mconsole_reply(req, "\n", 0, 1);
req               162 arch/um/drivers/mconsole_kern.c 		mconsole_reply(req, buf, 0, (len > 0));
req               190 arch/um/drivers/mconsole_kern.c void mconsole_help(struct mc_request *req)
req               192 arch/um/drivers/mconsole_kern.c 	mconsole_reply(req, UML_MCONSOLE_HELPTEXT, 0, 0);
req               195 arch/um/drivers/mconsole_kern.c void mconsole_halt(struct mc_request *req)
req               197 arch/um/drivers/mconsole_kern.c 	mconsole_reply(req, "", 0, 0);
req               201 arch/um/drivers/mconsole_kern.c void mconsole_reboot(struct mc_request *req)
req               203 arch/um/drivers/mconsole_kern.c 	mconsole_reply(req, "", 0, 0);
req               207 arch/um/drivers/mconsole_kern.c void mconsole_cad(struct mc_request *req)
req               209 arch/um/drivers/mconsole_kern.c 	mconsole_reply(req, "", 0, 0);
req               213 arch/um/drivers/mconsole_kern.c void mconsole_go(struct mc_request *req)
req               215 arch/um/drivers/mconsole_kern.c 	mconsole_reply(req, "Not stopped", 1, 0);
req               218 arch/um/drivers/mconsole_kern.c void mconsole_stop(struct mc_request *req)
req               220 arch/um/drivers/mconsole_kern.c 	deactivate_fd(req->originating_fd, MCONSOLE_IRQ);
req               221 arch/um/drivers/mconsole_kern.c 	os_set_fd_block(req->originating_fd, 1);
req               222 arch/um/drivers/mconsole_kern.c 	mconsole_reply(req, "stopped", 0, 0);
req               224 arch/um/drivers/mconsole_kern.c 		if (!mconsole_get_request(req->originating_fd, req))
req               226 arch/um/drivers/mconsole_kern.c 		if (req->cmd->handler == mconsole_go)
req               228 arch/um/drivers/mconsole_kern.c 		if (req->cmd->handler == mconsole_stop) {
req               229 arch/um/drivers/mconsole_kern.c 			mconsole_reply(req, "Already stopped", 1, 0);
req               232 arch/um/drivers/mconsole_kern.c 		if (req->cmd->handler == mconsole_sysrq) {
req               234 arch/um/drivers/mconsole_kern.c 			old_regs = set_irq_regs((struct pt_regs *)&req->regs);
req               235 arch/um/drivers/mconsole_kern.c 			mconsole_sysrq(req);
req               239 arch/um/drivers/mconsole_kern.c 		(*req->cmd->handler)(req);
req               241 arch/um/drivers/mconsole_kern.c 	os_set_fd_block(req->originating_fd, 0);
req               242 arch/um/drivers/mconsole_kern.c 	mconsole_reply(req, "", 0, 0);
req               424 arch/um/drivers/mconsole_kern.c 				struct mc_request *req, char *name)
req               430 arch/um/drivers/mconsole_kern.c 		mconsole_reply(req, "No get_config routine defined", 1, 0);
req               441 arch/um/drivers/mconsole_kern.c 			mconsole_reply(req, error, 1, 0);
req               446 arch/um/drivers/mconsole_kern.c 			mconsole_reply(req, buf, 0, 0);
req               456 arch/um/drivers/mconsole_kern.c 			mconsole_reply(req, "Failed to allocate buffer", 1, 0);
req               465 arch/um/drivers/mconsole_kern.c void mconsole_config(struct mc_request *req)
req               468 arch/um/drivers/mconsole_kern.c 	char *ptr = req->request.data, *name, *error_string = "";
req               475 arch/um/drivers/mconsole_kern.c 		mconsole_reply(req, "Bad configuration option", 1, 0);
req               486 arch/um/drivers/mconsole_kern.c 		mconsole_reply(req, error_string, err, 0);
req               488 arch/um/drivers/mconsole_kern.c 	else mconsole_get_config(dev->get_config, req, name);
req               491 arch/um/drivers/mconsole_kern.c void mconsole_remove(struct mc_request *req)
req               494 arch/um/drivers/mconsole_kern.c 	char *ptr = req->request.data, *err_msg = "";
req               502 arch/um/drivers/mconsole_kern.c 		mconsole_reply(req, "Bad remove option", 1, 0);
req               539 arch/um/drivers/mconsole_kern.c 	mconsole_reply(req, err_msg, err, 0);
req               544 arch/um/drivers/mconsole_kern.c 	struct mc_request *req;
req               570 arch/um/drivers/mconsole_kern.c 			mconsole_reply_len(entry->req, console_buf, n, 0, 1);
req               588 arch/um/drivers/mconsole_kern.c static void with_console(struct mc_request *req, void (*proc)(void *),
req               594 arch/um/drivers/mconsole_kern.c 	entry.req = req;
req               601 arch/um/drivers/mconsole_kern.c 	mconsole_reply_len(req, "", 0, 0, 0);
req               618 arch/um/drivers/mconsole_kern.c void mconsole_sysrq(struct mc_request *req)
req               620 arch/um/drivers/mconsole_kern.c 	char *ptr = req->request.data;
req               630 arch/um/drivers/mconsole_kern.c 		mconsole_reply(req, "", 0, 0);
req               632 arch/um/drivers/mconsole_kern.c 	with_console(req, sysrq_proc, ptr);
req               635 arch/um/drivers/mconsole_kern.c void mconsole_sysrq(struct mc_request *req)
req               637 arch/um/drivers/mconsole_kern.c 	mconsole_reply(req, "Sysrq not compiled in", 1, 0);
req               654 arch/um/drivers/mconsole_kern.c void mconsole_stack(struct mc_request *req)
req               656 arch/um/drivers/mconsole_kern.c 	char *ptr = req->request.data;
req               674 arch/um/drivers/mconsole_kern.c 		mconsole_reply(req, "Please specify a pid", 1, 0);
req               680 arch/um/drivers/mconsole_kern.c 		mconsole_reply(req, "Couldn't find that pid", 1, 0);
req               683 arch/um/drivers/mconsole_kern.c 	with_console(req, stack_proc, to);
req                40 arch/um/drivers/mconsole_user.c static int mconsole_reply_v0(struct mc_request *req, char *reply)
req                48 arch/um/drivers/mconsole_user.c 	msg.msg_name = &(req->origin);
req                49 arch/um/drivers/mconsole_user.c 	msg.msg_namelen = req->originlen;
req                56 arch/um/drivers/mconsole_user.c 	return sendmsg(req->originating_fd, &msg, 0);
req                59 arch/um/drivers/mconsole_user.c static struct mconsole_command *mconsole_parse(struct mc_request *req)
req                66 arch/um/drivers/mconsole_user.c 		if (!strncmp(req->request.data, cmd->command,
req                79 arch/um/drivers/mconsole_user.c int mconsole_get_request(int fd, struct mc_request *req)
req                83 arch/um/drivers/mconsole_user.c 	req->originlen = sizeof(req->origin);
req                84 arch/um/drivers/mconsole_user.c 	req->len = recvfrom(fd, &req->request, sizeof(req->request), 0,
req                85 arch/um/drivers/mconsole_user.c 			    (struct sockaddr *) req->origin, &req->originlen);
req                86 arch/um/drivers/mconsole_user.c 	if (req->len < 0)
req                89 arch/um/drivers/mconsole_user.c 	req->originating_fd = fd;
req                91 arch/um/drivers/mconsole_user.c 	if (req->request.magic != MCONSOLE_MAGIC) {
req                93 arch/um/drivers/mconsole_user.c 		len = MIN(sizeof(req->request.data) - 1,
req                94 arch/um/drivers/mconsole_user.c 			  strlen((char *) &req->request));
req                95 arch/um/drivers/mconsole_user.c 		memmove(req->request.data, &req->request, len);
req                96 arch/um/drivers/mconsole_user.c 		req->request.data[len] = '\0';
req                98 arch/um/drivers/mconsole_user.c 		req->request.magic = MCONSOLE_MAGIC;
req                99 arch/um/drivers/mconsole_user.c 		req->request.version = 0;
req               100 arch/um/drivers/mconsole_user.c 		req->request.len = len;
req               102 arch/um/drivers/mconsole_user.c 		mconsole_reply_v0(req, "ERR Version 0 mconsole clients are "
req               107 arch/um/drivers/mconsole_user.c 	if (req->request.len >= MCONSOLE_MAX_DATA) {
req               108 arch/um/drivers/mconsole_user.c 		mconsole_reply(req, "Request too large", 1, 0);
req               111 arch/um/drivers/mconsole_user.c 	if (req->request.version != MCONSOLE_VERSION) {
req               112 arch/um/drivers/mconsole_user.c 		mconsole_reply(req, "This driver only supports version "
req               116 arch/um/drivers/mconsole_user.c 	req->request.data[req->request.len] = '\0';
req               117 arch/um/drivers/mconsole_user.c 	req->cmd = mconsole_parse(req);
req               118 arch/um/drivers/mconsole_user.c 	if (req->cmd == NULL) {
req               119 arch/um/drivers/mconsole_user.c 		mconsole_reply(req, "Unknown command", 1, 0);
req               126 arch/um/drivers/mconsole_user.c int mconsole_reply_len(struct mc_request *req, const char *str, int total,
req               156 arch/um/drivers/mconsole_user.c 		n = sendto(req->originating_fd, &reply, len, 0,
req               157 arch/um/drivers/mconsole_user.c 			   (struct sockaddr *) req->origin, req->originlen);
req               165 arch/um/drivers/mconsole_user.c int mconsole_reply(struct mc_request *req, const char *str, int err, int more)
req               167 arch/um/drivers/mconsole_user.c 	return mconsole_reply_len(req, str, strlen(str), err, more);
req                51 arch/um/drivers/ubd_kern.c 	struct request *req;
req               522 arch/um/drivers/ubd_kern.c 			if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) {
req               523 arch/um/drivers/ubd_kern.c 				blk_queue_max_discard_sectors(io_req->req->q, 0);
req               524 arch/um/drivers/ubd_kern.c 				blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
req               525 arch/um/drivers/ubd_kern.c 				blk_queue_flag_clear(QUEUE_FLAG_DISCARD, io_req->req->q);
req               528 arch/um/drivers/ubd_kern.c 				blk_mq_end_request(io_req->req, io_req->error);
req               530 arch/um/drivers/ubd_kern.c 				if (!blk_update_request(io_req->req, io_req->error, io_req->length))
req               531 arch/um/drivers/ubd_kern.c 					__blk_mq_end_request(io_req->req, io_req->error);
req              1291 arch/um/drivers/ubd_kern.c static void cowify_req(struct io_thread_req *req, unsigned long *bitmap,
req              1294 arch/um/drivers/ubd_kern.c 	__u64 sector = req->offset >> SECTOR_SHIFT;
req              1297 arch/um/drivers/ubd_kern.c 	if (req->length > (sizeof(req->sector_mask) * 8) << SECTOR_SHIFT)
req              1300 arch/um/drivers/ubd_kern.c 	if (req_op(req->req) == REQ_OP_READ) {
req              1301 arch/um/drivers/ubd_kern.c 		for (i = 0; i < req->length >> SECTOR_SHIFT; i++) {
req              1304 arch/um/drivers/ubd_kern.c 					    &req->sector_mask);
req              1307 arch/um/drivers/ubd_kern.c 	else cowify_bitmap(req->offset, req->length, &req->sector_mask,
req              1308 arch/um/drivers/ubd_kern.c 			   &req->cow_offset, bitmap, bitmap_offset,
req              1309 arch/um/drivers/ubd_kern.c 			   req->bitmap_words, bitmap_len);
req              1312 arch/um/drivers/ubd_kern.c static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
req              1323 arch/um/drivers/ubd_kern.c 	io_req->req = req;
req              1335 arch/um/drivers/ubd_kern.c 		io_req->length = blk_rq_bytes(req);
req              1359 arch/um/drivers/ubd_kern.c static int queue_rw_req(struct blk_mq_hw_ctx *hctx, struct request *req)
req              1364 arch/um/drivers/ubd_kern.c 	u64 off = (u64)blk_rq_pos(req) << SECTOR_SHIFT;
req              1366 arch/um/drivers/ubd_kern.c 	rq_for_each_segment(bvec, req, iter) {
req              1367 arch/um/drivers/ubd_kern.c 		ret = ubd_queue_one_vec(hctx, req, off, &bvec);
req              1379 arch/um/drivers/ubd_kern.c 	struct request *req = bd->rq;
req              1382 arch/um/drivers/ubd_kern.c 	blk_mq_start_request(req);
req              1386 arch/um/drivers/ubd_kern.c 	switch (req_op(req)) {
req              1389 arch/um/drivers/ubd_kern.c 		ret = ubd_queue_one_vec(hctx, req, 0, NULL);
req              1393 arch/um/drivers/ubd_kern.c 		ret = queue_rw_req(hctx, req);
req              1397 arch/um/drivers/ubd_kern.c 		ret = ubd_queue_one_vec(hctx, req, (u64)blk_rq_pos(req) << 9, NULL);
req              1485 arch/um/drivers/ubd_kern.c static int update_bitmap(struct io_thread_req *req)
req              1489 arch/um/drivers/ubd_kern.c 	if(req->cow_offset == -1)
req              1492 arch/um/drivers/ubd_kern.c 	n = os_pwrite_file(req->fds[1], &req->bitmap_words,
req              1493 arch/um/drivers/ubd_kern.c 			  sizeof(req->bitmap_words), req->cow_offset);
req              1494 arch/um/drivers/ubd_kern.c 	if (n != sizeof(req->bitmap_words))
req              1500 arch/um/drivers/ubd_kern.c static void do_io(struct io_thread_req *req)
req              1509 arch/um/drivers/ubd_kern.c 	if (req_op(req->req) == REQ_OP_FLUSH) {
req              1511 arch/um/drivers/ubd_kern.c 		req->error = map_error(-os_sync_file(req->fds[0]));
req              1515 arch/um/drivers/ubd_kern.c 	nsectors = req->length / req->sectorsize;
req              1518 arch/um/drivers/ubd_kern.c 		bit = ubd_test_bit(start, (unsigned char *) &req->sector_mask);
req              1522 arch/um/drivers/ubd_kern.c 				    &req->sector_mask) == bit))
req              1525 arch/um/drivers/ubd_kern.c 		off = req->offset + req->offsets[bit] +
req              1526 arch/um/drivers/ubd_kern.c 			start * req->sectorsize;
req              1527 arch/um/drivers/ubd_kern.c 		len = (end - start) * req->sectorsize;
req              1528 arch/um/drivers/ubd_kern.c 		if (req->buffer != NULL)
req              1529 arch/um/drivers/ubd_kern.c 			buf = &req->buffer[start * req->sectorsize];
req              1531 arch/um/drivers/ubd_kern.c 		switch (req_op(req->req)) {
req              1537 arch/um/drivers/ubd_kern.c 				n = os_pread_file(req->fds[bit], buf, len, off);
req              1539 arch/um/drivers/ubd_kern.c 					req->error = map_error(-n);
req              1546 arch/um/drivers/ubd_kern.c 			n = os_pwrite_file(req->fds[bit], buf, len, off);
req              1548 arch/um/drivers/ubd_kern.c 				req->error = map_error(-n);
req              1554 arch/um/drivers/ubd_kern.c 			n = os_falloc_punch(req->fds[bit], off, len);
req              1556 arch/um/drivers/ubd_kern.c 				req->error = map_error(-n);
req              1562 arch/um/drivers/ubd_kern.c 			req->error = BLK_STS_NOTSUPP;
req              1569 arch/um/drivers/ubd_kern.c 	req->error = update_bitmap(req);
req                61 arch/x86/crypto/aegis128-aesni-glue.c 				  struct aead_request *req, bool atomic);
req               167 arch/x86/crypto/aegis128-aesni-glue.c static void crypto_aegis128_aesni_crypt(struct aead_request *req,
req               172 arch/x86/crypto/aegis128-aesni-glue.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               177 arch/x86/crypto/aegis128-aesni-glue.c 	ops->skcipher_walk_init(&walk, req, true);
req               181 arch/x86/crypto/aegis128-aesni-glue.c 	crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv);
req               182 arch/x86/crypto/aegis128-aesni-glue.c 	crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
req               184 arch/x86/crypto/aegis128-aesni-glue.c 	crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
req               189 arch/x86/crypto/aegis128-aesni-glue.c static int crypto_aegis128_aesni_encrypt(struct aead_request *req)
req               197 arch/x86/crypto/aegis128-aesni-glue.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               200 arch/x86/crypto/aegis128-aesni-glue.c 	unsigned int cryptlen = req->cryptlen;
req               202 arch/x86/crypto/aegis128-aesni-glue.c 	crypto_aegis128_aesni_crypt(req, &tag, cryptlen, &OPS);
req               204 arch/x86/crypto/aegis128-aesni-glue.c 	scatterwalk_map_and_copy(tag.bytes, req->dst,
req               205 arch/x86/crypto/aegis128-aesni-glue.c 				 req->assoclen + cryptlen, authsize, 1);
req               209 arch/x86/crypto/aegis128-aesni-glue.c static int crypto_aegis128_aesni_decrypt(struct aead_request *req)
req               219 arch/x86/crypto/aegis128-aesni-glue.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               222 arch/x86/crypto/aegis128-aesni-glue.c 	unsigned int cryptlen = req->cryptlen - authsize;
req               224 arch/x86/crypto/aegis128-aesni-glue.c 	scatterwalk_map_and_copy(tag.bytes, req->src,
req               225 arch/x86/crypto/aegis128-aesni-glue.c 				 req->assoclen + cryptlen, authsize, 0);
req               227 arch/x86/crypto/aegis128-aesni-glue.c 	crypto_aegis128_aesni_crypt(req, &tag, cryptlen, &OPS);
req               380 arch/x86/crypto/aesni-intel_glue.c static int ecb_encrypt(struct skcipher_request *req)
req               382 arch/x86/crypto/aesni-intel_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               388 arch/x86/crypto/aesni-intel_glue.c 	err = skcipher_walk_virt(&walk, req, true);
req               402 arch/x86/crypto/aesni-intel_glue.c static int ecb_decrypt(struct skcipher_request *req)
req               404 arch/x86/crypto/aesni-intel_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               410 arch/x86/crypto/aesni-intel_glue.c 	err = skcipher_walk_virt(&walk, req, true);
req               424 arch/x86/crypto/aesni-intel_glue.c static int cbc_encrypt(struct skcipher_request *req)
req               426 arch/x86/crypto/aesni-intel_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               432 arch/x86/crypto/aesni-intel_glue.c 	err = skcipher_walk_virt(&walk, req, true);
req               446 arch/x86/crypto/aesni-intel_glue.c static int cbc_decrypt(struct skcipher_request *req)
req               448 arch/x86/crypto/aesni-intel_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               454 arch/x86/crypto/aesni-intel_glue.c 	err = skcipher_walk_virt(&walk, req, true);
req               503 arch/x86/crypto/aesni-intel_glue.c static int ctr_crypt(struct skcipher_request *req)
req               505 arch/x86/crypto/aesni-intel_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               511 arch/x86/crypto/aesni-intel_glue.c 	err = skcipher_walk_virt(&walk, req, true);
req               604 arch/x86/crypto/aesni-intel_glue.c static int xts_encrypt(struct skcipher_request *req)
req               606 arch/x86/crypto/aesni-intel_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               609 arch/x86/crypto/aesni-intel_glue.c 	return glue_xts_req_128bit(&aesni_enc_xts, req,
req               616 arch/x86/crypto/aesni-intel_glue.c static int xts_decrypt(struct skcipher_request *req)
req               618 arch/x86/crypto/aesni-intel_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               621 arch/x86/crypto/aesni-intel_glue.c 	return glue_xts_req_128bit(&aesni_dec_xts, req,
req               703 arch/x86/crypto/aesni-intel_glue.c static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
req               707 arch/x86/crypto/aesni-intel_glue.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               712 arch/x86/crypto/aesni-intel_glue.c 	unsigned long left = req->cryptlen;
req               737 arch/x86/crypto/aesni-intel_glue.c 	if (req->src->length >= assoclen && req->src->length &&
req               738 arch/x86/crypto/aesni-intel_glue.c 		(!PageHighMem(sg_page(req->src)) ||
req               739 arch/x86/crypto/aesni-intel_glue.c 			req->src->offset + req->src->length <= PAGE_SIZE)) {
req               740 arch/x86/crypto/aesni-intel_glue.c 		scatterwalk_start(&assoc_sg_walk, req->src);
req               749 arch/x86/crypto/aesni-intel_glue.c 		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
req               753 arch/x86/crypto/aesni-intel_glue.c 		src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
req               755 arch/x86/crypto/aesni-intel_glue.c 		if (req->src != req->dst) {
req               756 arch/x86/crypto/aesni-intel_glue.c 			dst_sg = scatterwalk_ffwd(dst_start, req->dst,
req               757 arch/x86/crypto/aesni-intel_glue.c 						  req->assoclen);
req               765 arch/x86/crypto/aesni-intel_glue.c 	if (req->src != req->dst) {
req               819 arch/x86/crypto/aesni-intel_glue.c 		scatterwalk_map_and_copy(authTagMsg, req->src,
req               820 arch/x86/crypto/aesni-intel_glue.c 					 req->assoclen + req->cryptlen -
req               830 arch/x86/crypto/aesni-intel_glue.c 	scatterwalk_map_and_copy(authTag, req->dst,
req               831 arch/x86/crypto/aesni-intel_glue.c 				 req->assoclen + req->cryptlen,
req               837 arch/x86/crypto/aesni-intel_glue.c static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
req               840 arch/x86/crypto/aesni-intel_glue.c 	return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
req               844 arch/x86/crypto/aesni-intel_glue.c static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
req               847 arch/x86/crypto/aesni-intel_glue.c 	return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
req               851 arch/x86/crypto/aesni-intel_glue.c static int helper_rfc4106_encrypt(struct aead_request *req)
req               853 arch/x86/crypto/aesni-intel_glue.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               863 arch/x86/crypto/aesni-intel_glue.c 	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
req               870 arch/x86/crypto/aesni-intel_glue.c 		*(iv+4+i) = req->iv[i];
req               873 arch/x86/crypto/aesni-intel_glue.c 	return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
req               877 arch/x86/crypto/aesni-intel_glue.c static int helper_rfc4106_decrypt(struct aead_request *req)
req               880 arch/x86/crypto/aesni-intel_glue.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               886 arch/x86/crypto/aesni-intel_glue.c 	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
req               897 arch/x86/crypto/aesni-intel_glue.c 		*(iv+4+i) = req->iv[i];
req               900 arch/x86/crypto/aesni-intel_glue.c 	return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
req              1008 arch/x86/crypto/aesni-intel_glue.c static int generic_gcmaes_encrypt(struct aead_request *req)
req              1010 arch/x86/crypto/aesni-intel_glue.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1016 arch/x86/crypto/aesni-intel_glue.c 	memcpy(iv, req->iv, 12);
req              1019 arch/x86/crypto/aesni-intel_glue.c 	return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
req              1023 arch/x86/crypto/aesni-intel_glue.c static int generic_gcmaes_decrypt(struct aead_request *req)
req              1026 arch/x86/crypto/aesni-intel_glue.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1031 arch/x86/crypto/aesni-intel_glue.c 	memcpy(iv, req->iv, 12);
req              1034 arch/x86/crypto/aesni-intel_glue.c 	return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
req                71 arch/x86/crypto/blowfish_glue.c static int ecb_crypt(struct skcipher_request *req,
req                76 arch/x86/crypto/blowfish_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req                82 arch/x86/crypto/blowfish_glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               118 arch/x86/crypto/blowfish_glue.c static int ecb_encrypt(struct skcipher_request *req)
req               120 arch/x86/crypto/blowfish_glue.c 	return ecb_crypt(req, blowfish_enc_blk, blowfish_enc_blk_4way);
req               123 arch/x86/crypto/blowfish_glue.c static int ecb_decrypt(struct skcipher_request *req)
req               125 arch/x86/crypto/blowfish_glue.c 	return ecb_crypt(req, blowfish_dec_blk, blowfish_dec_blk_4way);
req               151 arch/x86/crypto/blowfish_glue.c static int cbc_encrypt(struct skcipher_request *req)
req               153 arch/x86/crypto/blowfish_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               159 arch/x86/crypto/blowfish_glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               232 arch/x86/crypto/blowfish_glue.c static int cbc_decrypt(struct skcipher_request *req)
req               234 arch/x86/crypto/blowfish_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               240 arch/x86/crypto/blowfish_glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               318 arch/x86/crypto/blowfish_glue.c static int ctr_crypt(struct skcipher_request *req)
req               320 arch/x86/crypto/blowfish_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               326 arch/x86/crypto/blowfish_glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               152 arch/x86/crypto/camellia_aesni_avx2_glue.c static int ecb_encrypt(struct skcipher_request *req)
req               154 arch/x86/crypto/camellia_aesni_avx2_glue.c 	return glue_ecb_req_128bit(&camellia_enc, req);
req               157 arch/x86/crypto/camellia_aesni_avx2_glue.c static int ecb_decrypt(struct skcipher_request *req)
req               159 arch/x86/crypto/camellia_aesni_avx2_glue.c 	return glue_ecb_req_128bit(&camellia_dec, req);
req               162 arch/x86/crypto/camellia_aesni_avx2_glue.c static int cbc_encrypt(struct skcipher_request *req)
req               165 arch/x86/crypto/camellia_aesni_avx2_glue.c 					   req);
req               168 arch/x86/crypto/camellia_aesni_avx2_glue.c static int cbc_decrypt(struct skcipher_request *req)
req               170 arch/x86/crypto/camellia_aesni_avx2_glue.c 	return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req);
req               173 arch/x86/crypto/camellia_aesni_avx2_glue.c static int ctr_crypt(struct skcipher_request *req)
req               175 arch/x86/crypto/camellia_aesni_avx2_glue.c 	return glue_ctr_req_128bit(&camellia_ctr, req);
req               178 arch/x86/crypto/camellia_aesni_avx2_glue.c static int xts_encrypt(struct skcipher_request *req)
req               180 arch/x86/crypto/camellia_aesni_avx2_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               183 arch/x86/crypto/camellia_aesni_avx2_glue.c 	return glue_xts_req_128bit(&camellia_enc_xts, req,
req               188 arch/x86/crypto/camellia_aesni_avx2_glue.c static int xts_decrypt(struct skcipher_request *req)
req               190 arch/x86/crypto/camellia_aesni_avx2_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               193 arch/x86/crypto/camellia_aesni_avx2_glue.c 	return glue_xts_req_128bit(&camellia_dec_xts, req,
req               156 arch/x86/crypto/camellia_aesni_avx_glue.c static int ecb_encrypt(struct skcipher_request *req)
req               158 arch/x86/crypto/camellia_aesni_avx_glue.c 	return glue_ecb_req_128bit(&camellia_enc, req);
req               161 arch/x86/crypto/camellia_aesni_avx_glue.c static int ecb_decrypt(struct skcipher_request *req)
req               163 arch/x86/crypto/camellia_aesni_avx_glue.c 	return glue_ecb_req_128bit(&camellia_dec, req);
req               166 arch/x86/crypto/camellia_aesni_avx_glue.c static int cbc_encrypt(struct skcipher_request *req)
req               169 arch/x86/crypto/camellia_aesni_avx_glue.c 					   req);
req               172 arch/x86/crypto/camellia_aesni_avx_glue.c static int cbc_decrypt(struct skcipher_request *req)
req               174 arch/x86/crypto/camellia_aesni_avx_glue.c 	return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req);
req               177 arch/x86/crypto/camellia_aesni_avx_glue.c static int ctr_crypt(struct skcipher_request *req)
req               179 arch/x86/crypto/camellia_aesni_avx_glue.c 	return glue_ctr_req_128bit(&camellia_ctr, req);
req               204 arch/x86/crypto/camellia_aesni_avx_glue.c static int xts_encrypt(struct skcipher_request *req)
req               206 arch/x86/crypto/camellia_aesni_avx_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               209 arch/x86/crypto/camellia_aesni_avx_glue.c 	return glue_xts_req_128bit(&camellia_enc_xts, req,
req               214 arch/x86/crypto/camellia_aesni_avx_glue.c static int xts_decrypt(struct skcipher_request *req)
req               216 arch/x86/crypto/camellia_aesni_avx_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               219 arch/x86/crypto/camellia_aesni_avx_glue.c 	return glue_xts_req_128bit(&camellia_dec_xts, req,
req              1364 arch/x86/crypto/camellia_glue.c static int ecb_encrypt(struct skcipher_request *req)
req              1366 arch/x86/crypto/camellia_glue.c 	return glue_ecb_req_128bit(&camellia_enc, req);
req              1369 arch/x86/crypto/camellia_glue.c static int ecb_decrypt(struct skcipher_request *req)
req              1371 arch/x86/crypto/camellia_glue.c 	return glue_ecb_req_128bit(&camellia_dec, req);
req              1374 arch/x86/crypto/camellia_glue.c static int cbc_encrypt(struct skcipher_request *req)
req              1377 arch/x86/crypto/camellia_glue.c 					   req);
req              1380 arch/x86/crypto/camellia_glue.c static int cbc_decrypt(struct skcipher_request *req)
req              1382 arch/x86/crypto/camellia_glue.c 	return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req);
req              1385 arch/x86/crypto/camellia_glue.c static int ctr_crypt(struct skcipher_request *req)
req              1387 arch/x86/crypto/camellia_glue.c 	return glue_ctr_req_128bit(&camellia_ctr, req);
req                47 arch/x86/crypto/cast5_avx_glue.c static int ecb_crypt(struct skcipher_request *req, bool enc)
req                50 arch/x86/crypto/cast5_avx_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req                58 arch/x86/crypto/cast5_avx_glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               100 arch/x86/crypto/cast5_avx_glue.c static int ecb_encrypt(struct skcipher_request *req)
req               102 arch/x86/crypto/cast5_avx_glue.c 	return ecb_crypt(req, true);
req               105 arch/x86/crypto/cast5_avx_glue.c static int ecb_decrypt(struct skcipher_request *req)
req               107 arch/x86/crypto/cast5_avx_glue.c 	return ecb_crypt(req, false);
req               110 arch/x86/crypto/cast5_avx_glue.c static int cbc_encrypt(struct skcipher_request *req)
req               113 arch/x86/crypto/cast5_avx_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               119 arch/x86/crypto/cast5_avx_glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               196 arch/x86/crypto/cast5_avx_glue.c static int cbc_decrypt(struct skcipher_request *req)
req               198 arch/x86/crypto/cast5_avx_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               205 arch/x86/crypto/cast5_avx_glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               276 arch/x86/crypto/cast5_avx_glue.c static int ctr_crypt(struct skcipher_request *req)
req               278 arch/x86/crypto/cast5_avx_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               285 arch/x86/crypto/cast5_avx_glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               145 arch/x86/crypto/cast6_avx_glue.c static int ecb_encrypt(struct skcipher_request *req)
req               147 arch/x86/crypto/cast6_avx_glue.c 	return glue_ecb_req_128bit(&cast6_enc, req);
req               150 arch/x86/crypto/cast6_avx_glue.c static int ecb_decrypt(struct skcipher_request *req)
req               152 arch/x86/crypto/cast6_avx_glue.c 	return glue_ecb_req_128bit(&cast6_dec, req);
req               155 arch/x86/crypto/cast6_avx_glue.c static int cbc_encrypt(struct skcipher_request *req)
req               158 arch/x86/crypto/cast6_avx_glue.c 					   req);
req               161 arch/x86/crypto/cast6_avx_glue.c static int cbc_decrypt(struct skcipher_request *req)
req               163 arch/x86/crypto/cast6_avx_glue.c 	return glue_cbc_decrypt_req_128bit(&cast6_dec_cbc, req);
req               166 arch/x86/crypto/cast6_avx_glue.c static int ctr_crypt(struct skcipher_request *req)
req               168 arch/x86/crypto/cast6_avx_glue.c 	return glue_ctr_req_128bit(&cast6_ctr, req);
req               197 arch/x86/crypto/cast6_avx_glue.c static int xts_encrypt(struct skcipher_request *req)
req               199 arch/x86/crypto/cast6_avx_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               202 arch/x86/crypto/cast6_avx_glue.c 	return glue_xts_req_128bit(&cast6_enc_xts, req,
req               207 arch/x86/crypto/cast6_avx_glue.c static int xts_decrypt(struct skcipher_request *req)
req               209 arch/x86/crypto/cast6_avx_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               212 arch/x86/crypto/cast6_avx_glue.c 	return glue_xts_req_128bit(&cast6_dec_xts, req,
req               162 arch/x86/crypto/chacha_glue.c static int chacha_simd(struct skcipher_request *req)
req               164 arch/x86/crypto/chacha_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               169 arch/x86/crypto/chacha_glue.c 	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
req               170 arch/x86/crypto/chacha_glue.c 		return crypto_chacha_crypt(req);
req               172 arch/x86/crypto/chacha_glue.c 	err = skcipher_walk_virt(&walk, req, true);
req               177 arch/x86/crypto/chacha_glue.c 	err = chacha_simd_stream_xor(&walk, ctx, req->iv);
req               182 arch/x86/crypto/chacha_glue.c static int xchacha_simd(struct skcipher_request *req)
req               184 arch/x86/crypto/chacha_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               192 arch/x86/crypto/chacha_glue.c 	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
req               193 arch/x86/crypto/chacha_glue.c 		return crypto_xchacha_crypt(req);
req               195 arch/x86/crypto/chacha_glue.c 	err = skcipher_walk_virt(&walk, req, true);
req               201 arch/x86/crypto/chacha_glue.c 	crypto_chacha_init(state, ctx, req->iv);
req               208 arch/x86/crypto/chacha_glue.c 	memcpy(&real_iv[0], req->iv + 24, 8);
req               209 arch/x86/crypto/chacha_glue.c 	memcpy(&real_iv[8], req->iv + 16, 8);
req                76 arch/x86/crypto/des3_ede_glue.c static int ecb_crypt(struct skcipher_request *req, const u32 *expkey)
req                83 arch/x86/crypto/des3_ede_glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               120 arch/x86/crypto/des3_ede_glue.c static int ecb_encrypt(struct skcipher_request *req)
req               122 arch/x86/crypto/des3_ede_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               125 arch/x86/crypto/des3_ede_glue.c 	return ecb_crypt(req, ctx->enc.expkey);
req               128 arch/x86/crypto/des3_ede_glue.c static int ecb_decrypt(struct skcipher_request *req)
req               130 arch/x86/crypto/des3_ede_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               133 arch/x86/crypto/des3_ede_glue.c 	return ecb_crypt(req, ctx->dec.expkey);
req               159 arch/x86/crypto/des3_ede_glue.c static int cbc_encrypt(struct skcipher_request *req)
req               161 arch/x86/crypto/des3_ede_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               167 arch/x86/crypto/des3_ede_glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               238 arch/x86/crypto/des3_ede_glue.c static int cbc_decrypt(struct skcipher_request *req)
req               240 arch/x86/crypto/des3_ede_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               246 arch/x86/crypto/des3_ede_glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               321 arch/x86/crypto/des3_ede_glue.c static int ctr_crypt(struct skcipher_request *req)
req               323 arch/x86/crypto/des3_ede_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               329 arch/x86/crypto/des3_ede_glue.c 	err = skcipher_walk_virt(&walk, req, false);
req               162 arch/x86/crypto/ghash-clmulni-intel_glue.c static int ghash_async_init(struct ahash_request *req)
req               164 arch/x86/crypto/ghash-clmulni-intel_glue.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               166 arch/x86/crypto/ghash-clmulni-intel_glue.c 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
req               175 arch/x86/crypto/ghash-clmulni-intel_glue.c static int ghash_async_update(struct ahash_request *req)
req               177 arch/x86/crypto/ghash-clmulni-intel_glue.c 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
req               178 arch/x86/crypto/ghash-clmulni-intel_glue.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               184 arch/x86/crypto/ghash-clmulni-intel_glue.c 		memcpy(cryptd_req, req, sizeof(*req));
req               189 arch/x86/crypto/ghash-clmulni-intel_glue.c 		return shash_ahash_update(req, desc);
req               193 arch/x86/crypto/ghash-clmulni-intel_glue.c static int ghash_async_final(struct ahash_request *req)
req               195 arch/x86/crypto/ghash-clmulni-intel_glue.c 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
req               196 arch/x86/crypto/ghash-clmulni-intel_glue.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               202 arch/x86/crypto/ghash-clmulni-intel_glue.c 		memcpy(cryptd_req, req, sizeof(*req));
req               207 arch/x86/crypto/ghash-clmulni-intel_glue.c 		return crypto_shash_final(desc, req->result);
req               211 arch/x86/crypto/ghash-clmulni-intel_glue.c static int ghash_async_import(struct ahash_request *req, const void *in)
req               213 arch/x86/crypto/ghash-clmulni-intel_glue.c 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
req               217 arch/x86/crypto/ghash-clmulni-intel_glue.c 	ghash_async_init(req);
req               223 arch/x86/crypto/ghash-clmulni-intel_glue.c static int ghash_async_export(struct ahash_request *req, void *out)
req               225 arch/x86/crypto/ghash-clmulni-intel_glue.c 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
req               234 arch/x86/crypto/ghash-clmulni-intel_glue.c static int ghash_async_digest(struct ahash_request *req)
req               236 arch/x86/crypto/ghash-clmulni-intel_glue.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               238 arch/x86/crypto/ghash-clmulni-intel_glue.c 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
req               243 arch/x86/crypto/ghash-clmulni-intel_glue.c 		memcpy(cryptd_req, req, sizeof(*req));
req               251 arch/x86/crypto/ghash-clmulni-intel_glue.c 		return shash_ahash_digest(req, desc);
req                22 arch/x86/crypto/glue_helper.c 			struct skcipher_request *req)
req                24 arch/x86/crypto/glue_helper.c 	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
req                31 arch/x86/crypto/glue_helper.c 	err = skcipher_walk_virt(&walk, req, false);
req                67 arch/x86/crypto/glue_helper.c 				struct skcipher_request *req)
req                69 arch/x86/crypto/glue_helper.c 	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
req                75 arch/x86/crypto/glue_helper.c 	err = skcipher_walk_virt(&walk, req, false);
req                99 arch/x86/crypto/glue_helper.c 				struct skcipher_request *req)
req               101 arch/x86/crypto/glue_helper.c 	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
req               108 arch/x86/crypto/glue_helper.c 	err = skcipher_walk_virt(&walk, req, false);
req               159 arch/x86/crypto/glue_helper.c 			struct skcipher_request *req)
req               161 arch/x86/crypto/glue_helper.c 	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
req               168 arch/x86/crypto/glue_helper.c 	err = skcipher_walk_virt(&walk, req, false);
req               261 arch/x86/crypto/glue_helper.c 			struct skcipher_request *req,
req               265 arch/x86/crypto/glue_helper.c 	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
req               273 arch/x86/crypto/glue_helper.c 	if (req->cryptlen < XTS_BLOCK_SIZE)
req               277 arch/x86/crypto/glue_helper.c 		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               279 arch/x86/crypto/glue_helper.c 		tail = req->cryptlen % XTS_BLOCK_SIZE + XTS_BLOCK_SIZE;
req               285 arch/x86/crypto/glue_helper.c 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
req               286 arch/x86/crypto/glue_helper.c 					   req->cryptlen - tail, req->iv);
req               287 arch/x86/crypto/glue_helper.c 		req = &subreq;
req               290 arch/x86/crypto/glue_helper.c 	err = skcipher_walk_virt(&walk, req, false);
req               311 arch/x86/crypto/glue_helper.c 		u8 *next_tweak, *final_tweak = req->iv;
req               316 arch/x86/crypto/glue_helper.c 		dst = src = scatterwalk_ffwd(s, req->src, req->cryptlen);
req               317 arch/x86/crypto/glue_helper.c 		if (req->dst != req->src)
req               318 arch/x86/crypto/glue_helper.c 			dst = scatterwalk_ffwd(d, req->dst, req->cryptlen);
req               321 arch/x86/crypto/glue_helper.c 			next_tweak = memcpy(b, req->iv, XTS_BLOCK_SIZE);
req               324 arch/x86/crypto/glue_helper.c 			next_tweak = req->iv;
req               330 arch/x86/crypto/glue_helper.c 		err = skcipher_walk_virt(&walk, req, false) ?:
req               345 arch/x86/crypto/glue_helper.c 		err = skcipher_walk_virt(&walk, req, false) ?:
req               137 arch/x86/crypto/serpent_avx2_glue.c static int ecb_encrypt(struct skcipher_request *req)
req               139 arch/x86/crypto/serpent_avx2_glue.c 	return glue_ecb_req_128bit(&serpent_enc, req);
req               142 arch/x86/crypto/serpent_avx2_glue.c static int ecb_decrypt(struct skcipher_request *req)
req               144 arch/x86/crypto/serpent_avx2_glue.c 	return glue_ecb_req_128bit(&serpent_dec, req);
req               147 arch/x86/crypto/serpent_avx2_glue.c static int cbc_encrypt(struct skcipher_request *req)
req               150 arch/x86/crypto/serpent_avx2_glue.c 					   req);
req               153 arch/x86/crypto/serpent_avx2_glue.c static int cbc_decrypt(struct skcipher_request *req)
req               155 arch/x86/crypto/serpent_avx2_glue.c 	return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
req               158 arch/x86/crypto/serpent_avx2_glue.c static int ctr_crypt(struct skcipher_request *req)
req               160 arch/x86/crypto/serpent_avx2_glue.c 	return glue_ctr_req_128bit(&serpent_ctr, req);
req               163 arch/x86/crypto/serpent_avx2_glue.c static int xts_encrypt(struct skcipher_request *req)
req               165 arch/x86/crypto/serpent_avx2_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               168 arch/x86/crypto/serpent_avx2_glue.c 	return glue_xts_req_128bit(&serpent_enc_xts, req,
req               173 arch/x86/crypto/serpent_avx2_glue.c static int xts_decrypt(struct skcipher_request *req)
req               175 arch/x86/crypto/serpent_avx2_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               178 arch/x86/crypto/serpent_avx2_glue.c 	return glue_xts_req_128bit(&serpent_dec_xts, req,
req               177 arch/x86/crypto/serpent_avx_glue.c static int ecb_encrypt(struct skcipher_request *req)
req               179 arch/x86/crypto/serpent_avx_glue.c 	return glue_ecb_req_128bit(&serpent_enc, req);
req               182 arch/x86/crypto/serpent_avx_glue.c static int ecb_decrypt(struct skcipher_request *req)
req               184 arch/x86/crypto/serpent_avx_glue.c 	return glue_ecb_req_128bit(&serpent_dec, req);
req               187 arch/x86/crypto/serpent_avx_glue.c static int cbc_encrypt(struct skcipher_request *req)
req               190 arch/x86/crypto/serpent_avx_glue.c 					   req);
req               193 arch/x86/crypto/serpent_avx_glue.c static int cbc_decrypt(struct skcipher_request *req)
req               195 arch/x86/crypto/serpent_avx_glue.c 	return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
req               198 arch/x86/crypto/serpent_avx_glue.c static int ctr_crypt(struct skcipher_request *req)
req               200 arch/x86/crypto/serpent_avx_glue.c 	return glue_ctr_req_128bit(&serpent_ctr, req);
req               203 arch/x86/crypto/serpent_avx_glue.c static int xts_encrypt(struct skcipher_request *req)
req               205 arch/x86/crypto/serpent_avx_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               208 arch/x86/crypto/serpent_avx_glue.c 	return glue_xts_req_128bit(&serpent_enc_xts, req,
req               213 arch/x86/crypto/serpent_avx_glue.c static int xts_decrypt(struct skcipher_request *req)
req               215 arch/x86/crypto/serpent_avx_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               218 arch/x86/crypto/serpent_avx_glue.c 	return glue_xts_req_128bit(&serpent_dec_xts, req,
req               128 arch/x86/crypto/serpent_sse2_glue.c static int ecb_encrypt(struct skcipher_request *req)
req               130 arch/x86/crypto/serpent_sse2_glue.c 	return glue_ecb_req_128bit(&serpent_enc, req);
req               133 arch/x86/crypto/serpent_sse2_glue.c static int ecb_decrypt(struct skcipher_request *req)
req               135 arch/x86/crypto/serpent_sse2_glue.c 	return glue_ecb_req_128bit(&serpent_dec, req);
req               138 arch/x86/crypto/serpent_sse2_glue.c static int cbc_encrypt(struct skcipher_request *req)
req               141 arch/x86/crypto/serpent_sse2_glue.c 					   req);
req               144 arch/x86/crypto/serpent_sse2_glue.c static int cbc_decrypt(struct skcipher_request *req)
req               146 arch/x86/crypto/serpent_sse2_glue.c 	return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
req               149 arch/x86/crypto/serpent_sse2_glue.c static int ctr_crypt(struct skcipher_request *req)
req               151 arch/x86/crypto/serpent_sse2_glue.c 	return glue_ctr_req_128bit(&serpent_ctr, req);
req               180 arch/x86/crypto/twofish_avx_glue.c static int ecb_encrypt(struct skcipher_request *req)
req               182 arch/x86/crypto/twofish_avx_glue.c 	return glue_ecb_req_128bit(&twofish_enc, req);
req               185 arch/x86/crypto/twofish_avx_glue.c static int ecb_decrypt(struct skcipher_request *req)
req               187 arch/x86/crypto/twofish_avx_glue.c 	return glue_ecb_req_128bit(&twofish_dec, req);
req               190 arch/x86/crypto/twofish_avx_glue.c static int cbc_encrypt(struct skcipher_request *req)
req               193 arch/x86/crypto/twofish_avx_glue.c 					   req);
req               196 arch/x86/crypto/twofish_avx_glue.c static int cbc_decrypt(struct skcipher_request *req)
req               198 arch/x86/crypto/twofish_avx_glue.c 	return glue_cbc_decrypt_req_128bit(&twofish_dec_cbc, req);
req               201 arch/x86/crypto/twofish_avx_glue.c static int ctr_crypt(struct skcipher_request *req)
req               203 arch/x86/crypto/twofish_avx_glue.c 	return glue_ctr_req_128bit(&twofish_ctr, req);
req               206 arch/x86/crypto/twofish_avx_glue.c static int xts_encrypt(struct skcipher_request *req)
req               208 arch/x86/crypto/twofish_avx_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               211 arch/x86/crypto/twofish_avx_glue.c 	return glue_xts_req_128bit(&twofish_enc_xts, req,
req               216 arch/x86/crypto/twofish_avx_glue.c static int xts_decrypt(struct skcipher_request *req)
req               218 arch/x86/crypto/twofish_avx_glue.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               221 arch/x86/crypto/twofish_avx_glue.c 	return glue_xts_req_128bit(&twofish_dec_xts, req,
req               143 arch/x86/crypto/twofish_glue_3way.c static int ecb_encrypt(struct skcipher_request *req)
req               145 arch/x86/crypto/twofish_glue_3way.c 	return glue_ecb_req_128bit(&twofish_enc, req);
req               148 arch/x86/crypto/twofish_glue_3way.c static int ecb_decrypt(struct skcipher_request *req)
req               150 arch/x86/crypto/twofish_glue_3way.c 	return glue_ecb_req_128bit(&twofish_dec, req);
req               153 arch/x86/crypto/twofish_glue_3way.c static int cbc_encrypt(struct skcipher_request *req)
req               156 arch/x86/crypto/twofish_glue_3way.c 					   req);
req               159 arch/x86/crypto/twofish_glue_3way.c static int cbc_decrypt(struct skcipher_request *req)
req               161 arch/x86/crypto/twofish_glue_3way.c 	return glue_cbc_decrypt_req_128bit(&twofish_dec_cbc, req);
req               164 arch/x86/crypto/twofish_glue_3way.c static int ctr_crypt(struct skcipher_request *req)
req               166 arch/x86/crypto/twofish_glue_3way.c 	return glue_ctr_req_128bit(&twofish_ctr, req);
req               189 arch/x86/entry/calling.h .macro SET_NOFLUSH_BIT	reg:req
req               193 arch/x86/entry/calling.h .macro ADJUST_KERNEL_CR3 reg:req
req               199 arch/x86/entry/calling.h .macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
req               210 arch/x86/entry/calling.h .macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
req               244 arch/x86/entry/calling.h .macro SWITCH_TO_USER_CR3_STACK	scratch_reg:req
req               250 arch/x86/entry/calling.h .macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
req               268 arch/x86/entry/calling.h .macro RESTORE_CR3 scratch_reg:req save_reg:req
req               306 arch/x86/entry/calling.h .macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
req               308 arch/x86/entry/calling.h .macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
req               310 arch/x86/entry/calling.h .macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
req               312 arch/x86/entry/calling.h .macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
req               314 arch/x86/entry/calling.h .macro RESTORE_CR3 scratch_reg:req save_reg:req
req               103 arch/x86/include/asm/crypto/glue_helper.h 			       struct skcipher_request *req);
req               106 arch/x86/include/asm/crypto/glue_helper.h 				       struct skcipher_request *req);
req               109 arch/x86/include/asm/crypto/glue_helper.h 				       struct skcipher_request *req);
req               112 arch/x86/include/asm/crypto/glue_helper.h 			       struct skcipher_request *req);
req               115 arch/x86/include/asm/crypto/glue_helper.h 			       struct skcipher_request *req,
req                85 arch/x86/include/asm/nospec-branch.h .macro RETPOLINE_JMP reg:req
req               100 arch/x86/include/asm/nospec-branch.h .macro RETPOLINE_CALL reg:req
req               113 arch/x86/include/asm/nospec-branch.h .macro JMP_NOSPEC reg:req
req               124 arch/x86/include/asm/nospec-branch.h .macro CALL_NOSPEC reg:req
req               139 arch/x86/include/asm/nospec-branch.h .macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
req               258 arch/x86/include/asm/xen/hypercall.h HYPERVISOR_mmu_update(struct mmu_update *req, int count,
req               261 arch/x86/include/asm/xen/hypercall.h 	return _hypercall4(int, mmu_update, req, count, success_count, domid);
req               497 arch/x86/include/asm/xen/hypercall.h MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
req               501 arch/x86/include/asm/xen/hypercall.h 	mcl->args[0] = (unsigned long)req;
req               171 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	struct dev_pm_qos_request req;
req               179 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 		dev_pm_qos_remove_request(&pm_req->req);
req               214 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 					     &pm_req->req,
req              2246 block/bfq-iosched.c static int bfq_request_merge(struct request_queue *q, struct request **req,
req              2254 block/bfq-iosched.c 		*req = __rq;
req              2263 block/bfq-iosched.c static void bfq_request_merged(struct request_queue *q, struct request *req,
req              2267 block/bfq-iosched.c 	    rb_prev(&req->rb_node) &&
req              2268 block/bfq-iosched.c 	    blk_rq_pos(req) <
req              2269 block/bfq-iosched.c 	    blk_rq_pos(container_of(rb_prev(&req->rb_node),
req              2271 block/bfq-iosched.c 		struct bfq_queue *bfqq = bfq_init_rq(req);
req              2281 block/bfq-iosched.c 		elv_rb_del(&bfqq->sort_list, req);
req              2282 block/bfq-iosched.c 		elv_rb_add(&bfqq->sort_list, req);
req              2286 block/bfq-iosched.c 		next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
req               210 block/blk-core.c static void print_req_error(struct request *req, blk_status_t status,
req               222 block/blk-core.c 		req->rq_disk ? req->rq_disk->disk_name : "?",
req               223 block/blk-core.c 		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
req               224 block/blk-core.c 		req->cmd_flags & ~REQ_OP_MASK,
req               225 block/blk-core.c 		req->nr_phys_segments,
req               226 block/blk-core.c 		IOPRIO_PRIO_CLASS(req->ioprio));
req               580 block/blk-core.c 	struct request *req;
req               585 block/blk-core.c 	req = blk_mq_alloc_request(q, op, flags);
req               586 block/blk-core.c 	if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
req               587 block/blk-core.c 		q->mq_ops->initialize_rq_fn(req);
req               589 block/blk-core.c 	return req;
req               593 block/blk-core.c void blk_put_request(struct request *req)
req               595 block/blk-core.c 	blk_mq_free_request(req);
req               599 block/blk-core.c bool bio_attempt_back_merge(struct request *req, struct bio *bio,
req               604 block/blk-core.c 	if (!ll_back_merge_fn(req, bio, nr_segs))
req               607 block/blk-core.c 	trace_block_bio_backmerge(req->q, req, bio);
req               608 block/blk-core.c 	rq_qos_merge(req->q, req, bio);
req               610 block/blk-core.c 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
req               611 block/blk-core.c 		blk_rq_set_mixed_merge(req);
req               613 block/blk-core.c 	req->biotail->bi_next = bio;
req               614 block/blk-core.c 	req->biotail = bio;
req               615 block/blk-core.c 	req->__data_len += bio->bi_iter.bi_size;
req               617 block/blk-core.c 	blk_account_io_start(req, false);
req               621 block/blk-core.c bool bio_attempt_front_merge(struct request *req, struct bio *bio,
req               626 block/blk-core.c 	if (!ll_front_merge_fn(req, bio, nr_segs))
req               629 block/blk-core.c 	trace_block_bio_frontmerge(req->q, req, bio);
req               630 block/blk-core.c 	rq_qos_merge(req->q, req, bio);
req               632 block/blk-core.c 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
req               633 block/blk-core.c 		blk_rq_set_mixed_merge(req);
req               635 block/blk-core.c 	bio->bi_next = req->bio;
req               636 block/blk-core.c 	req->bio = bio;
req               638 block/blk-core.c 	req->__sector = bio->bi_iter.bi_sector;
req               639 block/blk-core.c 	req->__data_len += bio->bi_iter.bi_size;
req               641 block/blk-core.c 	blk_account_io_start(req, false);
req               645 block/blk-core.c bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
req               648 block/blk-core.c 	unsigned short segments = blk_rq_nr_discard_segments(req);
req               652 block/blk-core.c 	if (blk_rq_sectors(req) + bio_sectors(bio) >
req               653 block/blk-core.c 	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
req               656 block/blk-core.c 	rq_qos_merge(q, req, bio);
req               658 block/blk-core.c 	req->biotail->bi_next = bio;
req               659 block/blk-core.c 	req->biotail = bio;
req               660 block/blk-core.c 	req->__data_len += bio->bi_iter.bi_size;
req               661 block/blk-core.c 	req->nr_phys_segments = segments + 1;
req               663 block/blk-core.c 	blk_account_io_start(req, false);
req               666 block/blk-core.c 	req_set_nomerge(q, req);
req              1309 block/blk-core.c void blk_account_io_completion(struct request *req, unsigned int bytes)
req              1311 block/blk-core.c 	if (blk_do_io_stat(req)) {
req              1312 block/blk-core.c 		const int sgrp = op_stat_group(req_op(req));
req              1316 block/blk-core.c 		part = req->part;
req              1322 block/blk-core.c void blk_account_io_done(struct request *req, u64 now)
req              1329 block/blk-core.c 	if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
req              1330 block/blk-core.c 		const int sgrp = op_stat_group(req_op(req));
req              1334 block/blk-core.c 		part = req->part;
req              1338 block/blk-core.c 		part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
req              1339 block/blk-core.c 		part_stat_add(part, time_in_queue, nsecs_to_jiffies64(now - req->start_time_ns));
req              1340 block/blk-core.c 		part_dec_in_flight(req->q, part, rq_data_dir(req));
req              1430 block/blk-core.c bool blk_update_request(struct request *req, blk_status_t error,
req              1435 block/blk-core.c 	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
req              1437 block/blk-core.c 	if (!req->bio)
req              1441 block/blk-core.c 	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
req              1443 block/blk-core.c 		req->q->integrity.profile->complete_fn(req, nr_bytes);
req              1446 block/blk-core.c 	if (unlikely(error && !blk_rq_is_passthrough(req) &&
req              1447 block/blk-core.c 		     !(req->rq_flags & RQF_QUIET)))
req              1448 block/blk-core.c 		print_req_error(req, error, __func__);
req              1450 block/blk-core.c 	blk_account_io_completion(req, nr_bytes);
req              1453 block/blk-core.c 	while (req->bio) {
req              1454 block/blk-core.c 		struct bio *bio = req->bio;
req              1458 block/blk-core.c 			req->bio = bio->bi_next;
req              1462 block/blk-core.c 		req_bio_endio(req, bio, bio_bytes, error);
req              1474 block/blk-core.c 	if (!req->bio) {
req              1480 block/blk-core.c 		req->__data_len = 0;
req              1484 block/blk-core.c 	req->__data_len -= total_bytes;
req              1487 block/blk-core.c 	if (!blk_rq_is_passthrough(req))
req              1488 block/blk-core.c 		req->__sector += total_bytes >> 9;
req              1491 block/blk-core.c 	if (req->rq_flags & RQF_MIXED_MERGE) {
req              1492 block/blk-core.c 		req->cmd_flags &= ~REQ_FAILFAST_MASK;
req              1493 block/blk-core.c 		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
req              1496 block/blk-core.c 	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
req              1501 block/blk-core.c 		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
req              1502 block/blk-core.c 			blk_dump_rq_flags(req, "request botched");
req              1503 block/blk-core.c 			req->__data_len = blk_rq_cur_bytes(req);
req              1507 block/blk-core.c 		req->nr_phys_segments = blk_recalc_rq_segments(req);
req               164 block/blk-integrity.c bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
req               167 block/blk-integrity.c 	if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)
req               170 block/blk-integrity.c 	if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)
req               173 block/blk-integrity.c 	if (bio_integrity(req->bio)->bip_flags !=
req               177 block/blk-integrity.c 	if (req->nr_integrity_segments + next->nr_integrity_segments >
req               181 block/blk-integrity.c 	if (integrity_req_gap_back_merge(req, next->bio))
req               188 block/blk-integrity.c bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
req               194 block/blk-integrity.c 	if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
req               197 block/blk-integrity.c 	if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL)
req               200 block/blk-integrity.c 	if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
req               207 block/blk-integrity.c 	if (req->nr_integrity_segments + nr_integrity_segs >
req               211 block/blk-integrity.c 	req->nr_integrity_segments += nr_integrity_segs;
req                51 block/blk-merge.c static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
req                53 block/blk-merge.c 	return bio_will_gap(req->q, req, req->biotail, bio);
req                56 block/blk-merge.c static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
req                58 block/blk-merge.c 	return bio_will_gap(req->q, NULL, bio, req->bio);
req               551 block/blk-merge.c static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
req               554 block/blk-merge.c 	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q))
req               557 block/blk-merge.c 	if (blk_integrity_merge_bio(req->q, req, bio) == false)
req               564 block/blk-merge.c 	req->nr_phys_segments += nr_phys_segs;
req               568 block/blk-merge.c 	req_set_nomerge(req->q, req);
req               572 block/blk-merge.c int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
req               574 block/blk-merge.c 	if (req_gap_back_merge(req, bio))
req               576 block/blk-merge.c 	if (blk_integrity_rq(req) &&
req               577 block/blk-merge.c 	    integrity_req_gap_back_merge(req, bio))
req               579 block/blk-merge.c 	if (blk_rq_sectors(req) + bio_sectors(bio) >
req               580 block/blk-merge.c 	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
req               581 block/blk-merge.c 		req_set_nomerge(req->q, req);
req               585 block/blk-merge.c 	return ll_new_hw_segment(req, bio, nr_segs);
req               588 block/blk-merge.c int ll_front_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
req               590 block/blk-merge.c 	if (req_gap_front_merge(req, bio))
req               592 block/blk-merge.c 	if (blk_integrity_rq(req) &&
req               593 block/blk-merge.c 	    integrity_req_gap_front_merge(req, bio))
req               595 block/blk-merge.c 	if (blk_rq_sectors(req) + bio_sectors(bio) >
req               596 block/blk-merge.c 	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
req               597 block/blk-merge.c 		req_set_nomerge(req->q, req);
req               601 block/blk-merge.c 	return ll_new_hw_segment(req, bio, nr_segs);
req               604 block/blk-merge.c static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
req               607 block/blk-merge.c 	unsigned short segments = blk_rq_nr_discard_segments(req);
req               611 block/blk-merge.c 	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
req               612 block/blk-merge.c 	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
req               615 block/blk-merge.c 	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
req               618 block/blk-merge.c 	req_set_nomerge(q, req);
req               622 block/blk-merge.c static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
req               627 block/blk-merge.c 	if (req_gap_back_merge(req, next->bio))
req               633 block/blk-merge.c 	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
req               634 block/blk-merge.c 	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
req               637 block/blk-merge.c 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
req               641 block/blk-merge.c 	if (blk_integrity_merge_rq(q, req, next) == false)
req               645 block/blk-merge.c 	req->nr_phys_segments = total_phys_segments;
req               679 block/blk-merge.c static void blk_account_io_merge(struct request *req)
req               681 block/blk-merge.c 	if (blk_do_io_stat(req)) {
req               685 block/blk-merge.c 		part = req->part;
req               687 block/blk-merge.c 		part_dec_in_flight(req->q, part, rq_data_dir(req));
req               701 block/blk-merge.c static inline bool blk_discard_mergable(struct request *req)
req               703 block/blk-merge.c 	if (req_op(req) == REQ_OP_DISCARD &&
req               704 block/blk-merge.c 	    queue_max_discard_segments(req->q) > 1)
req               709 block/blk-merge.c static enum elv_merge blk_try_req_merge(struct request *req,
req               712 block/blk-merge.c 	if (blk_discard_mergable(req))
req               714 block/blk-merge.c 	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
req               725 block/blk-merge.c 				     struct request *req, struct request *next)
req               727 block/blk-merge.c 	if (!rq_mergeable(req) || !rq_mergeable(next))
req               730 block/blk-merge.c 	if (req_op(req) != req_op(next))
req               733 block/blk-merge.c 	if (rq_data_dir(req) != rq_data_dir(next)
req               734 block/blk-merge.c 	    || req->rq_disk != next->rq_disk)
req               737 block/blk-merge.c 	if (req_op(req) == REQ_OP_WRITE_SAME &&
req               738 block/blk-merge.c 	    !blk_write_same_mergeable(req->bio, next->bio))
req               745 block/blk-merge.c 	if (req->write_hint != next->write_hint)
req               748 block/blk-merge.c 	if (req->ioprio != next->ioprio)
req               759 block/blk-merge.c 	switch (blk_try_req_merge(req, next)) {
req               761 block/blk-merge.c 		if (!req_attempt_discard_merge(q, req, next))
req               765 block/blk-merge.c 		if (!ll_merge_requests_fn(q, req, next))
req               778 block/blk-merge.c 	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
req               779 block/blk-merge.c 	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
req               781 block/blk-merge.c 		blk_rq_set_mixed_merge(req);
req               790 block/blk-merge.c 	if (next->start_time_ns < req->start_time_ns)
req               791 block/blk-merge.c 		req->start_time_ns = next->start_time_ns;
req               793 block/blk-merge.c 	req->biotail->bi_next = next->bio;
req               794 block/blk-merge.c 	req->biotail = next->biotail;
req               796 block/blk-merge.c 	req->__data_len += blk_rq_bytes(next);
req               798 block/blk-merge.c 	if (!blk_discard_mergable(req))
req               799 block/blk-merge.c 		elv_merge_requests(q, req, next);
req               854 block/blk-mq.c static void blk_mq_rq_timed_out(struct request *req, bool reserved)
req               856 block/blk-mq.c 	req->rq_flags |= RQF_TIMED_OUT;
req               857 block/blk-mq.c 	if (req->q->mq_ops->timeout) {
req               860 block/blk-mq.c 		ret = req->q->mq_ops->timeout(req, reserved);
req               866 block/blk-mq.c 	blk_add_timer(req);
req                98 block/blk-softirq.c void __blk_complete_request(struct request *req)
req               100 block/blk-softirq.c 	struct request_queue *q = req->q;
req               101 block/blk-softirq.c 	int cpu, ccpu = req->mq_ctx->cpu;
req               131 block/blk-softirq.c 		list_add_tail(&req->ipi_list, list);
req               139 block/blk-softirq.c 		if (list->next == &req->ipi_list)
req               141 block/blk-softirq.c 	} else if (raise_blk_irq(ccpu, req))
req                81 block/blk-timeout.c void blk_abort_request(struct request *req)
req                88 block/blk-timeout.c 	WRITE_ONCE(req->deadline, jiffies);
req                89 block/blk-timeout.c 	kblockd_schedule_work(&req->q->timeout_work);
req               112 block/blk-timeout.c void blk_add_timer(struct request *req)
req               114 block/blk-timeout.c 	struct request_queue *q = req->q;
req               121 block/blk-timeout.c 	if (!req->timeout)
req               122 block/blk-timeout.c 		req->timeout = q->rq_timeout;
req               124 block/blk-timeout.c 	req->rq_flags &= ~RQF_TIMED_OUT;
req               126 block/blk-timeout.c 	expiry = jiffies + req->timeout;
req               127 block/blk-timeout.c 	WRITE_ONCE(req->deadline, expiry);
req                53 block/blk.h    is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
req                55 block/blk.h    	return hctx->fq->flush_rq == req;
req               133 block/blk.h    static inline bool integrity_req_gap_back_merge(struct request *req,
req               136 block/blk.h    	struct bio_integrity_payload *bip = bio_integrity(req->bio);
req               139 block/blk.h    	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
req               143 block/blk.h    static inline bool integrity_req_gap_front_merge(struct request *req,
req               147 block/blk.h    	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
req               149 block/blk.h    	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
req               153 block/blk.h    static inline bool integrity_req_gap_back_merge(struct request *req,
req               158 block/blk.h    static inline bool integrity_req_gap_front_merge(struct request *req,
req               177 block/blk.h    void blk_add_timer(struct request *req);
req               179 block/blk.h    bool bio_attempt_front_merge(struct request *req, struct bio *bio,
req               181 block/blk.h    bool bio_attempt_back_merge(struct request *req, struct bio *bio,
req               183 block/blk.h    bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
req               188 block/blk.h    void blk_account_io_start(struct request *req, bool new_io);
req               189 block/blk.h    void blk_account_io_completion(struct request *req, unsigned int bytes);
req               190 block/blk.h    void blk_account_io_done(struct request *req, u64 now);
req               231 block/blk.h    int ll_back_merge_fn(struct request *req, struct bio *bio,
req               233 block/blk.h    int ll_front_merge_fn(struct request *req,  struct bio *bio,
req               260 block/blk.h    static inline void req_set_nomerge(struct request_queue *q, struct request *req)
req               262 block/blk.h    	req->cmd_flags |= REQ_NOMERGE;
req               263 block/blk.h    	if (req == q->last_merge)
req               201 block/bsg-lib.c static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
req               203 block/bsg-lib.c 	size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);
req               205 block/bsg-lib.c 	BUG_ON(!req->nr_phys_segments);
req               210 block/bsg-lib.c 	sg_init_table(buf->sg_list, req->nr_phys_segments);
req               211 block/bsg-lib.c 	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
req               212 block/bsg-lib.c 	buf->payload_len = blk_rq_bytes(req);
req               221 block/bsg-lib.c static bool bsg_prepare_job(struct device *dev, struct request *req)
req               223 block/bsg-lib.c 	struct bsg_job *job = blk_mq_rq_to_pdu(req);
req               226 block/bsg-lib.c 	job->timeout = req->timeout;
req               228 block/bsg-lib.c 	if (req->bio) {
req               229 block/bsg-lib.c 		ret = bsg_map_buffer(&job->request_payload, req);
req               266 block/bsg-lib.c 	struct request *req = bd->rq;
req               272 block/bsg-lib.c 	blk_mq_start_request(req);
req               277 block/bsg-lib.c 	if (!bsg_prepare_job(dev, req))
req               280 block/bsg-lib.c 	ret = bset->job_fn(blk_mq_rq_to_pdu(req));
req               290 block/bsg-lib.c static int bsg_init_rq(struct blk_mq_tag_set *set, struct request *req,
req               293 block/bsg-lib.c 	struct bsg_job *job = blk_mq_rq_to_pdu(req);
req               302 block/bsg-lib.c static void bsg_initialize_rq(struct request *req)
req               304 block/bsg-lib.c 	struct bsg_job *job = blk_mq_rq_to_pdu(req);
req               313 block/bsg-lib.c static void bsg_exit_rq(struct blk_mq_tag_set *set, struct request *req,
req               316 block/bsg-lib.c 	struct bsg_job *job = blk_mq_rq_to_pdu(req);
req               304 block/elevator.c enum elv_merge elv_merge(struct request_queue *q, struct request **req,
req               326 block/elevator.c 			*req = q->last_merge;
req               339 block/elevator.c 		*req = __rq;
req               344 block/elevator.c 		return e->type->ops.request_merge(q, req, bio);
req               126 block/mq-deadline.c static void dd_request_merged(struct request_queue *q, struct request *req,
req               135 block/mq-deadline.c 		elv_rb_del(deadline_rb_root(dd, req), req);
req               136 block/mq-deadline.c 		deadline_add_rq_rb(dd, req);
req               140 block/mq-deadline.c static void dd_merged_requests(struct request_queue *q, struct request *req,
req               147 block/mq-deadline.c 	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
req               149 block/mq-deadline.c 				(unsigned long)req->fifo_time)) {
req               150 block/mq-deadline.c 			list_move(&req->queuelist, &next->queuelist);
req               151 block/mq-deadline.c 			req->fifo_time = next->fifo_time;
req               219 block/scsi_ioctl.c 	struct scsi_request *req = scsi_req(rq);
req               221 block/scsi_ioctl.c 	if (copy_from_user(req->cmd, hdr->cmdp, hdr->cmd_len))
req               223 block/scsi_ioctl.c 	if (blk_verify_command(req->cmd, mode))
req               229 block/scsi_ioctl.c 	req->cmd_len = hdr->cmd_len;
req               245 block/scsi_ioctl.c 	struct scsi_request *req = scsi_req(rq);
req               251 block/scsi_ioctl.c 	hdr->status = req->result & 0xff;
req               252 block/scsi_ioctl.c 	hdr->masked_status = status_byte(req->result);
req               253 block/scsi_ioctl.c 	hdr->msg_status = msg_byte(req->result);
req               254 block/scsi_ioctl.c 	hdr->host_status = host_byte(req->result);
req               255 block/scsi_ioctl.c 	hdr->driver_status = driver_byte(req->result);
req               259 block/scsi_ioctl.c 	hdr->resid = req->resid_len;
req               262 block/scsi_ioctl.c 	if (req->sense_len && hdr->sbp) {
req               263 block/scsi_ioctl.c 		int len = min((unsigned int) hdr->mx_sb_len, req->sense_len);
req               265 block/scsi_ioctl.c 		if (!copy_to_user(hdr->sbp, req->sense, len))
req               286 block/scsi_ioctl.c 	struct scsi_request *req;
req               313 block/scsi_ioctl.c 	req = scsi_req(rq);
req               316 block/scsi_ioctl.c 		req->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
req               317 block/scsi_ioctl.c 		if (!req->cmd)
req               349 block/scsi_ioctl.c 	req->retries = 0;
req               364 block/scsi_ioctl.c 	scsi_req_free_cmd(req);
req               409 block/scsi_ioctl.c 	struct scsi_request *req;
req               442 block/scsi_ioctl.c 	req = scsi_req(rq);
req               450 block/scsi_ioctl.c 	req->cmd_len = cmdlen;
req               451 block/scsi_ioctl.c 	if (copy_from_user(req->cmd, sic->data, cmdlen))
req               457 block/scsi_ioctl.c 	err = blk_verify_command(req->cmd, mode);
req               462 block/scsi_ioctl.c 	req->retries = 5;
req               468 block/scsi_ioctl.c 		req->retries = 1;
req               481 block/scsi_ioctl.c 		req->retries = 1;
req               495 block/scsi_ioctl.c 	err = req->result & 0xff;	/* only 8 bit SCSI status */
req               497 block/scsi_ioctl.c 		if (req->sense_len && req->sense) {
req               498 block/scsi_ioctl.c 			bytes = (OMAX_SB_LEN > req->sense_len) ?
req               499 block/scsi_ioctl.c 				req->sense_len : OMAX_SB_LEN;
req               500 block/scsi_ioctl.c 			if (copy_to_user(sic->data, req->sense, bytes))
req               706 block/scsi_ioctl.c void scsi_req_init(struct scsi_request *req)
req               708 block/scsi_ioctl.c 	memset(req->__cmd, 0, sizeof(req->__cmd));
req               709 block/scsi_ioctl.c 	req->cmd = req->__cmd;
req               710 block/scsi_ioctl.c 	req->cmd_len = BLK_MAX_CDB;
req               711 block/scsi_ioctl.c 	req->sense_len = 0;
req                92 crypto/ablkcipher.c static int ablkcipher_walk_next(struct ablkcipher_request *req,
req                95 crypto/ablkcipher.c int ablkcipher_walk_done(struct ablkcipher_request *req,
req                98 crypto/ablkcipher.c 	struct crypto_tfm *tfm = req->base.tfm;
req               124 crypto/ablkcipher.c 		crypto_yield(req->base.flags);
req               125 crypto/ablkcipher.c 		return ablkcipher_walk_next(req, walk);
req               130 crypto/ablkcipher.c 	if (walk->iv != req->info)
req               131 crypto/ablkcipher.c 		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
req               137 crypto/ablkcipher.c static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
req               154 crypto/ablkcipher.c 		return ablkcipher_walk_done(req, walk, -ENOMEM);
req               202 crypto/ablkcipher.c static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
req               213 crypto/ablkcipher.c static int ablkcipher_walk_next(struct ablkcipher_request *req,
req               216 crypto/ablkcipher.c 	struct crypto_tfm *tfm = req->base.tfm;
req               224 crypto/ablkcipher.c 		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
req               225 crypto/ablkcipher.c 		return ablkcipher_walk_done(req, walk, -EINVAL);
req               238 crypto/ablkcipher.c 		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
req               245 crypto/ablkcipher.c 	return ablkcipher_next_fast(req, walk);
req               258 crypto/ablkcipher.c static int ablkcipher_walk_first(struct ablkcipher_request *req,
req               261 crypto/ablkcipher.c 	struct crypto_tfm *tfm = req->base.tfm;
req               268 crypto/ablkcipher.c 	walk->iv = req->info;
req               284 crypto/ablkcipher.c 	return ablkcipher_walk_next(req, walk);
req               287 crypto/ablkcipher.c int ablkcipher_walk_phys(struct ablkcipher_request *req,
req               290 crypto/ablkcipher.c 	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
req               291 crypto/ablkcipher.c 	return ablkcipher_walk_first(req, walk);
req               115 crypto/acompress.c 	struct acomp_req *req;
req               117 crypto/acompress.c 	req = __acomp_request_alloc(acomp);
req               118 crypto/acompress.c 	if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
req               119 crypto/acompress.c 		return crypto_acomp_scomp_alloc_ctx(req);
req               121 crypto/acompress.c 	return req;
req               125 crypto/acompress.c void acomp_request_free(struct acomp_req *req)
req               127 crypto/acompress.c 	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
req               131 crypto/acompress.c 		crypto_acomp_scomp_free_ctx(req);
req               133 crypto/acompress.c 	if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
req               134 crypto/acompress.c 		acomp->dst_free(req->dst);
req               135 crypto/acompress.c 		req->dst = NULL;
req               138 crypto/acompress.c 	__acomp_request_free(req);
req               126 crypto/adiantum.c 		struct skcipher_request req; /* must be last */
req               151 crypto/adiantum.c 	skcipher_request_set_tfm(&data->req, tctx->streamcipher);
req               152 crypto/adiantum.c 	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
req               155 crypto/adiantum.c 	skcipher_request_set_crypt(&data->req, &data->sg, &data->sg,
req               157 crypto/adiantum.c 	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), &data->wait);
req               227 crypto/adiantum.c static void adiantum_hash_header(struct skcipher_request *req)
req               229 crypto/adiantum.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               231 crypto/adiantum.c 	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
req               232 crypto/adiantum.c 	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
req               248 crypto/adiantum.c 	poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv,
req               255 crypto/adiantum.c static int adiantum_hash_message(struct skcipher_request *req,
req               258 crypto/adiantum.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               260 crypto/adiantum.c 	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
req               261 crypto/adiantum.c 	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
req               290 crypto/adiantum.c static int adiantum_finish(struct skcipher_request *req)
req               292 crypto/adiantum.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               294 crypto/adiantum.c 	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
req               295 crypto/adiantum.c 	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
req               309 crypto/adiantum.c 	err = adiantum_hash_message(req, req->dst, &digest);
req               314 crypto/adiantum.c 	scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->dst,
req               322 crypto/adiantum.c 	struct skcipher_request *req = areq->data;
req               325 crypto/adiantum.c 		err = adiantum_finish(req);
req               327 crypto/adiantum.c 	skcipher_request_complete(req, err);
req               330 crypto/adiantum.c static int adiantum_crypt(struct skcipher_request *req, bool enc)
req               332 crypto/adiantum.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               334 crypto/adiantum.c 	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
req               335 crypto/adiantum.c 	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
req               340 crypto/adiantum.c 	if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
req               350 crypto/adiantum.c 	adiantum_hash_header(req);
req               351 crypto/adiantum.c 	err = adiantum_hash_message(req, req->src, &digest);
req               355 crypto/adiantum.c 	scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->src,
req               382 crypto/adiantum.c 	if (round_up(stream_len, CHACHA_BLOCK_SIZE) <= req->cryptlen)
req               386 crypto/adiantum.c 	skcipher_request_set_crypt(&rctx->u.streamcipher_req, req->src,
req               387 crypto/adiantum.c 				   req->dst, stream_len, &rctx->rbuf);
req               389 crypto/adiantum.c 				      req->base.flags,
req               390 crypto/adiantum.c 				      adiantum_streamcipher_done, req);
req               392 crypto/adiantum.c 		adiantum_finish(req);
req               395 crypto/adiantum.c static int adiantum_encrypt(struct skcipher_request *req)
req               397 crypto/adiantum.c 	return adiantum_crypt(req, true);
req               400 crypto/adiantum.c static int adiantum_decrypt(struct skcipher_request *req)
req               402 crypto/adiantum.c 	return adiantum_crypt(req, false);
req                88 crypto/aead.c  int crypto_aead_encrypt(struct aead_request *req)
req                90 crypto/aead.c  	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req                92 crypto/aead.c  	unsigned int cryptlen = req->cryptlen;
req                99 crypto/aead.c  		ret = crypto_aead_alg(aead)->encrypt(req);
req               105 crypto/aead.c  int crypto_aead_decrypt(struct aead_request *req)
req               107 crypto/aead.c  	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               109 crypto/aead.c  	unsigned int cryptlen = req->cryptlen;
req               115 crypto/aead.c  	else if (req->cryptlen < crypto_aead_authsize(aead))
req               118 crypto/aead.c  		ret = crypto_aead_alg(aead)->decrypt(req);
req                40 crypto/aegis128-core.c 				  struct aead_request *req, bool atomic);
req               327 crypto/aegis128-core.c 					  struct aead_request *req,
req               332 crypto/aegis128-core.c 	ops->skcipher_walk_init(&walk, req, false);
req               393 crypto/aegis128-core.c static void crypto_aegis128_crypt(struct aead_request *req,
req               398 crypto/aegis128-core.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               402 crypto/aegis128-core.c 	crypto_aegis128_init(&state, &ctx->key, req->iv);
req               403 crypto/aegis128-core.c 	crypto_aegis128_process_ad(&state, req->src, req->assoclen);
req               404 crypto/aegis128-core.c 	crypto_aegis128_process_crypt(&state, req, ops);
req               405 crypto/aegis128-core.c 	crypto_aegis128_final(&state, tag_xor, req->assoclen, cryptlen);
req               408 crypto/aegis128-core.c static int crypto_aegis128_encrypt(struct aead_request *req)
req               415 crypto/aegis128-core.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               418 crypto/aegis128-core.c 	unsigned int cryptlen = req->cryptlen;
req               425 crypto/aegis128-core.c 	crypto_aegis128_crypt(req, &tag, cryptlen, ops);
req               427 crypto/aegis128-core.c 	scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen,
req               432 crypto/aegis128-core.c static int crypto_aegis128_decrypt(struct aead_request *req)
req               440 crypto/aegis128-core.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               443 crypto/aegis128-core.c 	unsigned int cryptlen = req->cryptlen - authsize;
req               445 crypto/aegis128-core.c 	scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen,
req               453 crypto/aegis128-core.c 	crypto_aegis128_crypt(req, &tag, cryptlen, ops);
req               129 crypto/ahash.c int crypto_hash_walk_first(struct ahash_request *req,
req               132 crypto/ahash.c 	walk->total = req->nbytes;
req               139 crypto/ahash.c 	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
req               140 crypto/ahash.c 	walk->sg = req->src;
req               141 crypto/ahash.c 	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
req               147 crypto/ahash.c int crypto_ahash_walk_first(struct ahash_request *req,
req               150 crypto/ahash.c 	walk->total = req->nbytes;
req               157 crypto/ahash.c 	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
req               158 crypto/ahash.c 	walk->sg = req->src;
req               159 crypto/ahash.c 	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
req               230 crypto/ahash.c static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
req               232 crypto/ahash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               238 crypto/ahash.c 		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req               268 crypto/ahash.c 	priv->result = req->result;
req               269 crypto/ahash.c 	priv->complete = req->base.complete;
req               270 crypto/ahash.c 	priv->data = req->base.data;
req               271 crypto/ahash.c 	priv->flags = req->base.flags;
req               279 crypto/ahash.c 	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
req               280 crypto/ahash.c 	req->base.complete = cplt;
req               281 crypto/ahash.c 	req->base.data = req;
req               282 crypto/ahash.c 	req->priv = priv;
req               287 crypto/ahash.c static void ahash_restore_req(struct ahash_request *req, int err)
req               289 crypto/ahash.c 	struct ahash_request_priv *priv = req->priv;
req               292 crypto/ahash.c 		memcpy(priv->result, req->result,
req               293 crypto/ahash.c 		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
req               296 crypto/ahash.c 	req->result = priv->result;
req               298 crypto/ahash.c 	ahash_request_set_callback(req, priv->flags,
req               300 crypto/ahash.c 	req->priv = NULL;
req               306 crypto/ahash.c static void ahash_notify_einprogress(struct ahash_request *req)
req               308 crypto/ahash.c 	struct ahash_request_priv *priv = req->priv;
req               316 crypto/ahash.c static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
req               318 crypto/ahash.c 	struct ahash_request *areq = req->data;
req               341 crypto/ahash.c static int ahash_op_unaligned(struct ahash_request *req,
req               346 crypto/ahash.c 	err = ahash_save_req(req, ahash_op_unaligned_done);
req               350 crypto/ahash.c 	err = op(req);
req               354 crypto/ahash.c 	ahash_restore_req(req, err);
req               359 crypto/ahash.c static int crypto_ahash_op(struct ahash_request *req,
req               362 crypto/ahash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               365 crypto/ahash.c 	if ((unsigned long)req->result & alignmask)
req               366 crypto/ahash.c 		return ahash_op_unaligned(req, op);
req               368 crypto/ahash.c 	return op(req);
req               371 crypto/ahash.c int crypto_ahash_final(struct ahash_request *req)
req               373 crypto/ahash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               375 crypto/ahash.c 	unsigned int nbytes = req->nbytes;
req               379 crypto/ahash.c 	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
req               385 crypto/ahash.c int crypto_ahash_finup(struct ahash_request *req)
req               387 crypto/ahash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               389 crypto/ahash.c 	unsigned int nbytes = req->nbytes;
req               393 crypto/ahash.c 	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
req               399 crypto/ahash.c int crypto_ahash_digest(struct ahash_request *req)
req               401 crypto/ahash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               403 crypto/ahash.c 	unsigned int nbytes = req->nbytes;
req               410 crypto/ahash.c 		ret = crypto_ahash_op(req, tfm->digest);
req               416 crypto/ahash.c static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
req               418 crypto/ahash.c 	struct ahash_request *areq = req->data;
req               428 crypto/ahash.c static int ahash_def_finup_finish1(struct ahash_request *req, int err)
req               433 crypto/ahash.c 	req->base.complete = ahash_def_finup_done2;
req               435 crypto/ahash.c 	err = crypto_ahash_reqtfm(req)->final(req);
req               440 crypto/ahash.c 	ahash_restore_req(req, err);
req               444 crypto/ahash.c static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
req               446 crypto/ahash.c 	struct ahash_request *areq = req->data;
req               462 crypto/ahash.c static int ahash_def_finup(struct ahash_request *req)
req               464 crypto/ahash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               467 crypto/ahash.c 	err = ahash_save_req(req, ahash_def_finup_done1);
req               471 crypto/ahash.c 	err = tfm->update(req);
req               475 crypto/ahash.c 	return ahash_def_finup_finish1(req, err);
req               117 crypto/akcipher.c static int akcipher_default_op(struct akcipher_request *req)
req                29 crypto/algif_hash.c 	struct ahash_request req;
req                39 crypto/algif_hash.c 	ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
req                57 crypto/algif_hash.c 	ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
req                81 crypto/algif_hash.c 		err = crypto_wait_req(crypto_ahash_init(&ctx->req), &ctx->wait);
req               100 crypto/algif_hash.c 		ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);
req               102 crypto/algif_hash.c 		err = crypto_wait_req(crypto_ahash_update(&ctx->req),
req               120 crypto/algif_hash.c 		ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
req               121 crypto/algif_hash.c 		err = crypto_wait_req(crypto_ahash_final(&ctx->req),
req               153 crypto/algif_hash.c 	ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);
req               157 crypto/algif_hash.c 			err = crypto_ahash_finup(&ctx->req);
req               159 crypto/algif_hash.c 			err = crypto_ahash_digest(&ctx->req);
req               162 crypto/algif_hash.c 			err = crypto_ahash_init(&ctx->req);
req               168 crypto/algif_hash.c 		err = crypto_ahash_update(&ctx->req);
req               189 crypto/algif_hash.c 	unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
req               204 crypto/algif_hash.c 	ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
req               207 crypto/algif_hash.c 		err = crypto_wait_req(crypto_ahash_init(&ctx->req),
req               215 crypto/algif_hash.c 		err = crypto_wait_req(crypto_ahash_final(&ctx->req),
req               236 crypto/algif_hash.c 	struct ahash_request *req = &ctx->req;
req               246 crypto/algif_hash.c 	err = more ? crypto_ahash_export(req, state) : 0;
req               264 crypto/algif_hash.c 	err = crypto_ahash_import(&ctx2->req, state);
req               444 crypto/algif_hash.c 	ahash_request_set_tfm(&ctx->req, tfm);
req               445 crypto/algif_hash.c 	ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req               598 crypto/api.c   void crypto_req_done(struct crypto_async_request *req, int err)
req               600 crypto/api.c   	struct crypto_wait *wait = req->data;
req                24 crypto/arc4.c  static int crypto_arc4_crypt(struct skcipher_request *req)
req                26 crypto/arc4.c  	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req                31 crypto/arc4.c  	err = skcipher_walk_virt(&walk, req, false);
req               465 crypto/asymmetric_keys/asym_tpm.c 	struct akcipher_request *req;
req               490 crypto/asymmetric_keys/asym_tpm.c 	req = akcipher_request_alloc(tfm, GFP_KERNEL);
req               491 crypto/asymmetric_keys/asym_tpm.c 	if (!req)
req               496 crypto/asymmetric_keys/asym_tpm.c 	akcipher_request_set_crypt(req, &in_sg, &out_sg, params->in_len,
req               499 crypto/asymmetric_keys/asym_tpm.c 	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
req               503 crypto/asymmetric_keys/asym_tpm.c 	ret = crypto_akcipher_encrypt(req);
req               507 crypto/asymmetric_keys/asym_tpm.c 		ret = req->dst_len;
req               509 crypto/asymmetric_keys/asym_tpm.c 	akcipher_request_free(req);
req               754 crypto/asymmetric_keys/asym_tpm.c 	struct akcipher_request *req;
req               786 crypto/asymmetric_keys/asym_tpm.c 	req = akcipher_request_alloc(tfm, GFP_KERNEL);
req               787 crypto/asymmetric_keys/asym_tpm.c 	if (!req)
req               793 crypto/asymmetric_keys/asym_tpm.c 	akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size,
req               796 crypto/asymmetric_keys/asym_tpm.c 	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
req               799 crypto/asymmetric_keys/asym_tpm.c 	ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
req               801 crypto/asymmetric_keys/asym_tpm.c 	akcipher_request_free(req);
req                54 crypto/asymmetric_keys/asymmetric_type.c 	char *req, *p;
req                68 crypto/asymmetric_keys/asymmetric_type.c 	p = req = kmalloc(2 + 1 + len * 2 + 1, GFP_KERNEL);
req                69 crypto/asymmetric_keys/asymmetric_type.c 	if (!req)
req                83 crypto/asymmetric_keys/asymmetric_type.c 	pr_debug("Look up: \"%s\"\n", req);
req                86 crypto/asymmetric_keys/asymmetric_type.c 			     &key_type_asymmetric, req, true);
req                88 crypto/asymmetric_keys/asymmetric_type.c 		pr_debug("Request for key '%s' err %ld\n", req, PTR_ERR(ref));
req                89 crypto/asymmetric_keys/asymmetric_type.c 	kfree(req);
req               167 crypto/asymmetric_keys/public_key.c 	struct akcipher_request *req;
req               188 crypto/asymmetric_keys/public_key.c 	req = akcipher_request_alloc(tfm, GFP_KERNEL);
req               189 crypto/asymmetric_keys/public_key.c 	if (!req)
req               212 crypto/asymmetric_keys/public_key.c 	akcipher_request_set_crypt(req, &in_sg, &out_sg, params->in_len,
req               215 crypto/asymmetric_keys/public_key.c 	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
req               222 crypto/asymmetric_keys/public_key.c 		ret = crypto_akcipher_encrypt(req);
req               225 crypto/asymmetric_keys/public_key.c 		ret = crypto_akcipher_decrypt(req);
req               228 crypto/asymmetric_keys/public_key.c 		ret = crypto_akcipher_sign(req);
req               236 crypto/asymmetric_keys/public_key.c 		ret = req->dst_len;
req               241 crypto/asymmetric_keys/public_key.c 	akcipher_request_free(req);
req               256 crypto/asymmetric_keys/public_key.c 	struct akcipher_request *req;
req               279 crypto/asymmetric_keys/public_key.c 	req = akcipher_request_alloc(tfm, GFP_KERNEL);
req               280 crypto/asymmetric_keys/public_key.c 	if (!req)
req               304 crypto/asymmetric_keys/public_key.c 	akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size,
req               307 crypto/asymmetric_keys/public_key.c 	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
req               310 crypto/asymmetric_keys/public_key.c 	ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
req               315 crypto/asymmetric_keys/public_key.c 	akcipher_request_free(req);
req                40 crypto/authenc.c static void authenc_request_complete(struct aead_request *req, int err)
req                43 crypto/authenc.c 		aead_request_complete(req, err);
req               124 crypto/authenc.c 	struct aead_request *req = areq->data;
req               125 crypto/authenc.c 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
req               128 crypto/authenc.c 	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
req               134 crypto/authenc.c 	scatterwalk_map_and_copy(ahreq->result, req->dst,
req               135 crypto/authenc.c 				 req->assoclen + req->cryptlen,
req               139 crypto/authenc.c 	aead_request_complete(req, err);
req               142 crypto/authenc.c static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
req               144 crypto/authenc.c 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
req               149 crypto/authenc.c 	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
req               158 crypto/authenc.c 	ahash_request_set_crypt(ahreq, req->dst, hash,
req               159 crypto/authenc.c 				req->assoclen + req->cryptlen);
req               161 crypto/authenc.c 				   authenc_geniv_ahash_done, req);
req               167 crypto/authenc.c 	scatterwalk_map_and_copy(hash, req->dst, req->assoclen + req->cryptlen,
req               173 crypto/authenc.c static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
req               176 crypto/authenc.c 	struct aead_request *areq = req->data;
req               187 crypto/authenc.c static int crypto_authenc_copy_assoc(struct aead_request *req)
req               189 crypto/authenc.c 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
req               194 crypto/authenc.c 	skcipher_request_set_callback(skreq, aead_request_flags(req),
req               196 crypto/authenc.c 	skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
req               202 crypto/authenc.c static int crypto_authenc_encrypt(struct aead_request *req)
req               204 crypto/authenc.c 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
req               208 crypto/authenc.c 	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
req               210 crypto/authenc.c 	unsigned int cryptlen = req->cryptlen;
req               216 crypto/authenc.c 	src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
req               219 crypto/authenc.c 	if (req->src != req->dst) {
req               220 crypto/authenc.c 		err = crypto_authenc_copy_assoc(req);
req               224 crypto/authenc.c 		dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
req               228 crypto/authenc.c 	skcipher_request_set_callback(skreq, aead_request_flags(req),
req               229 crypto/authenc.c 				      crypto_authenc_encrypt_done, req);
req               230 crypto/authenc.c 	skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
req               236 crypto/authenc.c 	return crypto_authenc_genicv(req, aead_request_flags(req));
req               239 crypto/authenc.c static int crypto_authenc_decrypt_tail(struct aead_request *req,
req               242 crypto/authenc.c 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
req               246 crypto/authenc.c 	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
req               254 crypto/authenc.c 	scatterwalk_map_and_copy(ihash, req->src, ahreq->nbytes, authsize, 0);
req               259 crypto/authenc.c 	src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
req               262 crypto/authenc.c 	if (req->src != req->dst)
req               263 crypto/authenc.c 		dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
req               266 crypto/authenc.c 	skcipher_request_set_callback(skreq, aead_request_flags(req),
req               267 crypto/authenc.c 				      req->base.complete, req->base.data);
req               269 crypto/authenc.c 				   req->cryptlen - authsize, req->iv);
req               277 crypto/authenc.c 	struct aead_request *req = areq->data;
req               282 crypto/authenc.c 	err = crypto_authenc_decrypt_tail(req, 0);
req               285 crypto/authenc.c 	authenc_request_complete(req, err);
req               288 crypto/authenc.c static int crypto_authenc_decrypt(struct aead_request *req)
req               290 crypto/authenc.c 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
req               296 crypto/authenc.c 	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
req               305 crypto/authenc.c 	ahash_request_set_crypt(ahreq, req->src, hash,
req               306 crypto/authenc.c 				req->assoclen + req->cryptlen - authsize);
req               307 crypto/authenc.c 	ahash_request_set_callback(ahreq, aead_request_flags(req),
req               308 crypto/authenc.c 				   authenc_verify_ahash_done, req);
req               314 crypto/authenc.c 	return crypto_authenc_decrypt_tail(req, aead_request_flags(req));
req                43 crypto/authencesn.c static void authenc_esn_request_complete(struct aead_request *req, int err)
req                46 crypto/authencesn.c 		aead_request_complete(req, err);
req                96 crypto/authencesn.c static int crypto_authenc_esn_genicv_tail(struct aead_request *req,
req                99 crypto/authencesn.c 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
req               101 crypto/authencesn.c 	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
req               106 crypto/authencesn.c 	unsigned int assoclen = req->assoclen;
req               107 crypto/authencesn.c 	unsigned int cryptlen = req->cryptlen;
req               108 crypto/authencesn.c 	struct scatterlist *dst = req->dst;
req               123 crypto/authencesn.c 	struct aead_request *req = areq->data;
req               125 crypto/authencesn.c 	err = err ?: crypto_authenc_esn_genicv_tail(req, 0);
req               126 crypto/authencesn.c 	aead_request_complete(req, err);
req               129 crypto/authencesn.c static int crypto_authenc_esn_genicv(struct aead_request *req,
req               132 crypto/authencesn.c 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
req               133 crypto/authencesn.c 	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
req               140 crypto/authencesn.c 	unsigned int assoclen = req->assoclen;
req               141 crypto/authencesn.c 	unsigned int cryptlen = req->cryptlen;
req               142 crypto/authencesn.c 	struct scatterlist *dst = req->dst;
req               159 crypto/authencesn.c 				   authenc_esn_geniv_ahash_done, req);
req               162 crypto/authencesn.c 	       crypto_authenc_esn_genicv_tail(req, aead_request_flags(req));
req               166 crypto/authencesn.c static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req,
req               169 crypto/authencesn.c 	struct aead_request *areq = req->data;
req               177 crypto/authencesn.c static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
req               179 crypto/authencesn.c 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
req               184 crypto/authencesn.c 	skcipher_request_set_callback(skreq, aead_request_flags(req),
req               186 crypto/authencesn.c 	skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL);
req               191 crypto/authencesn.c static int crypto_authenc_esn_encrypt(struct aead_request *req)
req               193 crypto/authencesn.c 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
req               194 crypto/authencesn.c 	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
req               199 crypto/authencesn.c 	unsigned int assoclen = req->assoclen;
req               200 crypto/authencesn.c 	unsigned int cryptlen = req->cryptlen;
req               205 crypto/authencesn.c 	src = scatterwalk_ffwd(areq_ctx->src, req->src, assoclen);
req               208 crypto/authencesn.c 	if (req->src != req->dst) {
req               209 crypto/authencesn.c 		err = crypto_authenc_esn_copy(req, assoclen);
req               214 crypto/authencesn.c 		dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen);
req               218 crypto/authencesn.c 	skcipher_request_set_callback(skreq, aead_request_flags(req),
req               219 crypto/authencesn.c 				      crypto_authenc_esn_encrypt_done, req);
req               220 crypto/authencesn.c 	skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
req               226 crypto/authencesn.c 	return crypto_authenc_esn_genicv(req, aead_request_flags(req));
req               229 crypto/authencesn.c static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
req               232 crypto/authencesn.c 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
req               234 crypto/authencesn.c 	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
req               241 crypto/authencesn.c 	unsigned int cryptlen = req->cryptlen - authsize;
req               242 crypto/authencesn.c 	unsigned int assoclen = req->assoclen;
req               243 crypto/authencesn.c 	struct scatterlist *dst = req->dst;
req               265 crypto/authencesn.c 				      req->base.complete, req->base.data);
req               266 crypto/authencesn.c 	skcipher_request_set_crypt(skreq, dst, dst, cryptlen, req->iv);
req               274 crypto/authencesn.c 	struct aead_request *req = areq->data;
req               276 crypto/authencesn.c 	err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
req               277 crypto/authencesn.c 	authenc_esn_request_complete(req, err);
req               280 crypto/authencesn.c static int crypto_authenc_esn_decrypt(struct aead_request *req)
req               282 crypto/authencesn.c 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
req               283 crypto/authencesn.c 	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
req               290 crypto/authencesn.c 	unsigned int assoclen = req->assoclen;
req               291 crypto/authencesn.c 	unsigned int cryptlen = req->cryptlen;
req               293 crypto/authencesn.c 	struct scatterlist *dst = req->dst;
req               299 crypto/authencesn.c 	if (req->src != dst) {
req               300 crypto/authencesn.c 		err = crypto_authenc_esn_copy(req, assoclen + cryptlen);
req               305 crypto/authencesn.c 	scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen,
req               321 crypto/authencesn.c 	ahash_request_set_callback(ahreq, aead_request_flags(req),
req               322 crypto/authencesn.c 				   authenc_esn_verify_ahash_done, req);
req               329 crypto/authencesn.c 	return crypto_authenc_esn_decrypt_tail(req, aead_request_flags(req));
req               412 crypto/blkcipher.c static int async_encrypt(struct ablkcipher_request *req)
req               414 crypto/blkcipher.c 	struct crypto_tfm *tfm = req->base.tfm;
req               418 crypto/blkcipher.c 		.info = req->info,
req               419 crypto/blkcipher.c 		.flags = req->base.flags,
req               423 crypto/blkcipher.c 	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
req               426 crypto/blkcipher.c static int async_decrypt(struct ablkcipher_request *req)
req               428 crypto/blkcipher.c 	struct crypto_tfm *tfm = req->base.tfm;
req               432 crypto/blkcipher.c 		.info = req->info,
req               433 crypto/blkcipher.c 		.flags = req->base.flags,
req               436 crypto/blkcipher.c 	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
req                23 crypto/cbc.c   static int crypto_cbc_encrypt(struct skcipher_request *req)
req                25 crypto/cbc.c   	return crypto_cbc_encrypt_walk(req, crypto_cbc_encrypt_one);
req                34 crypto/cbc.c   static int crypto_cbc_decrypt(struct skcipher_request *req)
req                36 crypto/cbc.c   	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req                40 crypto/cbc.c   	err = skcipher_walk_virt(&walk, req, false);
req                63 crypto/ccm.c   	struct aead_request *req)
req                65 crypto/ccm.c   	unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));
req                67 crypto/ccm.c   	return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
req               135 crypto/ccm.c   static int format_input(u8 *info, struct aead_request *req,
req               138 crypto/ccm.c   	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               139 crypto/ccm.c   	unsigned int lp = req->iv[0];
req               145 crypto/ccm.c   	memcpy(info, req->iv, 16);
req               151 crypto/ccm.c   	if (req->assoclen)
req               176 crypto/ccm.c   static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
req               179 crypto/ccm.c   	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
req               180 crypto/ccm.c   	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               183 crypto/ccm.c   	unsigned int assoclen = req->assoclen;
req               190 crypto/ccm.c   	err = format_input(odata, req, cryptlen);
req               201 crypto/ccm.c   		sg_chain(sg, 3, req->src);
req               204 crypto/ccm.c   		sg_chain(sg, 2, req->src);
req               237 crypto/ccm.c   	struct aead_request *req = areq->data;
req               238 crypto/ccm.c   	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               239 crypto/ccm.c   	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
req               243 crypto/ccm.c   		scatterwalk_map_and_copy(odata, req->dst,
req               244 crypto/ccm.c   					 req->assoclen + req->cryptlen,
req               246 crypto/ccm.c   	aead_request_complete(req, err);
req               258 crypto/ccm.c   static int crypto_ccm_init_crypt(struct aead_request *req, u8 *tag)
req               260 crypto/ccm.c   	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
req               262 crypto/ccm.c   	u8 *iv = req->iv;
req               269 crypto/ccm.c   	pctx->flags = aead_request_flags(req);
req               278 crypto/ccm.c   	sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen);
req               282 crypto/ccm.c   	if (req->src != req->dst) {
req               285 crypto/ccm.c   		sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen);
req               293 crypto/ccm.c   static int crypto_ccm_encrypt(struct aead_request *req)
req               295 crypto/ccm.c   	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               297 crypto/ccm.c   	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
req               300 crypto/ccm.c   	unsigned int cryptlen = req->cryptlen;
req               302 crypto/ccm.c   	u8 *iv = req->iv;
req               305 crypto/ccm.c   	err = crypto_ccm_init_crypt(req, odata);
req               309 crypto/ccm.c   	err = crypto_ccm_auth(req, sg_next(pctx->src), cryptlen);
req               314 crypto/ccm.c   	if (req->src != req->dst)
req               319 crypto/ccm.c   				      crypto_ccm_encrypt_done, req);
req               334 crypto/ccm.c   	struct aead_request *req = areq->data;
req               335 crypto/ccm.c   	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
req               336 crypto/ccm.c   	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               338 crypto/ccm.c   	unsigned int cryptlen = req->cryptlen - authsize;
req               343 crypto/ccm.c   	dst = sg_next(req->src == req->dst ? pctx->src : pctx->dst);
req               346 crypto/ccm.c   		err = crypto_ccm_auth(req, dst, cryptlen);
req               350 crypto/ccm.c   	aead_request_complete(req, err);
req               353 crypto/ccm.c   static int crypto_ccm_decrypt(struct aead_request *req)
req               355 crypto/ccm.c   	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               357 crypto/ccm.c   	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
req               361 crypto/ccm.c   	unsigned int cryptlen = req->cryptlen;
req               369 crypto/ccm.c   	err = crypto_ccm_init_crypt(req, authtag);
req               377 crypto/ccm.c   	if (req->src != req->dst)
req               380 crypto/ccm.c   	memcpy(iv, req->iv, 16);
req               384 crypto/ccm.c   				      crypto_ccm_decrypt_done, req);
req               390 crypto/ccm.c   	err = crypto_ccm_auth(req, sg_next(dst), cryptlen);
req               642 crypto/ccm.c   static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req)
req               644 crypto/ccm.c   	struct crypto_rfc4309_req_ctx *rctx = aead_request_ctx(req);
req               646 crypto/ccm.c   	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               657 crypto/ccm.c   	memcpy(iv + 4, req->iv, 8);
req               659 crypto/ccm.c   	scatterwalk_map_and_copy(iv + 16, req->src, 0, req->assoclen - 8, 0);
req               662 crypto/ccm.c   	sg_set_buf(rctx->src, iv + 16, req->assoclen - 8);
req               663 crypto/ccm.c   	sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
req               667 crypto/ccm.c   	if (req->src != req->dst) {
req               669 crypto/ccm.c   		sg_set_buf(rctx->dst, iv + 16, req->assoclen - 8);
req               670 crypto/ccm.c   		sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
req               676 crypto/ccm.c   	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
req               677 crypto/ccm.c   				  req->base.data);
req               679 crypto/ccm.c   			       req->src == req->dst ? rctx->src : rctx->dst,
req               680 crypto/ccm.c   			       req->cryptlen, iv);
req               681 crypto/ccm.c   	aead_request_set_ad(subreq, req->assoclen - 8);
req               686 crypto/ccm.c   static int crypto_rfc4309_encrypt(struct aead_request *req)
req               688 crypto/ccm.c   	if (req->assoclen != 16 && req->assoclen != 20)
req               691 crypto/ccm.c   	req = crypto_rfc4309_crypt(req);
req               693 crypto/ccm.c   	return crypto_aead_encrypt(req);
req               696 crypto/ccm.c   static int crypto_rfc4309_decrypt(struct aead_request *req)
req               698 crypto/ccm.c   	if (req->assoclen != 16 && req->assoclen != 20)
req               701 crypto/ccm.c   	req = crypto_rfc4309_crypt(req);
req               703 crypto/ccm.c   	return crypto_aead_decrypt(req);
req               102 crypto/cfb.c   static int crypto_cfb_encrypt(struct skcipher_request *req)
req               104 crypto/cfb.c   	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               109 crypto/cfb.c   	err = skcipher_walk_virt(&walk, req, false);
req               178 crypto/cfb.c   static int crypto_cfb_decrypt(struct skcipher_request *req)
req               180 crypto/cfb.c   	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               185 crypto/cfb.c   	err = skcipher_walk_virt(&walk, req, false);
req                44 crypto/chacha20poly1305.c 	struct ahash_request req; /* must be last member */
req                50 crypto/chacha20poly1305.c 	struct skcipher_request req; /* must be last member */
req                72 crypto/chacha20poly1305.c static inline void async_done_continue(struct aead_request *req, int err,
req                76 crypto/chacha20poly1305.c 		struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
req                79 crypto/chacha20poly1305.c 		err = cont(req);
req                83 crypto/chacha20poly1305.c 		aead_request_complete(req, err);
req                86 crypto/chacha20poly1305.c static void chacha_iv(u8 *iv, struct aead_request *req, u32 icb)
req                88 crypto/chacha20poly1305.c 	struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
req                93 crypto/chacha20poly1305.c 	memcpy(iv + sizeof(leicb) + ctx->saltlen, req->iv,
req                97 crypto/chacha20poly1305.c static int poly_verify_tag(struct aead_request *req)
req                99 crypto/chacha20poly1305.c 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
req               102 crypto/chacha20poly1305.c 	scatterwalk_map_and_copy(tag, req->src,
req               103 crypto/chacha20poly1305.c 				 req->assoclen + rctx->cryptlen,
req               110 crypto/chacha20poly1305.c static int poly_copy_tag(struct aead_request *req)
req               112 crypto/chacha20poly1305.c 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
req               114 crypto/chacha20poly1305.c 	scatterwalk_map_and_copy(rctx->tag, req->dst,
req               115 crypto/chacha20poly1305.c 				 req->assoclen + rctx->cryptlen,
req               125 crypto/chacha20poly1305.c static int chacha_decrypt(struct aead_request *req)
req               127 crypto/chacha20poly1305.c 	struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
req               128 crypto/chacha20poly1305.c 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
req               136 crypto/chacha20poly1305.c 	chacha_iv(creq->iv, req, 1);
req               138 crypto/chacha20poly1305.c 	src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
req               140 crypto/chacha20poly1305.c 	if (req->src != req->dst)
req               141 crypto/chacha20poly1305.c 		dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
req               143 crypto/chacha20poly1305.c 	skcipher_request_set_callback(&creq->req, rctx->flags,
req               144 crypto/chacha20poly1305.c 				      chacha_decrypt_done, req);
req               145 crypto/chacha20poly1305.c 	skcipher_request_set_tfm(&creq->req, ctx->chacha);
req               146 crypto/chacha20poly1305.c 	skcipher_request_set_crypt(&creq->req, src, dst,
req               148 crypto/chacha20poly1305.c 	err = crypto_skcipher_decrypt(&creq->req);
req               153 crypto/chacha20poly1305.c 	return poly_verify_tag(req);
req               156 crypto/chacha20poly1305.c static int poly_tail_continue(struct aead_request *req)
req               158 crypto/chacha20poly1305.c 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
req               160 crypto/chacha20poly1305.c 	if (rctx->cryptlen == req->cryptlen) /* encrypting */
req               161 crypto/chacha20poly1305.c 		return poly_copy_tag(req);
req               163 crypto/chacha20poly1305.c 	return chacha_decrypt(req);
req               171 crypto/chacha20poly1305.c static int poly_tail(struct aead_request *req)
req               173 crypto/chacha20poly1305.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               175 crypto/chacha20poly1305.c 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
req               183 crypto/chacha20poly1305.c 	ahash_request_set_callback(&preq->req, rctx->flags,
req               184 crypto/chacha20poly1305.c 				   poly_tail_done, req);
req               185 crypto/chacha20poly1305.c 	ahash_request_set_tfm(&preq->req, ctx->poly);
req               186 crypto/chacha20poly1305.c 	ahash_request_set_crypt(&preq->req, preq->src,
req               189 crypto/chacha20poly1305.c 	err = crypto_ahash_finup(&preq->req);
req               193 crypto/chacha20poly1305.c 	return poly_tail_continue(req);
req               201 crypto/chacha20poly1305.c static int poly_cipherpad(struct aead_request *req)
req               203 crypto/chacha20poly1305.c 	struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
req               204 crypto/chacha20poly1305.c 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
req               213 crypto/chacha20poly1305.c 	ahash_request_set_callback(&preq->req, rctx->flags,
req               214 crypto/chacha20poly1305.c 				   poly_cipherpad_done, req);
req               215 crypto/chacha20poly1305.c 	ahash_request_set_tfm(&preq->req, ctx->poly);
req               216 crypto/chacha20poly1305.c 	ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
req               218 crypto/chacha20poly1305.c 	err = crypto_ahash_update(&preq->req);
req               222 crypto/chacha20poly1305.c 	return poly_tail(req);
req               230 crypto/chacha20poly1305.c static int poly_cipher(struct aead_request *req)
req               232 crypto/chacha20poly1305.c 	struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
req               233 crypto/chacha20poly1305.c 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
req               235 crypto/chacha20poly1305.c 	struct scatterlist *crypt = req->src;
req               238 crypto/chacha20poly1305.c 	if (rctx->cryptlen == req->cryptlen) /* encrypting */
req               239 crypto/chacha20poly1305.c 		crypt = req->dst;
req               241 crypto/chacha20poly1305.c 	crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);
req               243 crypto/chacha20poly1305.c 	ahash_request_set_callback(&preq->req, rctx->flags,
req               244 crypto/chacha20poly1305.c 				   poly_cipher_done, req);
req               245 crypto/chacha20poly1305.c 	ahash_request_set_tfm(&preq->req, ctx->poly);
req               246 crypto/chacha20poly1305.c 	ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen);
req               248 crypto/chacha20poly1305.c 	err = crypto_ahash_update(&preq->req);
req               252 crypto/chacha20poly1305.c 	return poly_cipherpad(req);
req               260 crypto/chacha20poly1305.c static int poly_adpad(struct aead_request *req)
req               262 crypto/chacha20poly1305.c 	struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
req               263 crypto/chacha20poly1305.c 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
req               272 crypto/chacha20poly1305.c 	ahash_request_set_callback(&preq->req, rctx->flags,
req               273 crypto/chacha20poly1305.c 				   poly_adpad_done, req);
req               274 crypto/chacha20poly1305.c 	ahash_request_set_tfm(&preq->req, ctx->poly);
req               275 crypto/chacha20poly1305.c 	ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
req               277 crypto/chacha20poly1305.c 	err = crypto_ahash_update(&preq->req);
req               281 crypto/chacha20poly1305.c 	return poly_cipher(req);
req               289 crypto/chacha20poly1305.c static int poly_ad(struct aead_request *req)
req               291 crypto/chacha20poly1305.c 	struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
req               292 crypto/chacha20poly1305.c 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
req               296 crypto/chacha20poly1305.c 	ahash_request_set_callback(&preq->req, rctx->flags,
req               297 crypto/chacha20poly1305.c 				   poly_ad_done, req);
req               298 crypto/chacha20poly1305.c 	ahash_request_set_tfm(&preq->req, ctx->poly);
req               299 crypto/chacha20poly1305.c 	ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
req               301 crypto/chacha20poly1305.c 	err = crypto_ahash_update(&preq->req);
req               305 crypto/chacha20poly1305.c 	return poly_adpad(req);
req               313 crypto/chacha20poly1305.c static int poly_setkey(struct aead_request *req)
req               315 crypto/chacha20poly1305.c 	struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
req               316 crypto/chacha20poly1305.c 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
req               322 crypto/chacha20poly1305.c 	ahash_request_set_callback(&preq->req, rctx->flags,
req               323 crypto/chacha20poly1305.c 				   poly_setkey_done, req);
req               324 crypto/chacha20poly1305.c 	ahash_request_set_tfm(&preq->req, ctx->poly);
req               325 crypto/chacha20poly1305.c 	ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key));
req               327 crypto/chacha20poly1305.c 	err = crypto_ahash_update(&preq->req);
req               331 crypto/chacha20poly1305.c 	return poly_ad(req);
req               339 crypto/chacha20poly1305.c static int poly_init(struct aead_request *req)
req               341 crypto/chacha20poly1305.c 	struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
req               342 crypto/chacha20poly1305.c 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
req               346 crypto/chacha20poly1305.c 	ahash_request_set_callback(&preq->req, rctx->flags,
req               347 crypto/chacha20poly1305.c 				   poly_init_done, req);
req               348 crypto/chacha20poly1305.c 	ahash_request_set_tfm(&preq->req, ctx->poly);
req               350 crypto/chacha20poly1305.c 	err = crypto_ahash_init(&preq->req);
req               354 crypto/chacha20poly1305.c 	return poly_setkey(req);
req               362 crypto/chacha20poly1305.c static int poly_genkey(struct aead_request *req)
req               364 crypto/chacha20poly1305.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               366 crypto/chacha20poly1305.c 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
req               370 crypto/chacha20poly1305.c 	rctx->assoclen = req->assoclen;
req               381 crypto/chacha20poly1305.c 	chacha_iv(creq->iv, req, 0);
req               383 crypto/chacha20poly1305.c 	skcipher_request_set_callback(&creq->req, rctx->flags,
req               384 crypto/chacha20poly1305.c 				      poly_genkey_done, req);
req               385 crypto/chacha20poly1305.c 	skcipher_request_set_tfm(&creq->req, ctx->chacha);
req               386 crypto/chacha20poly1305.c 	skcipher_request_set_crypt(&creq->req, creq->src, creq->src,
req               389 crypto/chacha20poly1305.c 	err = crypto_skcipher_decrypt(&creq->req);
req               393 crypto/chacha20poly1305.c 	return poly_init(req);
req               401 crypto/chacha20poly1305.c static int chacha_encrypt(struct aead_request *req)
req               403 crypto/chacha20poly1305.c 	struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
req               404 crypto/chacha20poly1305.c 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
req               409 crypto/chacha20poly1305.c 	if (req->cryptlen == 0)
req               412 crypto/chacha20poly1305.c 	chacha_iv(creq->iv, req, 1);
req               414 crypto/chacha20poly1305.c 	src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
req               416 crypto/chacha20poly1305.c 	if (req->src != req->dst)
req               417 crypto/chacha20poly1305.c 		dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
req               419 crypto/chacha20poly1305.c 	skcipher_request_set_callback(&creq->req, rctx->flags,
req               420 crypto/chacha20poly1305.c 				      chacha_encrypt_done, req);
req               421 crypto/chacha20poly1305.c 	skcipher_request_set_tfm(&creq->req, ctx->chacha);
req               422 crypto/chacha20poly1305.c 	skcipher_request_set_crypt(&creq->req, src, dst,
req               423 crypto/chacha20poly1305.c 				   req->cryptlen, creq->iv);
req               424 crypto/chacha20poly1305.c 	err = crypto_skcipher_encrypt(&creq->req);
req               429 crypto/chacha20poly1305.c 	return poly_genkey(req);
req               432 crypto/chacha20poly1305.c static int chachapoly_encrypt(struct aead_request *req)
req               434 crypto/chacha20poly1305.c 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
req               436 crypto/chacha20poly1305.c 	rctx->cryptlen = req->cryptlen;
req               437 crypto/chacha20poly1305.c 	rctx->flags = aead_request_flags(req);
req               451 crypto/chacha20poly1305.c 	return chacha_encrypt(req);
req               454 crypto/chacha20poly1305.c static int chachapoly_decrypt(struct aead_request *req)
req               456 crypto/chacha20poly1305.c 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
req               458 crypto/chacha20poly1305.c 	rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE;
req               459 crypto/chacha20poly1305.c 	rctx->flags = aead_request_flags(req);
req               473 crypto/chacha20poly1305.c 	return poly_genkey(req);
req               535 crypto/chacha20poly1305.c 		max(offsetof(struct chacha_req, req) +
req               538 crypto/chacha20poly1305.c 		    offsetof(struct poly_req, req) +
req                34 crypto/chacha_generic.c static int chacha_stream_xor(struct skcipher_request *req,
req                41 crypto/chacha_generic.c 	err = skcipher_walk_virt(&walk, req, false);
req               110 crypto/chacha_generic.c int crypto_chacha_crypt(struct skcipher_request *req)
req               112 crypto/chacha_generic.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               115 crypto/chacha_generic.c 	return chacha_stream_xor(req, ctx, req->iv);
req               119 crypto/chacha_generic.c int crypto_xchacha_crypt(struct skcipher_request *req)
req               121 crypto/chacha_generic.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               128 crypto/chacha_generic.c 	crypto_chacha_init(state, ctx, req->iv);
req               133 crypto/chacha_generic.c 	memcpy(&real_iv[0], req->iv + 24, 8); /* stream position */
req               134 crypto/chacha_generic.c 	memcpy(&real_iv[8], req->iv + 16, 8); /* remaining 64 nonce bits */
req               137 crypto/chacha_generic.c 	return chacha_stream_xor(req, &subctx, real_iv);
req               160 crypto/cryptd.c 	struct crypto_async_request *req, *backlog;
req               172 crypto/cryptd.c 	req = crypto_dequeue_request(&cpu_queue->queue);
req               176 crypto/cryptd.c 	if (!req)
req               181 crypto/cryptd.c 	req->complete(req, 0);
req               268 crypto/cryptd.c static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
req               270 crypto/cryptd.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               272 crypto/cryptd.c 	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
req               276 crypto/cryptd.c 	rctx->complete(&req->base, err);
req               286 crypto/cryptd.c 	struct skcipher_request *req = skcipher_request_cast(base);
req               287 crypto/cryptd.c 	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
req               288 crypto/cryptd.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               299 crypto/cryptd.c 	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req               300 crypto/cryptd.c 				   req->iv);
req               305 crypto/cryptd.c 	req->base.complete = rctx->complete;
req               308 crypto/cryptd.c 	cryptd_skcipher_complete(req, err);
req               314 crypto/cryptd.c 	struct skcipher_request *req = skcipher_request_cast(base);
req               315 crypto/cryptd.c 	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
req               316 crypto/cryptd.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               327 crypto/cryptd.c 	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req               328 crypto/cryptd.c 				   req->iv);
req               333 crypto/cryptd.c 	req->base.complete = rctx->complete;
req               336 crypto/cryptd.c 	cryptd_skcipher_complete(req, err);
req               339 crypto/cryptd.c static int cryptd_skcipher_enqueue(struct skcipher_request *req,
req               342 crypto/cryptd.c 	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
req               343 crypto/cryptd.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               347 crypto/cryptd.c 	rctx->complete = req->base.complete;
req               348 crypto/cryptd.c 	req->base.complete = compl;
req               350 crypto/cryptd.c 	return cryptd_enqueue_request(queue, &req->base);
req               353 crypto/cryptd.c static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
req               355 crypto/cryptd.c 	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
req               358 crypto/cryptd.c static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
req               360 crypto/cryptd.c 	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
req               505 crypto/cryptd.c static int cryptd_hash_enqueue(struct ahash_request *req,
req               508 crypto/cryptd.c 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
req               509 crypto/cryptd.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               513 crypto/cryptd.c 	rctx->complete = req->base.complete;
req               514 crypto/cryptd.c 	req->base.complete = compl;
req               516 crypto/cryptd.c 	return cryptd_enqueue_request(queue, &req->base);
req               519 crypto/cryptd.c static void cryptd_hash_complete(struct ahash_request *req, int err)
req               521 crypto/cryptd.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               523 crypto/cryptd.c 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
req               527 crypto/cryptd.c 	rctx->complete(&req->base, err);
req               538 crypto/cryptd.c 	struct ahash_request *req = ahash_request_cast(req_async);
req               539 crypto/cryptd.c 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
req               549 crypto/cryptd.c 	req->base.complete = rctx->complete;
req               552 crypto/cryptd.c 	cryptd_hash_complete(req, err);
req               555 crypto/cryptd.c static int cryptd_hash_init_enqueue(struct ahash_request *req)
req               557 crypto/cryptd.c 	return cryptd_hash_enqueue(req, cryptd_hash_init);
req               562 crypto/cryptd.c 	struct ahash_request *req = ahash_request_cast(req_async);
req               565 crypto/cryptd.c 	rctx = ahash_request_ctx(req);
req               570 crypto/cryptd.c 	err = shash_ahash_update(req, &rctx->desc);
req               572 crypto/cryptd.c 	req->base.complete = rctx->complete;
req               575 crypto/cryptd.c 	cryptd_hash_complete(req, err);
req               578 crypto/cryptd.c static int cryptd_hash_update_enqueue(struct ahash_request *req)
req               580 crypto/cryptd.c 	return cryptd_hash_enqueue(req, cryptd_hash_update);
req               585 crypto/cryptd.c 	struct ahash_request *req = ahash_request_cast(req_async);
req               586 crypto/cryptd.c 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
req               591 crypto/cryptd.c 	err = crypto_shash_final(&rctx->desc, req->result);
req               593 crypto/cryptd.c 	req->base.complete = rctx->complete;
req               596 crypto/cryptd.c 	cryptd_hash_complete(req, err);
req               599 crypto/cryptd.c static int cryptd_hash_final_enqueue(struct ahash_request *req)
req               601 crypto/cryptd.c 	return cryptd_hash_enqueue(req, cryptd_hash_final);
req               606 crypto/cryptd.c 	struct ahash_request *req = ahash_request_cast(req_async);
req               607 crypto/cryptd.c 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
req               612 crypto/cryptd.c 	err = shash_ahash_finup(req, &rctx->desc);
req               614 crypto/cryptd.c 	req->base.complete = rctx->complete;
req               617 crypto/cryptd.c 	cryptd_hash_complete(req, err);
req               620 crypto/cryptd.c static int cryptd_hash_finup_enqueue(struct ahash_request *req)
req               622 crypto/cryptd.c 	return cryptd_hash_enqueue(req, cryptd_hash_finup);
req               629 crypto/cryptd.c 	struct ahash_request *req = ahash_request_cast(req_async);
req               630 crypto/cryptd.c 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
req               638 crypto/cryptd.c 	err = shash_ahash_digest(req, desc);
req               640 crypto/cryptd.c 	req->base.complete = rctx->complete;
req               643 crypto/cryptd.c 	cryptd_hash_complete(req, err);
req               646 crypto/cryptd.c static int cryptd_hash_digest_enqueue(struct ahash_request *req)
req               648 crypto/cryptd.c 	return cryptd_hash_enqueue(req, cryptd_hash_digest);
req               651 crypto/cryptd.c static int cryptd_hash_export(struct ahash_request *req, void *out)
req               653 crypto/cryptd.c 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
req               658 crypto/cryptd.c static int cryptd_hash_import(struct ahash_request *req, const void *in)
req               660 crypto/cryptd.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               662 crypto/cryptd.c 	struct shash_desc *desc = cryptd_shash_desc(req);
req               752 crypto/cryptd.c static void cryptd_aead_crypt(struct aead_request *req,
req               755 crypto/cryptd.c 			int (*crypt)(struct aead_request *req))
req               763 crypto/cryptd.c 	rctx = aead_request_ctx(req);
req               766 crypto/cryptd.c 	tfm = crypto_aead_reqtfm(req);
req               770 crypto/cryptd.c 	aead_request_set_tfm(req, child);
req               771 crypto/cryptd.c 	err = crypt( req );
req               778 crypto/cryptd.c 	compl(&req->base, err);
req               789 crypto/cryptd.c 	struct aead_request *req;
req               791 crypto/cryptd.c 	req = container_of(areq, struct aead_request, base);
req               792 crypto/cryptd.c 	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
req               799 crypto/cryptd.c 	struct aead_request *req;
req               801 crypto/cryptd.c 	req = container_of(areq, struct aead_request, base);
req               802 crypto/cryptd.c 	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
req               805 crypto/cryptd.c static int cryptd_aead_enqueue(struct aead_request *req,
req               808 crypto/cryptd.c 	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
req               809 crypto/cryptd.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               812 crypto/cryptd.c 	rctx->complete = req->base.complete;
req               813 crypto/cryptd.c 	req->base.complete = compl;
req               814 crypto/cryptd.c 	return cryptd_enqueue_request(queue, &req->base);
req               817 crypto/cryptd.c static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
req               819 crypto/cryptd.c 	return cryptd_aead_enqueue(req, cryptd_aead_encrypt );
req               822 crypto/cryptd.c static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
req               824 crypto/cryptd.c 	return cryptd_aead_enqueue(req, cryptd_aead_decrypt );
req              1046 crypto/cryptd.c struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
req              1048 crypto/cryptd.c 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
req                25 crypto/crypto_engine.c 			     struct crypto_async_request *req, int err)
req                33 crypto/crypto_engine.c 	if (engine->cur_req == req)
req                38 crypto/crypto_engine.c 		enginectx = crypto_tfm_ctx(req->tfm);
req                41 crypto/crypto_engine.c 			ret = enginectx->op.unprepare_request(engine, req);
req                51 crypto/crypto_engine.c 	req->complete(req, err);
req               182 crypto/crypto_engine.c 				   struct crypto_async_request *req,
req               195 crypto/crypto_engine.c 	ret = crypto_enqueue_request(&engine->queue, req);
req               211 crypto/crypto_engine.c 					     struct crypto_async_request *req)
req               213 crypto/crypto_engine.c 	return crypto_transfer_request(engine, req, true);
req               224 crypto/crypto_engine.c 						 struct ablkcipher_request *req)
req               226 crypto/crypto_engine.c 	return crypto_transfer_request_to_engine(engine, &req->base);
req               237 crypto/crypto_engine.c 					   struct aead_request *req)
req               239 crypto/crypto_engine.c 	return crypto_transfer_request_to_engine(engine, &req->base);
req               250 crypto/crypto_engine.c 					       struct akcipher_request *req)
req               252 crypto/crypto_engine.c 	return crypto_transfer_request_to_engine(engine, &req->base);
req               263 crypto/crypto_engine.c 					   struct ahash_request *req)
req               265 crypto/crypto_engine.c 	return crypto_transfer_request_to_engine(engine, &req->base);
req               276 crypto/crypto_engine.c 					       struct skcipher_request *req)
req               278 crypto/crypto_engine.c 	return crypto_transfer_request_to_engine(engine, &req->base);
req               291 crypto/crypto_engine.c 					struct ablkcipher_request *req, int err)
req               293 crypto/crypto_engine.c 	return crypto_finalize_request(engine, &req->base, err);
req               305 crypto/crypto_engine.c 				  struct aead_request *req, int err)
req               307 crypto/crypto_engine.c 	return crypto_finalize_request(engine, &req->base, err);
req               319 crypto/crypto_engine.c 				      struct akcipher_request *req, int err)
req               321 crypto/crypto_engine.c 	return crypto_finalize_request(engine, &req->base, err);
req               333 crypto/crypto_engine.c 				  struct ahash_request *req, int err)
req               335 crypto/crypto_engine.c 	return crypto_finalize_request(engine, &req->base, err);
req               347 crypto/crypto_engine.c 				      struct skcipher_request *req, int err)
req               349 crypto/crypto_engine.c 	return crypto_finalize_request(engine, &req->base, err);
req                76 crypto/crypto_null.c static int null_skcipher_crypt(struct skcipher_request *req)
req                81 crypto/crypto_null.c 	err = skcipher_walk_virt(&walk, req, false);
req                98 crypto/ctr.c   static int crypto_ctr_crypt(struct skcipher_request *req)
req               100 crypto/ctr.c   	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               107 crypto/ctr.c   	err = skcipher_walk_virt(&walk, req, false);
req               195 crypto/ctr.c   static int crypto_rfc3686_crypt(struct skcipher_request *req)
req               197 crypto/ctr.c   	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               202 crypto/ctr.c   		(void *)PTR_ALIGN((u8 *)skcipher_request_ctx(req), align + 1);
req               208 crypto/ctr.c   	memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE);
req               215 crypto/ctr.c   	skcipher_request_set_callback(subreq, req->base.flags,
req               216 crypto/ctr.c   				      req->base.complete, req->base.data);
req               217 crypto/ctr.c   	skcipher_request_set_crypt(subreq, req->src, req->dst,
req               218 crypto/ctr.c   				   req->cryptlen, iv);
req                65 crypto/cts.c   static inline u8 *crypto_cts_reqctx_space(struct skcipher_request *req)
req                67 crypto/cts.c   	struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
req                68 crypto/cts.c   	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req                94 crypto/cts.c   	struct skcipher_request *req = areq->data;
req                99 crypto/cts.c   	skcipher_request_complete(req, err);
req               102 crypto/cts.c   static int cts_cbc_encrypt(struct skcipher_request *req)
req               104 crypto/cts.c   	struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
req               105 crypto/cts.c   	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               114 crypto/cts.c   	lastn = req->cryptlen - offset;
req               116 crypto/cts.c   	sg = scatterwalk_ffwd(rctx->sg, req->dst, offset - bsize);
req               120 crypto/cts.c   	scatterwalk_map_and_copy(d, req->src, offset, lastn, 0);
req               125 crypto/cts.c   	skcipher_request_set_callback(subreq, req->base.flags &
req               127 crypto/cts.c   				      cts_cbc_crypt_done, req);
req               128 crypto/cts.c   	skcipher_request_set_crypt(subreq, sg, sg, bsize, req->iv);
req               134 crypto/cts.c   	struct skcipher_request *req = areq->data;
req               139 crypto/cts.c   	err = cts_cbc_encrypt(req);
req               144 crypto/cts.c   	skcipher_request_complete(req, err);
req               147 crypto/cts.c   static int crypto_cts_encrypt(struct skcipher_request *req)
req               149 crypto/cts.c   	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               150 crypto/cts.c   	struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
req               154 crypto/cts.c   	unsigned int nbytes = req->cryptlen;
req               163 crypto/cts.c   		skcipher_request_set_callback(subreq, req->base.flags,
req               164 crypto/cts.c   					      req->base.complete,
req               165 crypto/cts.c   					      req->base.data);
req               166 crypto/cts.c   		skcipher_request_set_crypt(subreq, req->src, req->dst, nbytes,
req               167 crypto/cts.c   					   req->iv);
req               174 crypto/cts.c   	skcipher_request_set_callback(subreq, req->base.flags,
req               175 crypto/cts.c   				      crypto_cts_encrypt_done, req);
req               176 crypto/cts.c   	skcipher_request_set_crypt(subreq, req->src, req->dst,
req               177 crypto/cts.c   				   offset, req->iv);
req               180 crypto/cts.c   	       cts_cbc_encrypt(req);
req               183 crypto/cts.c   static int cts_cbc_decrypt(struct skcipher_request *req)
req               185 crypto/cts.c   	struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
req               186 crypto/cts.c   	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               196 crypto/cts.c   	lastn = req->cryptlen - offset;
req               198 crypto/cts.c   	sg = scatterwalk_ffwd(rctx->sg, req->dst, offset - bsize);
req               202 crypto/cts.c   	space = crypto_cts_reqctx_space(req);
req               206 crypto/cts.c   	scatterwalk_map_and_copy(d, req->src, offset, lastn, 0);
req               218 crypto/cts.c   	skcipher_request_set_callback(subreq, req->base.flags &
req               220 crypto/cts.c   				      cts_cbc_crypt_done, req);
req               228 crypto/cts.c   	struct skcipher_request *req = areq->data;
req               233 crypto/cts.c   	err = cts_cbc_decrypt(req);
req               238 crypto/cts.c   	skcipher_request_complete(req, err);
req               241 crypto/cts.c   static int crypto_cts_decrypt(struct skcipher_request *req)
req               243 crypto/cts.c   	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               244 crypto/cts.c   	struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
req               248 crypto/cts.c   	unsigned int nbytes = req->cryptlen;
req               258 crypto/cts.c   		skcipher_request_set_callback(subreq, req->base.flags,
req               259 crypto/cts.c   					      req->base.complete,
req               260 crypto/cts.c   					      req->base.data);
req               261 crypto/cts.c   		skcipher_request_set_crypt(subreq, req->src, req->dst, nbytes,
req               262 crypto/cts.c   					   req->iv);
req               266 crypto/cts.c   	skcipher_request_set_callback(subreq, req->base.flags,
req               267 crypto/cts.c   				      crypto_cts_decrypt_done, req);
req               269 crypto/cts.c   	space = crypto_cts_reqctx_space(req);
req               275 crypto/cts.c   		memcpy(space, req->iv, bsize);
req               277 crypto/cts.c   		scatterwalk_map_and_copy(space, req->src, offset - 2 * bsize,
req               280 crypto/cts.c   	skcipher_request_set_crypt(subreq, req->src, req->dst,
req               281 crypto/cts.c   				   offset, req->iv);
req               284 crypto/cts.c   	       cts_cbc_decrypt(req);
req               149 crypto/dh.c    static int dh_compute_value(struct kpp_request *req)
req               151 crypto/dh.c    	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
req               165 crypto/dh.c    	if (req->src) {
req               166 crypto/dh.c    		base = mpi_read_raw_from_sgl(req->src, req->src_len);
req               182 crypto/dh.c    	ret = mpi_write_to_sgl(val, req->dst, req->dst_len, &sign);
req               189 crypto/dh.c    	if (req->src)
req               982 crypto/drbg.c  		unsigned char req[8];
req              1014 crypto/drbg.c  	drbg_add_buf(drbg->V, drbg_statelen(drbg), u.req, 8);
req              1744 crypto/drbg.c  	struct skcipher_request *req;
req              1772 crypto/drbg.c  	req = skcipher_request_alloc(sk_tfm, GFP_KERNEL);
req              1773 crypto/drbg.c  	if (!req) {
req              1778 crypto/drbg.c  	drbg->ctr_req = req;
req              1779 crypto/drbg.c  	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
req                15 crypto/ecb.c   static int crypto_ecb_crypt(struct skcipher_request *req,
req                24 crypto/ecb.c   	err = skcipher_walk_virt(&walk, req, false);
req                43 crypto/ecb.c   static int crypto_ecb_encrypt(struct skcipher_request *req)
req                45 crypto/ecb.c   	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req                48 crypto/ecb.c   	return crypto_ecb_crypt(req, cipher,
req                52 crypto/ecb.c   static int crypto_ecb_decrypt(struct skcipher_request *req)
req                54 crypto/ecb.c   	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req                57 crypto/ecb.c   	return crypto_ecb_crypt(req, cipher,
req                65 crypto/ecdh.c  static int ecdh_compute_value(struct kpp_request *req)
req                67 crypto/ecdh.c  	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
req                83 crypto/ecdh.c  	if (req->src) {
req                92 crypto/ecdh.c  		if (public_key_sz != req->src_len)
req                95 crypto/ecdh.c  		copied = sg_copy_to_buffer(req->src,
req                96 crypto/ecdh.c  					   sg_nents_for_len(req->src,
req               118 crypto/ecdh.c  	nbytes = min_t(size_t, nbytes, req->dst_len);
req               119 crypto/ecdh.c  	copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst,
req                26 crypto/echainiv.c static int echainiv_encrypt(struct aead_request *req)
req                28 crypto/echainiv.c 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
req                30 crypto/echainiv.c 	struct aead_request *subreq = aead_request_ctx(req);
req                37 crypto/echainiv.c 	if (req->cryptlen < ivsize)
req                42 crypto/echainiv.c 	info = req->iv;
req                44 crypto/echainiv.c 	if (req->src != req->dst) {
req                48 crypto/echainiv.c 		skcipher_request_set_callback(nreq, req->base.flags,
req                50 crypto/echainiv.c 		skcipher_request_set_crypt(nreq, req->src, req->dst,
req                51 crypto/echainiv.c 					   req->assoclen + req->cryptlen,
req                59 crypto/echainiv.c 	aead_request_set_callback(subreq, req->base.flags,
req                60 crypto/echainiv.c 				  req->base.complete, req->base.data);
req                61 crypto/echainiv.c 	aead_request_set_crypt(subreq, req->dst, req->dst,
req                62 crypto/echainiv.c 			       req->cryptlen, info);
req                63 crypto/echainiv.c 	aead_request_set_ad(subreq, req->assoclen);
req                69 crypto/echainiv.c 	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
req                85 crypto/echainiv.c static int echainiv_decrypt(struct aead_request *req)
req                87 crypto/echainiv.c 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
req                89 crypto/echainiv.c 	struct aead_request *subreq = aead_request_ctx(req);
req                94 crypto/echainiv.c 	if (req->cryptlen < ivsize)
req                99 crypto/echainiv.c 	compl = req->base.complete;
req               100 crypto/echainiv.c 	data = req->base.data;
req               102 crypto/echainiv.c 	aead_request_set_callback(subreq, req->base.flags, compl, data);
req               103 crypto/echainiv.c 	aead_request_set_crypt(subreq, req->src, req->dst,
req               104 crypto/echainiv.c 			       req->cryptlen - ivsize, req->iv);
req               105 crypto/echainiv.c 	aead_request_set_ad(subreq, req->assoclen + ivsize);
req               107 crypto/echainiv.c 	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
req                70 crypto/ecrdsa.c static int ecrdsa_verify(struct akcipher_request *req)
req                72 crypto/ecrdsa.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req                76 crypto/ecrdsa.c 	unsigned int ndigits = req->dst_len / sizeof(u64);
req                93 crypto/ecrdsa.c 	    !req->src ||
req                95 crypto/ecrdsa.c 	    req->dst_len != ctx->digest_len ||
req                96 crypto/ecrdsa.c 	    req->dst_len != ctx->curve->g.ndigits * sizeof(u64) ||
req                98 crypto/ecrdsa.c 	    req->dst_len * 2 != req->src_len ||
req                99 crypto/ecrdsa.c 	    WARN_ON(req->src_len > sizeof(sig)) ||
req               100 crypto/ecrdsa.c 	    WARN_ON(req->dst_len > sizeof(digest)))
req               103 crypto/ecrdsa.c 	sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, req->src_len),
req               104 crypto/ecrdsa.c 			  sig, req->src_len);
req               105 crypto/ecrdsa.c 	sg_pcopy_to_buffer(req->src,
req               106 crypto/ecrdsa.c 			   sg_nents_for_len(req->src,
req               107 crypto/ecrdsa.c 					    req->src_len + req->dst_len),
req               108 crypto/ecrdsa.c 			   digest, req->dst_len, req->src_len);
req               153 crypto/essiv.c 	struct skcipher_request *req = areq->data;
req               155 crypto/essiv.c 	skcipher_request_complete(req, err);
req               158 crypto/essiv.c static int essiv_skcipher_crypt(struct skcipher_request *req, bool enc)
req               160 crypto/essiv.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               162 crypto/essiv.c 	struct skcipher_request *subreq = skcipher_request_ctx(req);
req               164 crypto/essiv.c 	crypto_cipher_encrypt_one(tctx->essiv_cipher, req->iv, req->iv);
req               167 crypto/essiv.c 	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req               168 crypto/essiv.c 				   req->iv);
req               169 crypto/essiv.c 	skcipher_request_set_callback(subreq, skcipher_request_flags(req),
req               170 crypto/essiv.c 				      essiv_skcipher_done, req);
req               176 crypto/essiv.c static int essiv_skcipher_encrypt(struct skcipher_request *req)
req               178 crypto/essiv.c 	return essiv_skcipher_crypt(req, true);
req               181 crypto/essiv.c static int essiv_skcipher_decrypt(struct skcipher_request *req)
req               183 crypto/essiv.c 	return essiv_skcipher_crypt(req, false);
req               188 crypto/essiv.c 	struct aead_request *req = areq->data;
req               189 crypto/essiv.c 	struct essiv_aead_request_ctx *rctx = aead_request_ctx(req);
req               193 crypto/essiv.c 	aead_request_complete(req, err);
req               196 crypto/essiv.c static int essiv_aead_crypt(struct aead_request *req, bool enc)
req               198 crypto/essiv.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               200 crypto/essiv.c 	struct essiv_aead_request_ctx *rctx = aead_request_ctx(req);
req               202 crypto/essiv.c 	struct scatterlist *src = req->src;
req               205 crypto/essiv.c 	crypto_cipher_encrypt_one(tctx->essiv_cipher, req->iv, req->iv);
req               213 crypto/essiv.c 	if (req->src == req->dst || !enc) {
req               214 crypto/essiv.c 		scatterwalk_map_and_copy(req->iv, req->dst,
req               215 crypto/essiv.c 					 req->assoclen - crypto_aead_ivsize(tfm),
req               218 crypto/essiv.c 		u8 *iv = (u8 *)aead_request_ctx(req) + tctx->ivoffset;
req               220 crypto/essiv.c 		int ssize = req->assoclen - ivsize;
req               227 crypto/essiv.c 		nents = sg_nents_for_len(req->src, ssize);
req               231 crypto/essiv.c 		memcpy(iv, req->iv, ivsize);
req               243 crypto/essiv.c 			scatterwalk_map_and_copy(rctx->assoc, req->src, 0,
req               247 crypto/essiv.c 			sg_set_page(rctx->sg, sg_page(req->src), ssize,
req               248 crypto/essiv.c 				    req->src->offset);
req               252 crypto/essiv.c 		sg = scatterwalk_ffwd(rctx->sg + 2, req->src, req->assoclen);
req               260 crypto/essiv.c 	aead_request_set_ad(subreq, req->assoclen);
req               261 crypto/essiv.c 	aead_request_set_callback(subreq, aead_request_flags(req),
req               262 crypto/essiv.c 				  essiv_aead_done, req);
req               263 crypto/essiv.c 	aead_request_set_crypt(subreq, src, req->dst, req->cryptlen, req->iv);
req               273 crypto/essiv.c static int essiv_aead_encrypt(struct aead_request *req)
req               275 crypto/essiv.c 	return essiv_aead_crypt(req, true);
req               278 crypto/essiv.c static int essiv_aead_decrypt(struct aead_request *req)
req               280 crypto/essiv.c 	return essiv_aead_crypt(req, false);
req                61 crypto/gcm.c   	int (*complete)(struct aead_request *req, u32 flags);
req                83 crypto/gcm.c   static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc);
req                86 crypto/gcm.c   	struct aead_request *req)
req                88 crypto/gcm.c   	unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));
req                90 crypto/gcm.c   	return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
req               106 crypto/gcm.c   		struct skcipher_request req;
req               126 crypto/gcm.c   	skcipher_request_set_tfm(&data->req, ctr);
req               127 crypto/gcm.c   	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
req               131 crypto/gcm.c   	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
req               134 crypto/gcm.c   	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
req               158 crypto/gcm.c   static void crypto_gcm_init_common(struct aead_request *req)
req               160 crypto/gcm.c   	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
req               165 crypto/gcm.c   	memcpy(pctx->iv, req->iv, GCM_AES_IV_SIZE);
req               170 crypto/gcm.c   	sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen);
req               174 crypto/gcm.c   	if (req->src != req->dst) {
req               177 crypto/gcm.c   		sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen);
req               183 crypto/gcm.c   static void crypto_gcm_init_crypt(struct aead_request *req,
req               186 crypto/gcm.c   	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               188 crypto/gcm.c   	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
req               192 crypto/gcm.c   	dst = req->src == req->dst ? pctx->src : pctx->dst;
req               208 crypto/gcm.c   static int gcm_hash_update(struct aead_request *req,
req               213 crypto/gcm.c   	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
req               216 crypto/gcm.c   	ahash_request_set_callback(ahreq, flags, compl, req);
req               222 crypto/gcm.c   static int gcm_hash_remain(struct aead_request *req,
req               226 crypto/gcm.c   	return gcm_hash_update(req, compl, &gcm_zeroes->sg, remain, flags);
req               229 crypto/gcm.c   static int gcm_hash_len(struct aead_request *req, u32 flags)
req               231 crypto/gcm.c   	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
req               236 crypto/gcm.c   	lengths.a = cpu_to_be64(req->assoclen * 8);
req               240 crypto/gcm.c   	ahash_request_set_callback(ahreq, flags, gcm_hash_len_done, req);
req               247 crypto/gcm.c   static int gcm_hash_len_continue(struct aead_request *req, u32 flags)
req               249 crypto/gcm.c   	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
req               252 crypto/gcm.c   	return gctx->complete(req, flags);
req               257 crypto/gcm.c   	struct aead_request *req = areq->data;
req               262 crypto/gcm.c   	err = gcm_hash_len_continue(req, 0);
req               267 crypto/gcm.c   	aead_request_complete(req, err);
req               270 crypto/gcm.c   static int gcm_hash_crypt_remain_continue(struct aead_request *req, u32 flags)
req               272 crypto/gcm.c   	return gcm_hash_len(req, flags) ?:
req               273 crypto/gcm.c   	       gcm_hash_len_continue(req, flags);
req               279 crypto/gcm.c   	struct aead_request *req = areq->data;
req               284 crypto/gcm.c   	err = gcm_hash_crypt_remain_continue(req, 0);
req               289 crypto/gcm.c   	aead_request_complete(req, err);
req               292 crypto/gcm.c   static int gcm_hash_crypt_continue(struct aead_request *req, u32 flags)
req               294 crypto/gcm.c   	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
req               300 crypto/gcm.c   		return gcm_hash_remain(req, remain,
req               302 crypto/gcm.c   		       gcm_hash_crypt_remain_continue(req, flags);
req               304 crypto/gcm.c   	return gcm_hash_crypt_remain_continue(req, flags);
req               309 crypto/gcm.c   	struct aead_request *req = areq->data;
req               314 crypto/gcm.c   	err = gcm_hash_crypt_continue(req, 0);
req               319 crypto/gcm.c   	aead_request_complete(req, err);
req               322 crypto/gcm.c   static int gcm_hash_assoc_remain_continue(struct aead_request *req, u32 flags)
req               324 crypto/gcm.c   	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
req               328 crypto/gcm.c   		return gcm_hash_update(req, gcm_hash_crypt_done,
req               330 crypto/gcm.c   		       gcm_hash_crypt_continue(req, flags);
req               332 crypto/gcm.c   	return gcm_hash_crypt_remain_continue(req, flags);
req               338 crypto/gcm.c   	struct aead_request *req = areq->data;
req               343 crypto/gcm.c   	err = gcm_hash_assoc_remain_continue(req, 0);
req               348 crypto/gcm.c   	aead_request_complete(req, err);
req               351 crypto/gcm.c   static int gcm_hash_assoc_continue(struct aead_request *req, u32 flags)
req               355 crypto/gcm.c   	remain = gcm_remain(req->assoclen);
req               357 crypto/gcm.c   		return gcm_hash_remain(req, remain,
req               359 crypto/gcm.c   		       gcm_hash_assoc_remain_continue(req, flags);
req               361 crypto/gcm.c   	return gcm_hash_assoc_remain_continue(req, flags);
req               366 crypto/gcm.c   	struct aead_request *req = areq->data;
req               371 crypto/gcm.c   	err = gcm_hash_assoc_continue(req, 0);
req               376 crypto/gcm.c   	aead_request_complete(req, err);
req               379 crypto/gcm.c   static int gcm_hash_init_continue(struct aead_request *req, u32 flags)
req               381 crypto/gcm.c   	if (req->assoclen)
req               382 crypto/gcm.c   		return gcm_hash_update(req, gcm_hash_assoc_done,
req               383 crypto/gcm.c   				       req->src, req->assoclen, flags) ?:
req               384 crypto/gcm.c   		       gcm_hash_assoc_continue(req, flags);
req               386 crypto/gcm.c   	return gcm_hash_assoc_remain_continue(req, flags);
req               391 crypto/gcm.c   	struct aead_request *req = areq->data;
req               396 crypto/gcm.c   	err = gcm_hash_init_continue(req, 0);
req               401 crypto/gcm.c   	aead_request_complete(req, err);
req               404 crypto/gcm.c   static int gcm_hash(struct aead_request *req, u32 flags)
req               406 crypto/gcm.c   	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
req               408 crypto/gcm.c   	struct crypto_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
req               412 crypto/gcm.c   	ahash_request_set_callback(ahreq, flags, gcm_hash_init_done, req);
req               414 crypto/gcm.c   	       gcm_hash_init_continue(req, flags);
req               417 crypto/gcm.c   static int gcm_enc_copy_hash(struct aead_request *req, u32 flags)
req               419 crypto/gcm.c   	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
req               420 crypto/gcm.c   	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               424 crypto/gcm.c   	scatterwalk_map_and_copy(auth_tag, req->dst,
req               425 crypto/gcm.c   				 req->assoclen + req->cryptlen,
req               430 crypto/gcm.c   static int gcm_encrypt_continue(struct aead_request *req, u32 flags)
req               432 crypto/gcm.c   	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
req               435 crypto/gcm.c   	gctx->src = sg_next(req->src == req->dst ? pctx->src : pctx->dst);
req               436 crypto/gcm.c   	gctx->cryptlen = req->cryptlen;
req               439 crypto/gcm.c   	return gcm_hash(req, flags);
req               444 crypto/gcm.c   	struct aead_request *req = areq->data;
req               449 crypto/gcm.c   	err = gcm_encrypt_continue(req, 0);
req               454 crypto/gcm.c   	aead_request_complete(req, err);
req               457 crypto/gcm.c   static int crypto_gcm_encrypt(struct aead_request *req)
req               459 crypto/gcm.c   	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
req               461 crypto/gcm.c   	u32 flags = aead_request_flags(req);
req               463 crypto/gcm.c   	crypto_gcm_init_common(req);
req               464 crypto/gcm.c   	crypto_gcm_init_crypt(req, req->cryptlen);
req               465 crypto/gcm.c   	skcipher_request_set_callback(skreq, flags, gcm_encrypt_done, req);
req               468 crypto/gcm.c   	       gcm_encrypt_continue(req, flags);
req               471 crypto/gcm.c   static int crypto_gcm_verify(struct aead_request *req)
req               473 crypto/gcm.c   	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
req               474 crypto/gcm.c   	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               478 crypto/gcm.c   	unsigned int cryptlen = req->cryptlen - authsize;
req               481 crypto/gcm.c   	scatterwalk_map_and_copy(iauth_tag, req->src,
req               482 crypto/gcm.c   				 req->assoclen + cryptlen, authsize, 0);
req               488 crypto/gcm.c   	struct aead_request *req = areq->data;
req               491 crypto/gcm.c   		err = crypto_gcm_verify(req);
req               493 crypto/gcm.c   	aead_request_complete(req, err);
req               496 crypto/gcm.c   static int gcm_dec_hash_continue(struct aead_request *req, u32 flags)
req               498 crypto/gcm.c   	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
req               502 crypto/gcm.c   	crypto_gcm_init_crypt(req, gctx->cryptlen);
req               503 crypto/gcm.c   	skcipher_request_set_callback(skreq, flags, gcm_decrypt_done, req);
req               504 crypto/gcm.c   	return crypto_skcipher_decrypt(skreq) ?: crypto_gcm_verify(req);
req               507 crypto/gcm.c   static int crypto_gcm_decrypt(struct aead_request *req)
req               509 crypto/gcm.c   	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               510 crypto/gcm.c   	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
req               513 crypto/gcm.c   	unsigned int cryptlen = req->cryptlen;
req               514 crypto/gcm.c   	u32 flags = aead_request_flags(req);
req               518 crypto/gcm.c   	crypto_gcm_init_common(req);
req               524 crypto/gcm.c   	return gcm_hash(req, flags);
req               761 crypto/gcm.c   static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req)
req               763 crypto/gcm.c   	struct crypto_rfc4106_req_ctx *rctx = aead_request_ctx(req);
req               764 crypto/gcm.c   	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               772 crypto/gcm.c   	scatterwalk_map_and_copy(iv + GCM_AES_IV_SIZE, req->src, 0, req->assoclen - 8, 0);
req               775 crypto/gcm.c   	memcpy(iv + 4, req->iv, 8);
req               778 crypto/gcm.c   	sg_set_buf(rctx->src, iv + GCM_AES_IV_SIZE, req->assoclen - 8);
req               779 crypto/gcm.c   	sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
req               783 crypto/gcm.c   	if (req->src != req->dst) {
req               785 crypto/gcm.c   		sg_set_buf(rctx->dst, iv + GCM_AES_IV_SIZE, req->assoclen - 8);
req               786 crypto/gcm.c   		sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
req               792 crypto/gcm.c   	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
req               793 crypto/gcm.c   				  req->base.data);
req               795 crypto/gcm.c   			       req->src == req->dst ? rctx->src : rctx->dst,
req               796 crypto/gcm.c   			       req->cryptlen, iv);
req               797 crypto/gcm.c   	aead_request_set_ad(subreq, req->assoclen - 8);
req               802 crypto/gcm.c   static int crypto_rfc4106_encrypt(struct aead_request *req)
req               806 crypto/gcm.c   	err = crypto_ipsec_check_assoclen(req->assoclen);
req               810 crypto/gcm.c   	req = crypto_rfc4106_crypt(req);
req               812 crypto/gcm.c   	return crypto_aead_encrypt(req);
req               815 crypto/gcm.c   static int crypto_rfc4106_decrypt(struct aead_request *req)
req               819 crypto/gcm.c   	err = crypto_ipsec_check_assoclen(req->assoclen);
req               823 crypto/gcm.c   	req = crypto_rfc4106_crypt(req);
req               825 crypto/gcm.c   	return crypto_aead_decrypt(req);
req               988 crypto/gcm.c   static int crypto_rfc4543_crypt(struct aead_request *req, bool enc)
req               990 crypto/gcm.c   	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               992 crypto/gcm.c   	struct crypto_rfc4543_req_ctx *rctx = aead_request_ctx(req);
req               999 crypto/gcm.c   	if (req->src != req->dst) {
req              1000 crypto/gcm.c   		err = crypto_rfc4543_copy_src_to_dst(req, enc);
req              1006 crypto/gcm.c   	memcpy(iv + 4, req->iv, 8);
req              1009 crypto/gcm.c   	aead_request_set_callback(subreq, req->base.flags,
req              1010 crypto/gcm.c   				  req->base.complete, req->base.data);
req              1011 crypto/gcm.c   	aead_request_set_crypt(subreq, req->src, req->dst,
req              1013 crypto/gcm.c   	aead_request_set_ad(subreq, req->assoclen + req->cryptlen -
req              1019 crypto/gcm.c   static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc)
req              1021 crypto/gcm.c   	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              1024 crypto/gcm.c   	unsigned int nbytes = req->assoclen + req->cryptlen -
req              1029 crypto/gcm.c   	skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL);
req              1030 crypto/gcm.c   	skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL);
req              1035 crypto/gcm.c   static int crypto_rfc4543_encrypt(struct aead_request *req)
req              1037 crypto/gcm.c   	return crypto_ipsec_check_assoclen(req->assoclen) ?:
req              1038 crypto/gcm.c   	       crypto_rfc4543_crypt(req, true);
req              1041 crypto/gcm.c   static int crypto_rfc4543_decrypt(struct aead_request *req)
req              1043 crypto/gcm.c   	return crypto_ipsec_check_assoclen(req->assoclen) ?:
req              1044 crypto/gcm.c   	       crypto_rfc4543_crypt(req, false);
req               123 crypto/keywrap.c static int crypto_kw_decrypt(struct skcipher_request *req)
req               125 crypto/keywrap.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               129 crypto/keywrap.c 	u64 t = 6 * ((req->cryptlen) >> 3);
req               137 crypto/keywrap.c 	if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
req               141 crypto/keywrap.c 	memcpy(&block.A, req->iv, SEMIBSIZE);
req               148 crypto/keywrap.c 	src = req->src;
req               149 crypto/keywrap.c 	dst = req->dst;
req               153 crypto/keywrap.c 		unsigned int nbytes = req->cryptlen;
req               179 crypto/keywrap.c 		src = req->dst;
req               180 crypto/keywrap.c 		dst = req->dst;
req               192 crypto/keywrap.c static int crypto_kw_encrypt(struct skcipher_request *req)
req               194 crypto/keywrap.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               207 crypto/keywrap.c 	if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
req               221 crypto/keywrap.c 	src = req->src;
req               222 crypto/keywrap.c 	dst = req->dst;
req               226 crypto/keywrap.c 		unsigned int nbytes = req->cryptlen;
req               251 crypto/keywrap.c 		src = req->dst;
req               252 crypto/keywrap.c 		dst = req->dst;
req               256 crypto/keywrap.c 	memcpy(req->iv, &block.A, SEMIBSIZE);
req               142 crypto/lrw.c   static int xor_tweak(struct skcipher_request *req, bool second_pass)
req               145 crypto/lrw.c   	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               147 crypto/lrw.c   	struct rctx *rctx = skcipher_request_ctx(req);
req               155 crypto/lrw.c   		req = &rctx->subreq;
req               157 crypto/lrw.c   		skcipher_request_set_tfm(req, tfm);
req               160 crypto/lrw.c   	err = skcipher_walk_virt(&w, req, false);
req               199 crypto/lrw.c   static int xor_tweak_pre(struct skcipher_request *req)
req               201 crypto/lrw.c   	return xor_tweak(req, false);
req               204 crypto/lrw.c   static int xor_tweak_post(struct skcipher_request *req)
req               206 crypto/lrw.c   	return xor_tweak(req, true);
req               211 crypto/lrw.c   	struct skcipher_request *req = areq->data;
req               214 crypto/lrw.c   		struct rctx *rctx = skcipher_request_ctx(req);
req               217 crypto/lrw.c   		err = xor_tweak_post(req);
req               220 crypto/lrw.c   	skcipher_request_complete(req, err);
req               223 crypto/lrw.c   static void init_crypt(struct skcipher_request *req)
req               225 crypto/lrw.c   	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
req               226 crypto/lrw.c   	struct rctx *rctx = skcipher_request_ctx(req);
req               230 crypto/lrw.c   	skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req);
req               232 crypto/lrw.c   	skcipher_request_set_crypt(subreq, req->dst, req->dst,
req               233 crypto/lrw.c   				   req->cryptlen, req->iv);
req               236 crypto/lrw.c   	memcpy(&rctx->t, req->iv, sizeof(rctx->t));
req               242 crypto/lrw.c   static int encrypt(struct skcipher_request *req)
req               244 crypto/lrw.c   	struct rctx *rctx = skcipher_request_ctx(req);
req               247 crypto/lrw.c   	init_crypt(req);
req               248 crypto/lrw.c   	return xor_tweak_pre(req) ?:
req               250 crypto/lrw.c   		xor_tweak_post(req);
req               253 crypto/lrw.c   static int decrypt(struct skcipher_request *req)
req               255 crypto/lrw.c   	struct rctx *rctx = skcipher_request_ctx(req);
req               258 crypto/lrw.c   	init_crypt(req);
req               259 crypto/lrw.c   	return xor_tweak_pre(req) ?:
req               261 crypto/lrw.c   		xor_tweak_post(req);
req                17 crypto/ofb.c   static int crypto_ofb_crypt(struct skcipher_request *req)
req                19 crypto/ofb.c   	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req                25 crypto/ofb.c   	err = skcipher_walk_virt(&walk, req, false);
req                19 crypto/pcbc.c  static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
req                41 crypto/pcbc.c  static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
req                63 crypto/pcbc.c  static int crypto_pcbc_encrypt(struct skcipher_request *req)
req                65 crypto/pcbc.c  	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req                71 crypto/pcbc.c  	err = skcipher_walk_virt(&walk, req, false);
req                75 crypto/pcbc.c  			nbytes = crypto_pcbc_encrypt_inplace(req, &walk,
req                78 crypto/pcbc.c  			nbytes = crypto_pcbc_encrypt_segment(req, &walk,
req                86 crypto/pcbc.c  static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
req               108 crypto/pcbc.c  static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
req               130 crypto/pcbc.c  static int crypto_pcbc_decrypt(struct skcipher_request *req)
req               132 crypto/pcbc.c  	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               138 crypto/pcbc.c  	err = skcipher_walk_virt(&walk, req, false);
req               142 crypto/pcbc.c  			nbytes = crypto_pcbc_decrypt_inplace(req, &walk,
req               145 crypto/pcbc.c  			nbytes = crypto_pcbc_decrypt_segment(req, &walk,
req                62 crypto/pcrypt.c 	struct aead_request *req = pcrypt_request_ctx(preq);
req                64 crypto/pcrypt.c 	aead_request_complete(req->base.data, padata->info);
req                69 crypto/pcrypt.c 	struct aead_request *req = areq->data;
req                70 crypto/pcrypt.c 	struct pcrypt_request *preq = aead_request_ctx(req);
req                81 crypto/pcrypt.c 	struct aead_request *req = pcrypt_request_ctx(preq);
req                83 crypto/pcrypt.c 	padata->info = crypto_aead_encrypt(req);
req                91 crypto/pcrypt.c static int pcrypt_aead_encrypt(struct aead_request *req)
req                94 crypto/pcrypt.c 	struct pcrypt_request *preq = aead_request_ctx(req);
req                97 crypto/pcrypt.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req                99 crypto/pcrypt.c 	u32 flags = aead_request_flags(req);
req               111 crypto/pcrypt.c 				  pcrypt_aead_done, req);
req               112 crypto/pcrypt.c 	aead_request_set_crypt(creq, req->src, req->dst,
req               113 crypto/pcrypt.c 			       req->cryptlen, req->iv);
req               114 crypto/pcrypt.c 	aead_request_set_ad(creq, req->assoclen);
req               126 crypto/pcrypt.c 	struct aead_request *req = pcrypt_request_ctx(preq);
req               128 crypto/pcrypt.c 	padata->info = crypto_aead_decrypt(req);
req               136 crypto/pcrypt.c static int pcrypt_aead_decrypt(struct aead_request *req)
req               139 crypto/pcrypt.c 	struct pcrypt_request *preq = aead_request_ctx(req);
req               142 crypto/pcrypt.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               144 crypto/pcrypt.c 	u32 flags = aead_request_flags(req);
req               156 crypto/pcrypt.c 				  pcrypt_aead_done, req);
req               157 crypto/pcrypt.c 	aead_request_set_crypt(creq, req->src, req->dst,
req               158 crypto/pcrypt.c 			       req->cryptlen, req->iv);
req               159 crypto/pcrypt.c 	aead_request_set_ad(creq, req->assoclen);
req               173 crypto/rsa-pkcs1pad.c static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
req               175 crypto/rsa-pkcs1pad.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               177 crypto/rsa-pkcs1pad.c 	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
req               197 crypto/rsa-pkcs1pad.c 	sg_copy_to_buffer(req->dst, sg_nents_for_len(req->dst, len),
req               199 crypto/rsa-pkcs1pad.c 	sg_copy_from_buffer(req->dst,
req               200 crypto/rsa-pkcs1pad.c 			    sg_nents_for_len(req->dst, ctx->key_size),
req               205 crypto/rsa-pkcs1pad.c 	req->dst_len = ctx->key_size;
req               215 crypto/rsa-pkcs1pad.c 	struct akcipher_request *req = child_async_req->data;
req               221 crypto/rsa-pkcs1pad.c 	async_req.data = req->base.data;
req               222 crypto/rsa-pkcs1pad.c 	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
req               224 crypto/rsa-pkcs1pad.c 	req->base.complete(&async_req,
req               225 crypto/rsa-pkcs1pad.c 			pkcs1pad_encrypt_sign_complete(req, err));
req               228 crypto/rsa-pkcs1pad.c static int pkcs1pad_encrypt(struct akcipher_request *req)
req               230 crypto/rsa-pkcs1pad.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               232 crypto/rsa-pkcs1pad.c 	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
req               239 crypto/rsa-pkcs1pad.c 	if (req->src_len > ctx->key_size - 11)
req               242 crypto/rsa-pkcs1pad.c 	if (req->dst_len < ctx->key_size) {
req               243 crypto/rsa-pkcs1pad.c 		req->dst_len = ctx->key_size;
req               247 crypto/rsa-pkcs1pad.c 	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
req               252 crypto/rsa-pkcs1pad.c 	ps_end = ctx->key_size - req->src_len - 2;
req               259 crypto/rsa-pkcs1pad.c 			ctx->key_size - 1 - req->src_len, req->src);
req               262 crypto/rsa-pkcs1pad.c 	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
req               263 crypto/rsa-pkcs1pad.c 			pkcs1pad_encrypt_sign_complete_cb, req);
req               267 crypto/rsa-pkcs1pad.c 				   req->dst, ctx->key_size - 1, req->dst_len);
req               271 crypto/rsa-pkcs1pad.c 		return pkcs1pad_encrypt_sign_complete(req, err);
req               276 crypto/rsa-pkcs1pad.c static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
req               278 crypto/rsa-pkcs1pad.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               280 crypto/rsa-pkcs1pad.c 	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
req               315 crypto/rsa-pkcs1pad.c 	if (req->dst_len < dst_len - pos)
req               317 crypto/rsa-pkcs1pad.c 	req->dst_len = dst_len - pos;
req               320 crypto/rsa-pkcs1pad.c 		sg_copy_from_buffer(req->dst,
req               321 crypto/rsa-pkcs1pad.c 				sg_nents_for_len(req->dst, req->dst_len),
req               322 crypto/rsa-pkcs1pad.c 				out_buf + pos, req->dst_len);
req               333 crypto/rsa-pkcs1pad.c 	struct akcipher_request *req = child_async_req->data;
req               339 crypto/rsa-pkcs1pad.c 	async_req.data = req->base.data;
req               340 crypto/rsa-pkcs1pad.c 	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
req               342 crypto/rsa-pkcs1pad.c 	req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
req               345 crypto/rsa-pkcs1pad.c static int pkcs1pad_decrypt(struct akcipher_request *req)
req               347 crypto/rsa-pkcs1pad.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               349 crypto/rsa-pkcs1pad.c 	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
req               352 crypto/rsa-pkcs1pad.c 	if (!ctx->key_size || req->src_len != ctx->key_size)
req               363 crypto/rsa-pkcs1pad.c 	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
req               364 crypto/rsa-pkcs1pad.c 			pkcs1pad_decrypt_complete_cb, req);
req               367 crypto/rsa-pkcs1pad.c 	akcipher_request_set_crypt(&req_ctx->child_req, req->src,
req               368 crypto/rsa-pkcs1pad.c 				   req_ctx->out_sg, req->src_len,
req               373 crypto/rsa-pkcs1pad.c 		return pkcs1pad_decrypt_complete(req, err);
req               378 crypto/rsa-pkcs1pad.c static int pkcs1pad_sign(struct akcipher_request *req)
req               380 crypto/rsa-pkcs1pad.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               382 crypto/rsa-pkcs1pad.c 	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
req               395 crypto/rsa-pkcs1pad.c 	if (req->src_len + digest_size > ctx->key_size - 11)
req               398 crypto/rsa-pkcs1pad.c 	if (req->dst_len < ctx->key_size) {
req               399 crypto/rsa-pkcs1pad.c 		req->dst_len = ctx->key_size;
req               403 crypto/rsa-pkcs1pad.c 	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
req               408 crypto/rsa-pkcs1pad.c 	ps_end = ctx->key_size - digest_size - req->src_len - 2;
req               418 crypto/rsa-pkcs1pad.c 			ctx->key_size - 1 - req->src_len, req->src);
req               421 crypto/rsa-pkcs1pad.c 	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
req               422 crypto/rsa-pkcs1pad.c 			pkcs1pad_encrypt_sign_complete_cb, req);
req               426 crypto/rsa-pkcs1pad.c 				   req->dst, ctx->key_size - 1, req->dst_len);
req               430 crypto/rsa-pkcs1pad.c 		return pkcs1pad_encrypt_sign_complete(req, err);
req               435 crypto/rsa-pkcs1pad.c static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
req               437 crypto/rsa-pkcs1pad.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               439 crypto/rsa-pkcs1pad.c 	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
req               487 crypto/rsa-pkcs1pad.c 	if (req->dst_len != dst_len - pos) {
req               489 crypto/rsa-pkcs1pad.c 		req->dst_len = dst_len - pos;
req               493 crypto/rsa-pkcs1pad.c 	sg_pcopy_to_buffer(req->src,
req               494 crypto/rsa-pkcs1pad.c 			   sg_nents_for_len(req->src,
req               495 crypto/rsa-pkcs1pad.c 					    req->src_len + req->dst_len),
req               497 crypto/rsa-pkcs1pad.c 			   req->dst_len, ctx->key_size);
req               500 crypto/rsa-pkcs1pad.c 		   req->dst_len) != 0)
req               511 crypto/rsa-pkcs1pad.c 	struct akcipher_request *req = child_async_req->data;
req               517 crypto/rsa-pkcs1pad.c 	async_req.data = req->base.data;
req               518 crypto/rsa-pkcs1pad.c 	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
req               520 crypto/rsa-pkcs1pad.c 	req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
req               531 crypto/rsa-pkcs1pad.c static int pkcs1pad_verify(struct akcipher_request *req)
req               533 crypto/rsa-pkcs1pad.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               535 crypto/rsa-pkcs1pad.c 	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
req               538 crypto/rsa-pkcs1pad.c 	if (WARN_ON(req->dst) ||
req               539 crypto/rsa-pkcs1pad.c 	    WARN_ON(!req->dst_len) ||
req               540 crypto/rsa-pkcs1pad.c 	    !ctx->key_size || req->src_len < ctx->key_size)
req               543 crypto/rsa-pkcs1pad.c 	req_ctx->out_buf = kmalloc(ctx->key_size + req->dst_len, GFP_KERNEL);
req               551 crypto/rsa-pkcs1pad.c 	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
req               552 crypto/rsa-pkcs1pad.c 			pkcs1pad_verify_complete_cb, req);
req               555 crypto/rsa-pkcs1pad.c 	akcipher_request_set_crypt(&req_ctx->child_req, req->src,
req               556 crypto/rsa-pkcs1pad.c 				   req_ctx->out_sg, req->src_len,
req               561 crypto/rsa-pkcs1pad.c 		return pkcs1pad_verify_complete(req, err);
req                54 crypto/rsa.c   static int rsa_enc(struct akcipher_request *req)
req                56 crypto/rsa.c   	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req                71 crypto/rsa.c   	m = mpi_read_raw_from_sgl(req->src, req->src_len);
req                79 crypto/rsa.c   	ret = mpi_write_to_sgl(c, req->dst, req->dst_len, &sign);
req                93 crypto/rsa.c   static int rsa_dec(struct akcipher_request *req)
req                95 crypto/rsa.c   	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               110 crypto/rsa.c   	c = mpi_read_raw_from_sgl(req->src, req->src_len);
req               118 crypto/rsa.c   	ret = mpi_write_to_sgl(m, req->dst, req->dst_len, &sign);
req               153 crypto/salsa20_generic.c static int salsa20_crypt(struct skcipher_request *req)
req               155 crypto/salsa20_generic.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               161 crypto/salsa20_generic.c 	err = skcipher_walk_virt(&walk, req, false);
req               163 crypto/salsa20_generic.c 	salsa20_init(state, ctx, req->iv);
req               120 crypto/scompress.c static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
req               122 crypto/scompress.c 	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
req               125 crypto/scompress.c 	void **ctx = acomp_request_ctx(req);
req               129 crypto/scompress.c 	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
req               132 crypto/scompress.c 	if (req->dst && !req->dlen)
req               135 crypto/scompress.c 	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
req               136 crypto/scompress.c 		req->dlen = SCOMP_SCRATCH_SIZE;
req               141 crypto/scompress.c 	scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0);
req               143 crypto/scompress.c 		ret = crypto_scomp_compress(scomp, scratch->src, req->slen,
req               144 crypto/scompress.c 					    scratch->dst, &req->dlen, *ctx);
req               146 crypto/scompress.c 		ret = crypto_scomp_decompress(scomp, scratch->src, req->slen,
req               147 crypto/scompress.c 					      scratch->dst, &req->dlen, *ctx);
req               149 crypto/scompress.c 		if (!req->dst) {
req               150 crypto/scompress.c 			req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
req               151 crypto/scompress.c 			if (!req->dst) {
req               156 crypto/scompress.c 		scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
req               164 crypto/scompress.c static int scomp_acomp_compress(struct acomp_req *req)
req               166 crypto/scompress.c 	return scomp_acomp_comp_decomp(req, 1);
req               169 crypto/scompress.c static int scomp_acomp_decompress(struct acomp_req *req)
req               171 crypto/scompress.c 	return scomp_acomp_comp_decomp(req, 0);
req               213 crypto/scompress.c struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
req               215 crypto/scompress.c 	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
req               223 crypto/scompress.c 		kfree(req);
req               227 crypto/scompress.c 	*req->__ctx = ctx;
req               229 crypto/scompress.c 	return req;
req               232 crypto/scompress.c void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
req               234 crypto/scompress.c 	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
req               238 crypto/scompress.c 	void *ctx = *req->__ctx;
req                23 crypto/seqiv.c static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
req                25 crypto/seqiv.c 	struct aead_request *subreq = aead_request_ctx(req);
req                34 crypto/seqiv.c 	geniv = crypto_aead_reqtfm(req);
req                35 crypto/seqiv.c 	memcpy(req->iv, subreq->iv, crypto_aead_ivsize(geniv));
req                44 crypto/seqiv.c 	struct aead_request *req = base->data;
req                46 crypto/seqiv.c 	seqiv_aead_encrypt_complete2(req, err);
req                47 crypto/seqiv.c 	aead_request_complete(req, err);
req                50 crypto/seqiv.c static int seqiv_aead_encrypt(struct aead_request *req)
req                52 crypto/seqiv.c 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
req                54 crypto/seqiv.c 	struct aead_request *subreq = aead_request_ctx(req);
req                61 crypto/seqiv.c 	if (req->cryptlen < ivsize)
req                66 crypto/seqiv.c 	compl = req->base.complete;
req                67 crypto/seqiv.c 	data = req->base.data;
req                68 crypto/seqiv.c 	info = req->iv;
req                70 crypto/seqiv.c 	if (req->src != req->dst) {
req                74 crypto/seqiv.c 		skcipher_request_set_callback(nreq, req->base.flags,
req                76 crypto/seqiv.c 		skcipher_request_set_crypt(nreq, req->src, req->dst,
req                77 crypto/seqiv.c 					   req->assoclen + req->cryptlen,
req                87 crypto/seqiv.c 		info = kmemdup(req->iv, ivsize, req->base.flags &
req                94 crypto/seqiv.c 		data = req;
req                97 crypto/seqiv.c 	aead_request_set_callback(subreq, req->base.flags, compl, data);
req                98 crypto/seqiv.c 	aead_request_set_crypt(subreq, req->dst, req->dst,
req                99 crypto/seqiv.c 			       req->cryptlen - ivsize, info);
req               100 crypto/seqiv.c 	aead_request_set_ad(subreq, req->assoclen + ivsize);
req               103 crypto/seqiv.c 	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
req               106 crypto/seqiv.c 	if (unlikely(info != req->iv))
req               107 crypto/seqiv.c 		seqiv_aead_encrypt_complete2(req, err);
req               111 crypto/seqiv.c static int seqiv_aead_decrypt(struct aead_request *req)
req               113 crypto/seqiv.c 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
req               115 crypto/seqiv.c 	struct aead_request *subreq = aead_request_ctx(req);
req               120 crypto/seqiv.c 	if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
req               125 crypto/seqiv.c 	compl = req->base.complete;
req               126 crypto/seqiv.c 	data = req->base.data;
req               128 crypto/seqiv.c 	aead_request_set_callback(subreq, req->base.flags, compl, data);
req               129 crypto/seqiv.c 	aead_request_set_crypt(subreq, req->src, req->dst,
req               130 crypto/seqiv.c 			       req->cryptlen - ivsize, req->iv);
req               131 crypto/seqiv.c 	aead_request_set_ad(subreq, req->assoclen + ivsize);
req               133 crypto/seqiv.c 	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
req               230 crypto/shash.c static int shash_async_init(struct ahash_request *req)
req               232 crypto/shash.c 	struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
req               233 crypto/shash.c 	struct shash_desc *desc = ahash_request_ctx(req);
req               240 crypto/shash.c int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
req               245 crypto/shash.c 	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
req               253 crypto/shash.c static int shash_async_update(struct ahash_request *req)
req               255 crypto/shash.c 	return shash_ahash_update(req, ahash_request_ctx(req));
req               258 crypto/shash.c static int shash_async_final(struct ahash_request *req)
req               260 crypto/shash.c 	return crypto_shash_final(ahash_request_ctx(req), req->result);
req               263 crypto/shash.c int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
req               268 crypto/shash.c 	nbytes = crypto_hash_walk_first(req, &walk);
req               270 crypto/shash.c 		return crypto_shash_final(desc, req->result);
req               275 crypto/shash.c 					    req->result) :
req               284 crypto/shash.c static int shash_async_finup(struct ahash_request *req)
req               286 crypto/shash.c 	struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
req               287 crypto/shash.c 	struct shash_desc *desc = ahash_request_ctx(req);
req               291 crypto/shash.c 	return shash_ahash_finup(req, desc);
req               294 crypto/shash.c int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
req               296 crypto/shash.c 	unsigned int nbytes = req->nbytes;
req               302 crypto/shash.c 	    (sg = req->src, offset = sg->offset,
req               308 crypto/shash.c 					  req->result);
req               312 crypto/shash.c 		      shash_ahash_finup(req, desc);
req               318 crypto/shash.c static int shash_async_digest(struct ahash_request *req)
req               320 crypto/shash.c 	struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
req               321 crypto/shash.c 	struct shash_desc *desc = ahash_request_ctx(req);
req               325 crypto/shash.c 	return shash_ahash_digest(req, desc);
req               328 crypto/shash.c static int shash_async_export(struct ahash_request *req, void *out)
req               330 crypto/shash.c 	return crypto_shash_export(ahash_request_ctx(req), out);
req               333 crypto/shash.c static int shash_async_import(struct ahash_request *req, const void *in)
req               335 crypto/shash.c 	struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
req               336 crypto/shash.c 	struct shash_desc *desc = ahash_request_ctx(req);
req                66 crypto/simd.c  static int simd_skcipher_encrypt(struct skcipher_request *req)
req                68 crypto/simd.c  	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req                73 crypto/simd.c  	subreq = skcipher_request_ctx(req);
req                74 crypto/simd.c  	*subreq = *req;
req                87 crypto/simd.c  static int simd_skcipher_decrypt(struct skcipher_request *req)
req                89 crypto/simd.c  	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req                94 crypto/simd.c  	subreq = skcipher_request_ctx(req);
req                95 crypto/simd.c  	*subreq = *req;
req               317 crypto/simd.c  static int simd_aead_encrypt(struct aead_request *req)
req               319 crypto/simd.c  	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               324 crypto/simd.c  	subreq = aead_request_ctx(req);
req               325 crypto/simd.c  	*subreq = *req;
req               338 crypto/simd.c  static int simd_aead_decrypt(struct aead_request *req)
req               340 crypto/simd.c  	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               345 crypto/simd.c  	subreq = aead_request_ctx(req);
req               346 crypto/simd.c  	*subreq = *req;
req               449 crypto/skcipher.c 				  struct skcipher_request *req)
req               451 crypto/skcipher.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               453 crypto/skcipher.c 	walk->total = req->cryptlen;
req               455 crypto/skcipher.c 	walk->iv = req->iv;
req               456 crypto/skcipher.c 	walk->oiv = req->iv;
req               461 crypto/skcipher.c 	scatterwalk_start(&walk->in, req->src);
req               462 crypto/skcipher.c 	scatterwalk_start(&walk->out, req->dst);
req               465 crypto/skcipher.c 	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
req               477 crypto/skcipher.c 		       struct skcipher_request *req, bool atomic)
req               481 crypto/skcipher.c 	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
req               485 crypto/skcipher.c 	err = skcipher_walk_skcipher(walk, req);
req               500 crypto/skcipher.c 			struct skcipher_request *req)
req               506 crypto/skcipher.c 	return skcipher_walk_skcipher(walk, req);
req               511 crypto/skcipher.c 				     struct aead_request *req, bool atomic)
req               513 crypto/skcipher.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               517 crypto/skcipher.c 	walk->iv = req->iv;
req               518 crypto/skcipher.c 	walk->oiv = req->iv;
req               525 crypto/skcipher.c 	scatterwalk_start(&walk->in, req->src);
req               526 crypto/skcipher.c 	scatterwalk_start(&walk->out, req->dst);
req               528 crypto/skcipher.c 	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
req               529 crypto/skcipher.c 	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
req               534 crypto/skcipher.c 	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
req               552 crypto/skcipher.c int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
req               555 crypto/skcipher.c 	walk->total = req->cryptlen;
req               557 crypto/skcipher.c 	return skcipher_walk_aead_common(walk, req, atomic);
req               562 crypto/skcipher.c 			       struct aead_request *req, bool atomic)
req               564 crypto/skcipher.c 	walk->total = req->cryptlen;
req               566 crypto/skcipher.c 	return skcipher_walk_aead_common(walk, req, atomic);
req               571 crypto/skcipher.c 			       struct aead_request *req, bool atomic)
req               573 crypto/skcipher.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               575 crypto/skcipher.c 	walk->total = req->cryptlen - crypto_aead_authsize(tfm);
req               577 crypto/skcipher.c 	return skcipher_walk_aead_common(walk, req, atomic);
req               620 crypto/skcipher.c static int skcipher_crypt_blkcipher(struct skcipher_request *req,
req               626 crypto/skcipher.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               630 crypto/skcipher.c 		.info = req->iv,
req               631 crypto/skcipher.c 		.flags = req->base.flags,
req               635 crypto/skcipher.c 	return crypt(&desc, req->dst, req->src, req->cryptlen);
req               638 crypto/skcipher.c static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
req               640 crypto/skcipher.c 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req               644 crypto/skcipher.c 	return skcipher_crypt_blkcipher(req, alg->encrypt);
req               647 crypto/skcipher.c static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
req               649 crypto/skcipher.c 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req               653 crypto/skcipher.c 	return skcipher_crypt_blkcipher(req, alg->decrypt);
req               721 crypto/skcipher.c static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
req               724 crypto/skcipher.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               726 crypto/skcipher.c 	struct ablkcipher_request *subreq = skcipher_request_ctx(req);
req               729 crypto/skcipher.c 	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
req               730 crypto/skcipher.c 					req->base.complete, req->base.data);
req               731 crypto/skcipher.c 	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req               732 crypto/skcipher.c 				     req->iv);
req               737 crypto/skcipher.c static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
req               739 crypto/skcipher.c 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req               743 crypto/skcipher.c 	return skcipher_crypt_ablkcipher(req, alg->encrypt);
req               746 crypto/skcipher.c static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
req               748 crypto/skcipher.c 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req               752 crypto/skcipher.c 	return skcipher_crypt_ablkcipher(req, alg->decrypt);
req               844 crypto/skcipher.c int crypto_skcipher_encrypt(struct skcipher_request *req)
req               846 crypto/skcipher.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               848 crypto/skcipher.c 	unsigned int cryptlen = req->cryptlen;
req               855 crypto/skcipher.c 		ret = tfm->encrypt(req);
req               861 crypto/skcipher.c int crypto_skcipher_decrypt(struct skcipher_request *req)
req               863 crypto/skcipher.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               865 crypto/skcipher.c 	unsigned int cryptlen = req->cryptlen;
req               872 crypto/skcipher.c 		ret = tfm->decrypt(req);
req               139 crypto/tcrypt.c static inline int do_one_aead_op(struct aead_request *req, int ret)
req               141 crypto/tcrypt.c 	struct crypto_wait *wait = req->base.data;
req               149 crypto/tcrypt.c 	struct aead_request *req;
req               164 crypto/tcrypt.c 			rc[i] = crypto_aead_encrypt(data[i].req);
req               166 crypto/tcrypt.c 			rc[i] = crypto_aead_decrypt(data[i].req);
req               315 crypto/tcrypt.c 		data[i].req = aead_request_alloc(tfm, GFP_KERNEL);
req               316 crypto/tcrypt.c 		if (!data[i].req) {
req               320 crypto/tcrypt.c 				aead_request_free(data[i].req);
req               327 crypto/tcrypt.c 		aead_request_set_callback(data[i].req,
req               389 crypto/tcrypt.c 				aead_request_set_ad(cur->req, aad_size);
req               393 crypto/tcrypt.c 					aead_request_set_crypt(cur->req,
req               397 crypto/tcrypt.c 					ret = crypto_aead_encrypt(cur->req);
req               398 crypto/tcrypt.c 					ret = do_one_aead_op(cur->req, ret);
req               407 crypto/tcrypt.c 				aead_request_set_crypt(cur->req, cur->sg,
req               435 crypto/tcrypt.c 		aead_request_free(data[i].req);
req               453 crypto/tcrypt.c static int test_aead_jiffies(struct aead_request *req, int enc,
req               463 crypto/tcrypt.c 			ret = do_one_aead_op(req, crypto_aead_encrypt(req));
req               465 crypto/tcrypt.c 			ret = do_one_aead_op(req, crypto_aead_decrypt(req));
req               476 crypto/tcrypt.c static int test_aead_cycles(struct aead_request *req, int enc, int blen)
req               485 crypto/tcrypt.c 			ret = do_one_aead_op(req, crypto_aead_encrypt(req));
req               487 crypto/tcrypt.c 			ret = do_one_aead_op(req, crypto_aead_decrypt(req));
req               499 crypto/tcrypt.c 			ret = do_one_aead_op(req, crypto_aead_encrypt(req));
req               501 crypto/tcrypt.c 			ret = do_one_aead_op(req, crypto_aead_decrypt(req));
req               527 crypto/tcrypt.c 	struct aead_request *req;
req               578 crypto/tcrypt.c 	req = aead_request_alloc(tfm, GFP_KERNEL);
req               579 crypto/tcrypt.c 	if (!req) {
req               585 crypto/tcrypt.c 	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req               636 crypto/tcrypt.c 			aead_request_set_ad(req, aad_size);
req               645 crypto/tcrypt.c 				aead_request_set_crypt(req, sgout, sg,
req               647 crypto/tcrypt.c 				ret = do_one_aead_op(req,
req               648 crypto/tcrypt.c 						     crypto_aead_encrypt(req));
req               657 crypto/tcrypt.c 			aead_request_set_crypt(req, sg, sgout,
req               662 crypto/tcrypt.c 				ret = test_aead_jiffies(req, enc, *b_size,
req               666 crypto/tcrypt.c 				ret = test_aead_cycles(req, enc, *b_size);
req               680 crypto/tcrypt.c 	aead_request_free(req);
req               706 crypto/tcrypt.c static inline int do_one_ahash_op(struct ahash_request *req, int ret)
req               708 crypto/tcrypt.c 	struct crypto_wait *wait = req->base.data;
req               716 crypto/tcrypt.c 	struct ahash_request *req;
req               728 crypto/tcrypt.c 		rc[i] = crypto_ahash_digest(data[i].req);
req               836 crypto/tcrypt.c 		data[i].req = ahash_request_alloc(tfm, GFP_KERNEL);
req               837 crypto/tcrypt.c 		if (!data[i].req) {
req               843 crypto/tcrypt.c 		ahash_request_set_callback(data[i].req, 0, crypto_req_done,
req               871 crypto/tcrypt.c 			ahash_request_set_crypt(data[k].req, data[k].sg,
req               896 crypto/tcrypt.c 		ahash_request_free(data[k].req);
req               907 crypto/tcrypt.c static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
req               916 crypto/tcrypt.c 		ret = do_one_ahash_op(req, crypto_ahash_digest(req));
req               927 crypto/tcrypt.c static int test_ahash_jiffies(struct ahash_request *req, int blen,
req               935 crypto/tcrypt.c 		return test_ahash_jiffies_digest(req, blen, out, secs);
req               939 crypto/tcrypt.c 		ret = do_one_ahash_op(req, crypto_ahash_init(req));
req               943 crypto/tcrypt.c 			ret = do_one_ahash_op(req, crypto_ahash_update(req));
req               948 crypto/tcrypt.c 		ret = do_one_ahash_op(req, crypto_ahash_final(req));
req               959 crypto/tcrypt.c static int test_ahash_cycles_digest(struct ahash_request *req, int blen,
req               967 crypto/tcrypt.c 		ret = do_one_ahash_op(req, crypto_ahash_digest(req));
req               978 crypto/tcrypt.c 		ret = do_one_ahash_op(req, crypto_ahash_digest(req));
req               997 crypto/tcrypt.c static int test_ahash_cycles(struct ahash_request *req, int blen,
req              1004 crypto/tcrypt.c 		return test_ahash_cycles_digest(req, blen, out);
req              1008 crypto/tcrypt.c 		ret = do_one_ahash_op(req, crypto_ahash_init(req));
req              1012 crypto/tcrypt.c 			ret = do_one_ahash_op(req, crypto_ahash_update(req));
req              1016 crypto/tcrypt.c 		ret = do_one_ahash_op(req, crypto_ahash_final(req));
req              1027 crypto/tcrypt.c 		ret = do_one_ahash_op(req, crypto_ahash_init(req));
req              1031 crypto/tcrypt.c 			ret = do_one_ahash_op(req, crypto_ahash_update(req));
req              1035 crypto/tcrypt.c 		ret = do_one_ahash_op(req, crypto_ahash_final(req));
req              1059 crypto/tcrypt.c 	struct ahash_request *req;
req              1081 crypto/tcrypt.c 	req = ahash_request_alloc(tfm, GFP_KERNEL);
req              1082 crypto/tcrypt.c 	if (!req) {
req              1088 crypto/tcrypt.c 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req              1109 crypto/tcrypt.c 		ahash_request_set_crypt(req, sg, output, speed[i].plen);
req              1112 crypto/tcrypt.c 			ret = test_ahash_jiffies(req, speed[i].blen,
req              1116 crypto/tcrypt.c 			ret = test_ahash_cycles(req, speed[i].blen,
req              1129 crypto/tcrypt.c 	ahash_request_free(req);
req              1149 crypto/tcrypt.c 	struct skcipher_request *req;
req              1162 crypto/tcrypt.c 			rc[i] = crypto_skcipher_encrypt(data[i].req);
req              1164 crypto/tcrypt.c 			rc[i] = crypto_skcipher_decrypt(data[i].req);
req              1294 crypto/tcrypt.c 		data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL);
req              1295 crypto/tcrypt.c 		if (!data[i].req) {
req              1299 crypto/tcrypt.c 				skcipher_request_free(data[i].req);
req              1305 crypto/tcrypt.c 		skcipher_request_set_callback(data[i].req,
req              1372 crypto/tcrypt.c 				skcipher_request_set_crypt(cur->req, cur->sg,
req              1400 crypto/tcrypt.c 		skcipher_request_free(data[i].req);
req              1410 crypto/tcrypt.c static inline int do_one_acipher_op(struct skcipher_request *req, int ret)
req              1412 crypto/tcrypt.c 	struct crypto_wait *wait = req->base.data;
req              1417 crypto/tcrypt.c static int test_acipher_jiffies(struct skcipher_request *req, int enc,
req              1427 crypto/tcrypt.c 			ret = do_one_acipher_op(req,
req              1428 crypto/tcrypt.c 						crypto_skcipher_encrypt(req));
req              1430 crypto/tcrypt.c 			ret = do_one_acipher_op(req,
req              1431 crypto/tcrypt.c 						crypto_skcipher_decrypt(req));
req              1442 crypto/tcrypt.c static int test_acipher_cycles(struct skcipher_request *req, int enc,
req              1452 crypto/tcrypt.c 			ret = do_one_acipher_op(req,
req              1453 crypto/tcrypt.c 						crypto_skcipher_encrypt(req));
req              1455 crypto/tcrypt.c 			ret = do_one_acipher_op(req,
req              1456 crypto/tcrypt.c 						crypto_skcipher_decrypt(req));
req              1468 crypto/tcrypt.c 			ret = do_one_acipher_op(req,
req              1469 crypto/tcrypt.c 						crypto_skcipher_encrypt(req));
req              1471 crypto/tcrypt.c 			ret = do_one_acipher_op(req,
req              1472 crypto/tcrypt.c 						crypto_skcipher_decrypt(req));
req              1497 crypto/tcrypt.c 	struct skcipher_request *req;
req              1520 crypto/tcrypt.c 	req = skcipher_request_alloc(tfm, GFP_KERNEL);
req              1521 crypto/tcrypt.c 	if (!req) {
req              1527 crypto/tcrypt.c 	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req              1591 crypto/tcrypt.c 			skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
req              1594 crypto/tcrypt.c 				ret = test_acipher_jiffies(req, enc,
req              1598 crypto/tcrypt.c 				ret = test_acipher_cycles(req, enc,
req              1614 crypto/tcrypt.c 	skcipher_request_free(req);
req              1234 crypto/testmgr.c static int do_ahash_op(int (*op)(struct ahash_request *req),
req              1235 crypto/testmgr.c 		       struct ahash_request *req,
req              1243 crypto/testmgr.c 	err = op(req);
req              1274 crypto/testmgr.c 			      struct ahash_request *req,
req              1278 crypto/testmgr.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1319 crypto/testmgr.c 	testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm));
req              1325 crypto/testmgr.c 		ahash_request_set_callback(req, req_flags, crypto_req_done,
req              1327 crypto/testmgr.c 		ahash_request_set_crypt(req, tsgl->sgl, result, vec->psize);
req              1328 crypto/testmgr.c 		err = do_ahash_op(crypto_ahash_digest, req, &wait, cfg->nosimd);
req              1347 crypto/testmgr.c 	ahash_request_set_callback(req, req_flags, crypto_req_done, &wait);
req              1348 crypto/testmgr.c 	ahash_request_set_crypt(req, NULL, result, 0);
req              1349 crypto/testmgr.c 	err = do_ahash_op(crypto_ahash_init, req, &wait, cfg->nosimd);
req              1361 crypto/testmgr.c 			ahash_request_set_callback(req, req_flags,
req              1363 crypto/testmgr.c 			ahash_request_set_crypt(req, pending_sgl, result,
req              1365 crypto/testmgr.c 			err = do_ahash_op(crypto_ahash_update, req, &wait,
req              1379 crypto/testmgr.c 			err = crypto_ahash_export(req, hashstate);
req              1392 crypto/testmgr.c 			testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm));
req              1393 crypto/testmgr.c 			err = crypto_ahash_import(req, hashstate);
req              1405 crypto/testmgr.c 	ahash_request_set_callback(req, req_flags, crypto_req_done, &wait);
req              1406 crypto/testmgr.c 	ahash_request_set_crypt(req, pending_sgl, result, pending_len);
req              1409 crypto/testmgr.c 		err = do_ahash_op(crypto_ahash_update, req, &wait, cfg->nosimd);
req              1414 crypto/testmgr.c 		err = do_ahash_op(crypto_ahash_final, req, &wait, cfg->nosimd);
req              1422 crypto/testmgr.c 		err = do_ahash_op(crypto_ahash_finup, req, &wait, cfg->nosimd);
req              1439 crypto/testmgr.c 			     struct ahash_request *req,
req              1459 crypto/testmgr.c 	return test_ahash_vec_cfg(driver, vec, vec_name, cfg, req, tsgl,
req              1464 crypto/testmgr.c 			 unsigned int vec_num, struct ahash_request *req,
req              1477 crypto/testmgr.c 					req, desc, tsgl, hashstate);
req              1491 crypto/testmgr.c 						req, desc, tsgl, hashstate);
req              1550 crypto/testmgr.c 				     struct ahash_request *req,
req              1555 crypto/testmgr.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1647 crypto/testmgr.c 					req, desc, tsgl, hashstate);
req              1666 crypto/testmgr.c 				     struct ahash_request *req,
req              1714 crypto/testmgr.c 	struct ahash_request *req = NULL;
req              1735 crypto/testmgr.c 	req = ahash_request_alloc(atfm, GFP_KERNEL);
req              1736 crypto/testmgr.c 	if (!req) {
req              1773 crypto/testmgr.c 		err = test_hash_vec(driver, &vecs[i], i, req, desc, tsgl,
req              1779 crypto/testmgr.c 	err = test_hash_vs_generic_impl(driver, generic_driver, maxkeysize, req,
req              1789 crypto/testmgr.c 	ahash_request_free(req);
req              1841 crypto/testmgr.c 			     struct aead_request *req,
req              1844 crypto/testmgr.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1919 crypto/testmgr.c 	testmgr_poison(req->__ctx, crypto_aead_reqsize(tfm));
req              1920 crypto/testmgr.c 	aead_request_set_callback(req, req_flags, crypto_req_done, &wait);
req              1921 crypto/testmgr.c 	aead_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr,
req              1923 crypto/testmgr.c 	aead_request_set_ad(req, vec->alen);
req              1926 crypto/testmgr.c 	err = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
req              1932 crypto/testmgr.c 	if (req->cryptlen != (enc ? vec->plen : vec->clen) ||
req              1933 crypto/testmgr.c 	    req->assoclen != vec->alen ||
req              1934 crypto/testmgr.c 	    req->iv != iv ||
req              1935 crypto/testmgr.c 	    req->src != tsgls->src.sgl_ptr ||
req              1936 crypto/testmgr.c 	    req->dst != tsgls->dst.sgl_ptr ||
req              1937 crypto/testmgr.c 	    crypto_aead_reqtfm(req) != tfm ||
req              1938 crypto/testmgr.c 	    req->base.complete != crypto_req_done ||
req              1939 crypto/testmgr.c 	    req->base.flags != req_flags ||
req              1940 crypto/testmgr.c 	    req->base.data != &wait) {
req              1943 crypto/testmgr.c 		if (req->cryptlen != (enc ? vec->plen : vec->clen))
req              1945 crypto/testmgr.c 		if (req->assoclen != vec->alen)
req              1947 crypto/testmgr.c 		if (req->iv != iv)
req              1949 crypto/testmgr.c 		if (req->src != tsgls->src.sgl_ptr)
req              1951 crypto/testmgr.c 		if (req->dst != tsgls->dst.sgl_ptr)
req              1953 crypto/testmgr.c 		if (crypto_aead_reqtfm(req) != tfm)
req              1955 crypto/testmgr.c 		if (req->base.complete != crypto_req_done)
req              1957 crypto/testmgr.c 		if (req->base.flags != req_flags)
req              1959 crypto/testmgr.c 		if (req->base.data != &wait)
req              2010 crypto/testmgr.c 			 struct aead_request *req,
req              2025 crypto/testmgr.c 					req, tsgls);
req              2039 crypto/testmgr.c 						&cfg, req, tsgls);
req              2054 crypto/testmgr.c static void generate_random_aead_testvec(struct aead_request *req,
req              2060 crypto/testmgr.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2118 crypto/testmgr.c 	aead_request_set_callback(req, 0, crypto_req_done, &wait);
req              2119 crypto/testmgr.c 	aead_request_set_crypt(req, src, &dst, vec->plen, iv);
req              2120 crypto/testmgr.c 	aead_request_set_ad(req, vec->alen);
req              2121 crypto/testmgr.c 	vec->crypt_error = crypto_wait_req(crypto_aead_encrypt(req), &wait);
req              2136 crypto/testmgr.c 				     struct aead_request *req,
req              2139 crypto/testmgr.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2246 crypto/testmgr.c 					req, tsgls);
req              2251 crypto/testmgr.c 						cfg, req, tsgls);
req              2272 crypto/testmgr.c 				     struct aead_request *req,
req              2281 crypto/testmgr.c 		     struct aead_request *req,
req              2288 crypto/testmgr.c 		err = test_aead_vec(driver, enc, &suite->vecs[i], i, req,
req              2302 crypto/testmgr.c 	struct aead_request *req = NULL;
req              2318 crypto/testmgr.c 	req = aead_request_alloc(tfm, GFP_KERNEL);
req              2319 crypto/testmgr.c 	if (!req) {
req              2334 crypto/testmgr.c 	err = test_aead(driver, ENCRYPT, suite, req, tsgls);
req              2338 crypto/testmgr.c 	err = test_aead(driver, DECRYPT, suite, req, tsgls);
req              2342 crypto/testmgr.c 	err = test_aead_vs_generic_impl(driver, desc, req, tsgls);
req              2345 crypto/testmgr.c 	aead_request_free(req);
req              2441 crypto/testmgr.c 				 struct skcipher_request *req,
req              2444 crypto/testmgr.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req              2509 crypto/testmgr.c 	testmgr_poison(req->__ctx, crypto_skcipher_reqsize(tfm));
req              2510 crypto/testmgr.c 	skcipher_request_set_callback(req, req_flags, crypto_req_done, &wait);
req              2511 crypto/testmgr.c 	skcipher_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr,
req              2515 crypto/testmgr.c 	err = enc ? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
req              2521 crypto/testmgr.c 	if (req->cryptlen != vec->len ||
req              2522 crypto/testmgr.c 	    req->iv != iv ||
req              2523 crypto/testmgr.c 	    req->src != tsgls->src.sgl_ptr ||
req              2524 crypto/testmgr.c 	    req->dst != tsgls->dst.sgl_ptr ||
req              2525 crypto/testmgr.c 	    crypto_skcipher_reqtfm(req) != tfm ||
req              2526 crypto/testmgr.c 	    req->base.complete != crypto_req_done ||
req              2527 crypto/testmgr.c 	    req->base.flags != req_flags ||
req              2528 crypto/testmgr.c 	    req->base.data != &wait) {
req              2531 crypto/testmgr.c 		if (req->cryptlen != vec->len)
req              2533 crypto/testmgr.c 		if (req->iv != iv)
req              2535 crypto/testmgr.c 		if (req->src != tsgls->src.sgl_ptr)
req              2537 crypto/testmgr.c 		if (req->dst != tsgls->dst.sgl_ptr)
req              2539 crypto/testmgr.c 		if (crypto_skcipher_reqtfm(req) != tfm)
req              2541 crypto/testmgr.c 		if (req->base.complete != crypto_req_done)
req              2543 crypto/testmgr.c 		if (req->base.flags != req_flags)
req              2545 crypto/testmgr.c 		if (req->base.data != &wait)
req              2603 crypto/testmgr.c 			     struct skcipher_request *req,
req              2618 crypto/testmgr.c 					    req, tsgls);
req              2632 crypto/testmgr.c 						    &cfg, req, tsgls);
req              2647 crypto/testmgr.c static void generate_random_cipher_testvec(struct skcipher_request *req,
req              2652 crypto/testmgr.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req              2681 crypto/testmgr.c 	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
req              2682 crypto/testmgr.c 	skcipher_request_set_crypt(req, &src, &dst, vec->len, iv);
req              2683 crypto/testmgr.c 	vec->crypt_error = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
req              2704 crypto/testmgr.c 					 struct skcipher_request *req,
req              2707 crypto/testmgr.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req              2808 crypto/testmgr.c 					    cfg, req, tsgls);
req              2812 crypto/testmgr.c 					    cfg, req, tsgls);
req              2831 crypto/testmgr.c 					 struct skcipher_request *req,
req              2840 crypto/testmgr.c 			 struct skcipher_request *req,
req              2847 crypto/testmgr.c 		err = test_skcipher_vec(driver, enc, &suite->vecs[i], i, req,
req              2861 crypto/testmgr.c 	struct skcipher_request *req = NULL;
req              2877 crypto/testmgr.c 	req = skcipher_request_alloc(tfm, GFP_KERNEL);
req              2878 crypto/testmgr.c 	if (!req) {
req              2893 crypto/testmgr.c 	err = test_skcipher(driver, ENCRYPT, suite, req, tsgls);
req              2897 crypto/testmgr.c 	err = test_skcipher(driver, DECRYPT, suite, req, tsgls);
req              2901 crypto/testmgr.c 	err = test_skcipher_vs_generic_impl(driver, desc->generic_driver, req,
req              2905 crypto/testmgr.c 	skcipher_request_free(req);
req              3026 crypto/testmgr.c 	struct acomp_req *req;
req              3055 crypto/testmgr.c 		req = acomp_request_alloc(tfm);
req              3056 crypto/testmgr.c 		if (!req) {
req              3064 crypto/testmgr.c 		acomp_request_set_params(req, &src, &dst, ilen, dlen);
req              3065 crypto/testmgr.c 		acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req              3068 crypto/testmgr.c 		ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
req              3073 crypto/testmgr.c 			acomp_request_free(req);
req              3077 crypto/testmgr.c 		ilen = req->dlen;
req              3082 crypto/testmgr.c 		acomp_request_set_params(req, &src, &dst, ilen, dlen);
req              3084 crypto/testmgr.c 		ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
req              3089 crypto/testmgr.c 			acomp_request_free(req);
req              3093 crypto/testmgr.c 		if (req->dlen != ctemplate[i].inlen) {
req              3095 crypto/testmgr.c 			       i + 1, algo, req->dlen);
req              3098 crypto/testmgr.c 			acomp_request_free(req);
req              3102 crypto/testmgr.c 		if (memcmp(input_vec, decomp_out, req->dlen)) {
req              3105 crypto/testmgr.c 			hexdump(output, req->dlen);
req              3108 crypto/testmgr.c 			acomp_request_free(req);
req              3113 crypto/testmgr.c 		acomp_request_free(req);
req              3132 crypto/testmgr.c 		req = acomp_request_alloc(tfm);
req              3133 crypto/testmgr.c 		if (!req) {
req              3141 crypto/testmgr.c 		acomp_request_set_params(req, &src, &dst, ilen, dlen);
req              3142 crypto/testmgr.c 		acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req              3145 crypto/testmgr.c 		ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
req              3150 crypto/testmgr.c 			acomp_request_free(req);
req              3154 crypto/testmgr.c 		if (req->dlen != dtemplate[i].outlen) {
req              3156 crypto/testmgr.c 			       i + 1, algo, req->dlen);
req              3159 crypto/testmgr.c 			acomp_request_free(req);
req              3163 crypto/testmgr.c 		if (memcmp(output, dtemplate[i].output, req->dlen)) {
req              3166 crypto/testmgr.c 			hexdump(output, req->dlen);
req              3169 crypto/testmgr.c 			acomp_request_free(req);
req              3174 crypto/testmgr.c 		acomp_request_free(req);
req              3476 crypto/testmgr.c 	struct kpp_request *req;
req              3487 crypto/testmgr.c 	req = kpp_request_alloc(tfm, GFP_KERNEL);
req              3488 crypto/testmgr.c 	if (!req)
req              3505 crypto/testmgr.c 	kpp_request_set_input(req, NULL, 0);
req              3507 crypto/testmgr.c 	kpp_request_set_output(req, &dst, out_len_max);
req              3508 crypto/testmgr.c 	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req              3512 crypto/testmgr.c 	err = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);
req              3521 crypto/testmgr.c 		a_public = kmemdup(sg_virt(req->dst), out_len_max, GFP_KERNEL);
req              3528 crypto/testmgr.c 		if (memcmp(vec->expected_a_public, sg_virt(req->dst),
req              3546 crypto/testmgr.c 	kpp_request_set_input(req, &src, vec->b_public_size);
req              3547 crypto/testmgr.c 	kpp_request_set_output(req, &dst, out_len_max);
req              3548 crypto/testmgr.c 	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req              3550 crypto/testmgr.c 	err = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait);
req              3559 crypto/testmgr.c 		a_ss = kmemdup(sg_virt(req->dst), vec->expected_ss_size, GFP_KERNEL);
req              3576 crypto/testmgr.c 		kpp_request_set_input(req, &src, vec->expected_a_public_size);
req              3577 crypto/testmgr.c 		kpp_request_set_output(req, &dst, out_len_max);
req              3578 crypto/testmgr.c 		kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req              3580 crypto/testmgr.c 		err = crypto_wait_req(crypto_kpp_compute_shared_secret(req),
req              3597 crypto/testmgr.c 	if (memcmp(shared_secret, sg_virt(req->dst),
req              3611 crypto/testmgr.c 	kpp_request_free(req);
req              3661 crypto/testmgr.c 	struct akcipher_request *req;
req              3676 crypto/testmgr.c 	req = akcipher_request_alloc(tfm, GFP_KERNEL);
req              3677 crypto/testmgr.c 	if (!req)
req              3738 crypto/testmgr.c 		akcipher_request_set_crypt(req, src_tab, NULL, m_size, c_size);
req              3741 crypto/testmgr.c 		akcipher_request_set_crypt(req, src_tab, &dst, m_size,
req              3744 crypto/testmgr.c 	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req              3749 crypto/testmgr.c 			      crypto_akcipher_verify(req) :
req              3751 crypto/testmgr.c 			      crypto_akcipher_encrypt(req), &wait);
req              3757 crypto/testmgr.c 		if (req->dst_len != c_size) {
req              3795 crypto/testmgr.c 	akcipher_request_set_crypt(req, &src, &dst, c_size, out_len_max);
req              3799 crypto/testmgr.c 			      crypto_akcipher_sign(req) :
req              3801 crypto/testmgr.c 			      crypto_akcipher_decrypt(req), &wait);
req              3806 crypto/testmgr.c 	out_len = req->dst_len;
req              3824 crypto/testmgr.c 	akcipher_request_free(req);
req                87 crypto/xts.c   static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc)
req                89 crypto/xts.c   	struct rctx *rctx = skcipher_request_ctx(req);
req                90 crypto/xts.c   	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req                91 crypto/xts.c   	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
req                98 crypto/xts.c   		req = &rctx->subreq;
req               100 crypto/xts.c   		skcipher_request_set_tfm(req, tfm);
req               102 crypto/xts.c   	err = skcipher_walk_virt(&w, req, false);
req               137 crypto/xts.c   static int xor_tweak_pre(struct skcipher_request *req, bool enc)
req               139 crypto/xts.c   	return xor_tweak(req, false, enc);
req               142 crypto/xts.c   static int xor_tweak_post(struct skcipher_request *req, bool enc)
req               144 crypto/xts.c   	return xor_tweak(req, true, enc);
req               149 crypto/xts.c   	struct skcipher_request *req = areq->data;
req               153 crypto/xts.c   		struct rctx *rctx = skcipher_request_ctx(req);
req               160 crypto/xts.c   	skcipher_request_complete(req, err);
req               163 crypto/xts.c   static int cts_final(struct skcipher_request *req,
req               164 crypto/xts.c   		     int (*crypt)(struct skcipher_request *req))
req               166 crypto/xts.c   	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
req               167 crypto/xts.c   	int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
req               168 crypto/xts.c   	struct rctx *rctx = skcipher_request_ctx(req);
req               170 crypto/xts.c   	int tail = req->cryptlen % XTS_BLOCK_SIZE;
req               174 crypto/xts.c   	rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
req               179 crypto/xts.c   	scatterwalk_map_and_copy(b, req->src, offset, tail, 0);
req               186 crypto/xts.c   	skcipher_request_set_callback(subreq, req->base.flags, cts_done, req);
req               203 crypto/xts.c   	struct skcipher_request *req = areq->data;
req               206 crypto/xts.c   		struct rctx *rctx = skcipher_request_ctx(req);
req               209 crypto/xts.c   		err = xor_tweak_post(req, true);
req               211 crypto/xts.c   		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
req               212 crypto/xts.c   			err = cts_final(req, crypto_skcipher_encrypt);
req               218 crypto/xts.c   	skcipher_request_complete(req, err);
req               223 crypto/xts.c   	struct skcipher_request *req = areq->data;
req               226 crypto/xts.c   		struct rctx *rctx = skcipher_request_ctx(req);
req               229 crypto/xts.c   		err = xor_tweak_post(req, false);
req               231 crypto/xts.c   		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
req               232 crypto/xts.c   			err = cts_final(req, crypto_skcipher_decrypt);
req               238 crypto/xts.c   	skcipher_request_complete(req, err);
req               241 crypto/xts.c   static int init_crypt(struct skcipher_request *req, crypto_completion_t compl)
req               243 crypto/xts.c   	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
req               244 crypto/xts.c   	struct rctx *rctx = skcipher_request_ctx(req);
req               247 crypto/xts.c   	if (req->cryptlen < XTS_BLOCK_SIZE)
req               251 crypto/xts.c   	skcipher_request_set_callback(subreq, req->base.flags, compl, req);
req               252 crypto/xts.c   	skcipher_request_set_crypt(subreq, req->dst, req->dst,
req               253 crypto/xts.c   				   req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);
req               256 crypto/xts.c   	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
req               261 crypto/xts.c   static int encrypt(struct skcipher_request *req)
req               263 crypto/xts.c   	struct rctx *rctx = skcipher_request_ctx(req);
req               267 crypto/xts.c   	err = init_crypt(req, encrypt_done) ?:
req               268 crypto/xts.c   	      xor_tweak_pre(req, true) ?:
req               270 crypto/xts.c   	      xor_tweak_post(req, true);
req               272 crypto/xts.c   	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
req               275 crypto/xts.c   	return cts_final(req, crypto_skcipher_encrypt);
req               278 crypto/xts.c   static int decrypt(struct skcipher_request *req)
req               280 crypto/xts.c   	struct rctx *rctx = skcipher_request_ctx(req);
req               284 crypto/xts.c   	err = init_crypt(req, decrypt_done) ?:
req               285 crypto/xts.c   	      xor_tweak_pre(req, false) ?:
req               287 crypto/xts.c   	      xor_tweak_post(req, false);
req               289 crypto/xts.c   	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
req               292 crypto/xts.c   	return cts_final(req, crypto_skcipher_decrypt);
req               356 drivers/acpi/pci_root.c acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
req               366 drivers/acpi/pci_root.c 	if ((ctrl & req) != req)
req               392 drivers/acpi/pci_root.c 	if ((ctrl & req) != req) {
req               394 drivers/acpi/pci_root.c 				   req & ~(ctrl));
req               101 drivers/android/binderfs.c 					 struct binderfs_device *req)
req               150 drivers/android/binderfs.c 	req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
req               151 drivers/android/binderfs.c 	name_len = strlen(req->name);
req               153 drivers/android/binderfs.c 	name = kmemdup(req->name, name_len + 1, GFP_KERNEL);
req               165 drivers/android/binderfs.c 	req->major = MAJOR(binderfs_dev);
req               166 drivers/android/binderfs.c 	req->minor = minor;
req               168 drivers/android/binderfs.c 	if (userp && copy_to_user(userp, req, sizeof(*req))) {
req                42 drivers/base/devtmpfs.c 	struct req *next;
req                99 drivers/base/devtmpfs.c 	struct req req;
req               104 drivers/base/devtmpfs.c 	req.mode = 0;
req               105 drivers/base/devtmpfs.c 	req.uid = GLOBAL_ROOT_UID;
req               106 drivers/base/devtmpfs.c 	req.gid = GLOBAL_ROOT_GID;
req               107 drivers/base/devtmpfs.c 	req.name = device_get_devnode(dev, &req.mode, &req.uid, &req.gid, &tmp);
req               108 drivers/base/devtmpfs.c 	if (!req.name)
req               111 drivers/base/devtmpfs.c 	if (req.mode == 0)
req               112 drivers/base/devtmpfs.c 		req.mode = 0600;
req               114 drivers/base/devtmpfs.c 		req.mode |= S_IFBLK;
req               116 drivers/base/devtmpfs.c 		req.mode |= S_IFCHR;
req               118 drivers/base/devtmpfs.c 	req.dev = dev;
req               120 drivers/base/devtmpfs.c 	init_completion(&req.done);
req               123 drivers/base/devtmpfs.c 	req.next = requests;
req               124 drivers/base/devtmpfs.c 	requests = &req;
req               128 drivers/base/devtmpfs.c 	wait_for_completion(&req.done);
req               132 drivers/base/devtmpfs.c 	return req.err;
req               138 drivers/base/devtmpfs.c 	struct req req;
req               143 drivers/base/devtmpfs.c 	req.name = device_get_devnode(dev, NULL, NULL, NULL, &tmp);
req               144 drivers/base/devtmpfs.c 	if (!req.name)
req               147 drivers/base/devtmpfs.c 	req.mode = 0;
req               148 drivers/base/devtmpfs.c 	req.dev = dev;
req               150 drivers/base/devtmpfs.c 	init_completion(&req.done);
req               153 drivers/base/devtmpfs.c 	req.next = requests;
req               154 drivers/base/devtmpfs.c 	requests = &req;
req               158 drivers/base/devtmpfs.c 	wait_for_completion(&req.done);
req               161 drivers/base/devtmpfs.c 	return req.err;
req               406 drivers/base/devtmpfs.c 			struct req *req = requests;
req               409 drivers/base/devtmpfs.c 			while (req) {
req               410 drivers/base/devtmpfs.c 				struct req *next = req->next;
req               411 drivers/base/devtmpfs.c 				req->err = handle(req->name, req->mode,
req               412 drivers/base/devtmpfs.c 						  req->uid, req->gid, req->dev);
req               413 drivers/base/devtmpfs.c 				complete(&req->done);
req               414 drivers/base/devtmpfs.c 				req = next;
req               140 drivers/base/power/qos.c static int apply_constraint(struct dev_pm_qos_request *req,
req               143 drivers/base/power/qos.c 	struct dev_pm_qos *qos = req->dev->power.qos;
req               146 drivers/base/power/qos.c 	switch(req->type) {
req               152 drivers/base/power/qos.c 					   &req->data.pnode, action, value);
req               156 drivers/base/power/qos.c 					   &req->data.pnode, action, value);
req               159 drivers/base/power/qos.c 			req->dev->power.set_latency_tolerance(req->dev, value);
req               163 drivers/base/power/qos.c 		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
req               233 drivers/base/power/qos.c 	struct dev_pm_qos_request *req, *tmp;
req               257 drivers/base/power/qos.c 	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
req               262 drivers/base/power/qos.c 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
req               263 drivers/base/power/qos.c 		memset(req, 0, sizeof(*req));
req               267 drivers/base/power/qos.c 	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
req               268 drivers/base/power/qos.c 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
req               269 drivers/base/power/qos.c 		memset(req, 0, sizeof(*req));
req               273 drivers/base/power/qos.c 	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
req               274 drivers/base/power/qos.c 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
req               275 drivers/base/power/qos.c 		memset(req, 0, sizeof(*req));
req               299 drivers/base/power/qos.c 				    struct dev_pm_qos_request *req,
req               304 drivers/base/power/qos.c 	if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
req               307 drivers/base/power/qos.c 	if (WARN(dev_pm_qos_request_active(req),
req               318 drivers/base/power/qos.c 		req->dev = dev;
req               319 drivers/base/power/qos.c 		req->type = type;
req               320 drivers/base/power/qos.c 		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
req               347 drivers/base/power/qos.c int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
req               353 drivers/base/power/qos.c 	ret = __dev_pm_qos_add_request(dev, req, type, value);
req               364 drivers/base/power/qos.c static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
req               370 drivers/base/power/qos.c 	if (!req) /*guard against callers passing in null */
req               373 drivers/base/power/qos.c 	if (WARN(!dev_pm_qos_request_active(req),
req               377 drivers/base/power/qos.c 	if (IS_ERR_OR_NULL(req->dev->power.qos))
req               380 drivers/base/power/qos.c 	switch(req->type) {
req               383 drivers/base/power/qos.c 		curr_value = req->data.pnode.prio;
req               386 drivers/base/power/qos.c 		curr_value = req->data.flr.flags;
req               392 drivers/base/power/qos.c 	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
req               395 drivers/base/power/qos.c 		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);
req               418 drivers/base/power/qos.c int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
req               423 drivers/base/power/qos.c 	ret = __dev_pm_qos_update_request(req, new_value);
req               429 drivers/base/power/qos.c static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
req               433 drivers/base/power/qos.c 	if (!req) /*guard against callers passing in null */
req               436 drivers/base/power/qos.c 	if (WARN(!dev_pm_qos_request_active(req),
req               440 drivers/base/power/qos.c 	if (IS_ERR_OR_NULL(req->dev->power.qos))
req               443 drivers/base/power/qos.c 	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
req               445 drivers/base/power/qos.c 	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
req               446 drivers/base/power/qos.c 	memset(req, 0, sizeof(*req));
req               465 drivers/base/power/qos.c int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
req               470 drivers/base/power/qos.c 	ret = __dev_pm_qos_remove_request(req);
req               568 drivers/base/power/qos.c 				    struct dev_pm_qos_request *req,
req               589 drivers/base/power/qos.c 		ret = dev_pm_qos_add_request(ancestor, req, type, value);
req               592 drivers/base/power/qos.c 		req->dev = NULL;
req               601 drivers/base/power/qos.c 	struct dev_pm_qos_request *req = NULL;
req               605 drivers/base/power/qos.c 		req = dev->power.qos->resume_latency_req;
req               609 drivers/base/power/qos.c 		req = dev->power.qos->latency_tolerance_req;
req               613 drivers/base/power/qos.c 		req = dev->power.qos->flags_req;
req               620 drivers/base/power/qos.c 	__dev_pm_qos_remove_request(req);
req               621 drivers/base/power/qos.c 	kfree(req);
req               639 drivers/base/power/qos.c 	struct dev_pm_qos_request *req;
req               645 drivers/base/power/qos.c 	req = kzalloc(sizeof(*req), GFP_KERNEL);
req               646 drivers/base/power/qos.c 	if (!req)
req               649 drivers/base/power/qos.c 	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
req               651 drivers/base/power/qos.c 		kfree(req);
req               665 drivers/base/power/qos.c 		__dev_pm_qos_remove_request(req);
req               666 drivers/base/power/qos.c 		kfree(req);
req               670 drivers/base/power/qos.c 	dev->power.qos->resume_latency_req = req;
req               715 drivers/base/power/qos.c 	struct dev_pm_qos_request *req;
req               721 drivers/base/power/qos.c 	req = kzalloc(sizeof(*req), GFP_KERNEL);
req               722 drivers/base/power/qos.c 	if (!req)
req               725 drivers/base/power/qos.c 	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
req               727 drivers/base/power/qos.c 		kfree(req);
req               742 drivers/base/power/qos.c 		__dev_pm_qos_remove_request(req);
req               743 drivers/base/power/qos.c 		kfree(req);
req               747 drivers/base/power/qos.c 	dev->power.qos->flags_req = req;
req               851 drivers/base/power/qos.c 		struct dev_pm_qos_request *req;
req               860 drivers/base/power/qos.c 		req = kzalloc(sizeof(*req), GFP_KERNEL);
req               861 drivers/base/power/qos.c 		if (!req) {
req               865 drivers/base/power/qos.c 		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
req               867 drivers/base/power/qos.c 			kfree(req);
req               870 drivers/base/power/qos.c 		dev->power.qos->latency_tolerance_req = req;
req               892 drivers/base/power/runtime.c 	enum rpm_request req;
req               899 drivers/base/power/runtime.c 	req = dev->power.request;
req               903 drivers/base/power/runtime.c 	switch (req) {
req               109 drivers/bcma/core.c void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, bool on)
req               113 drivers/bcma/core.c 	WARN_ON(req & ~BCMA_CLKCTLST_EXTRESREQ);
req               117 drivers/bcma/core.c 		bcma_set32(core, BCMA_CLKCTLST, req);
req               134 drivers/bcma/core.c 		bcma_mask32(core, BCMA_CLKCTLST, ~req);
req               839 drivers/block/aoe/aoecmd.c 	struct aoe_req *req;
req               859 drivers/block/aoe/aoecmd.c 		req = blk_mq_rq_to_pdu(rq);
req               860 drivers/block/aoe/aoecmd.c 		req->nr_bios = 0;
req               862 drivers/block/aoe/aoecmd.c 			req->nr_bios++;
req              1067 drivers/block/aoe/aoecmd.c 	struct aoe_req *req = blk_mq_rq_to_pdu(rq);
req              1072 drivers/block/aoe/aoecmd.c 	if (--req->nr_bios == 0)
req               163 drivers/block/aoe/aoedev.c 	struct aoe_req *req;
req               171 drivers/block/aoe/aoedev.c 	req = blk_mq_rq_to_pdu(rq);
req               175 drivers/block/aoe/aoedev.c 		req->nr_bios--;
req               178 drivers/block/aoe/aoedev.c 	if (!req->nr_bios)
req                92 drivers/block/cryptoloop.c typedef int (*encdec_cbc_t)(struct skcipher_request *req);
req               101 drivers/block/cryptoloop.c 	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
req               110 drivers/block/cryptoloop.c 	skcipher_request_set_sync_tfm(req, tfm);
req               111 drivers/block/cryptoloop.c 	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
req               139 drivers/block/cryptoloop.c 		skcipher_request_set_crypt(req, &sg_in, &sg_out, sz, iv);
req               140 drivers/block/cryptoloop.c 		err = encdecfunc(req);
req               153 drivers/block/cryptoloop.c 	skcipher_request_zero(req);
req                54 drivers/block/drbd/drbd_debugfs.c static void seq_print_request_state(struct seq_file *m, struct drbd_request *req)
req                56 drivers/block/drbd/drbd_debugfs.c 	unsigned int s = req->rq_state;
req                59 drivers/block/drbd/drbd_debugfs.c 	seq_printf(m, "\tmaster: %s", req->master_bio ? "pending" : "completed");
req                96 drivers/block/drbd/drbd_debugfs.c static void seq_print_one_request(struct seq_file *m, struct drbd_request *req, unsigned long now)
req                99 drivers/block/drbd/drbd_debugfs.c 	unsigned int s = req->rq_state;
req               103 drivers/block/drbd/drbd_debugfs.c 		req->epoch,
req               104 drivers/block/drbd/drbd_debugfs.c 		(unsigned long long)req->i.sector, req->i.size >> 9,
req               108 drivers/block/drbd/drbd_debugfs.c 	seq_printf(m, "\t%d", jiffies_to_msecs(now - req->start_jif));
req               109 drivers/block/drbd/drbd_debugfs.c 	seq_print_age_or_dash(m, s & RQ_IN_ACT_LOG, now - req->in_actlog_jif);
req               110 drivers/block/drbd/drbd_debugfs.c 	seq_print_age_or_dash(m, s & RQ_LOCAL_PENDING, now - req->pre_submit_jif);
req               113 drivers/block/drbd/drbd_debugfs.c 	seq_print_age_or_dash(m, s & RQ_NET_SENT, now - req->pre_send_jif);
req               114 drivers/block/drbd/drbd_debugfs.c 	seq_print_age_or_dash(m, (s & RQ_NET_SENT) && !(s & RQ_NET_PENDING), now - req->acked_jif);
req               115 drivers/block/drbd/drbd_debugfs.c 	seq_print_age_or_dash(m, s & RQ_NET_DONE, now - req->net_done_jif);
req               118 drivers/block/drbd/drbd_debugfs.c 	seq_print_request_state(m, req);
req               122 drivers/block/drbd/drbd_debugfs.c static void seq_print_minor_vnr_req(struct seq_file *m, struct drbd_request *req, unsigned long now)
req               124 drivers/block/drbd/drbd_debugfs.c 	seq_printf(m, "%u\t%u\t", req->device->minor, req->device->vnr);
req               125 drivers/block/drbd/drbd_debugfs.c 	seq_print_one_request(m, req, now);
req               165 drivers/block/drbd/drbd_debugfs.c 		struct drbd_request *req;
req               169 drivers/block/drbd/drbd_debugfs.c 			req = list_first_entry_or_null(&device->pending_master_completion[1],
req               173 drivers/block/drbd/drbd_debugfs.c 			if (req && !(req->rq_state & RQ_IN_ACT_LOG))
req               174 drivers/block/drbd/drbd_debugfs.c 				jif = req->start_jif;
req               176 drivers/block/drbd/drbd_debugfs.c 				req = NULL;
req               181 drivers/block/drbd/drbd_debugfs.c 			if (req)
req               305 drivers/block/drbd/drbd_debugfs.c 	struct drbd_request *req;
req               311 drivers/block/drbd/drbd_debugfs.c 	list_for_each_entry(req, &connection->transfer_log, tl_requests) {
req               319 drivers/block/drbd/drbd_debugfs.c 			kref_get(&req->kref);
req               323 drivers/block/drbd/drbd_debugfs.c 			req_next = list_next_entry(req, tl_requests);
req               324 drivers/block/drbd/drbd_debugfs.c 			if (kref_put(&req->kref, drbd_req_destroy))
req               325 drivers/block/drbd/drbd_debugfs.c 				req = req_next;
req               326 drivers/block/drbd/drbd_debugfs.c 			if (&req->tl_requests == &connection->transfer_log)
req               330 drivers/block/drbd/drbd_debugfs.c 		s = req->rq_state;
req               336 drivers/block/drbd/drbd_debugfs.c 		if (req->master_bio == NULL)
req               352 drivers/block/drbd/drbd_debugfs.c 		seq_print_minor_vnr_req(m, req, now);
req              1109 drivers/block/drbd/drbd_int.h extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
req              1455 drivers/block/drbd/drbd_int.h extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
req               186 drivers/block/drbd/drbd_main.c 	struct drbd_request *req = NULL;
req               196 drivers/block/drbd/drbd_main.c 		if (!req) {
req               203 drivers/block/drbd/drbd_main.c 			req = r;
req               204 drivers/block/drbd/drbd_main.c 			expect_epoch = req->epoch;
req               218 drivers/block/drbd/drbd_main.c 	if (req == NULL) {
req               239 drivers/block/drbd/drbd_main.c 	list_for_each_entry(req, &connection->transfer_log, tl_requests)
req               240 drivers/block/drbd/drbd_main.c 		if (req->epoch == expect_epoch)
req               242 drivers/block/drbd/drbd_main.c 	list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
req               243 drivers/block/drbd/drbd_main.c 		if (req->epoch != expect_epoch)
req               245 drivers/block/drbd/drbd_main.c 		_req_mod(req, BARRIER_ACKED);
req               268 drivers/block/drbd/drbd_main.c 	struct drbd_request *req, *r;
req               270 drivers/block/drbd/drbd_main.c 	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
req               271 drivers/block/drbd/drbd_main.c 		_req_mod(req, what);
req               301 drivers/block/drbd/drbd_main.c 	struct drbd_request *req, *r;
req               304 drivers/block/drbd/drbd_main.c 	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
req               305 drivers/block/drbd/drbd_main.c 		if (!(req->rq_state & RQ_LOCAL_PENDING))
req               307 drivers/block/drbd/drbd_main.c 		if (req->device != device)
req               309 drivers/block/drbd/drbd_main.c 		_req_mod(req, ABORT_DISK_IO);
req              1672 drivers/block/drbd/drbd_main.c int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
req              1690 drivers/block/drbd/drbd_main.c 	p->sector = cpu_to_be64(req->i.sector);
req              1691 drivers/block/drbd/drbd_main.c 	p->block_id = (unsigned long)req;
req              1693 drivers/block/drbd/drbd_main.c 	dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio);
req              1698 drivers/block/drbd/drbd_main.c 		if (req->rq_state & RQ_EXP_RECEIVE_ACK)
req              1702 drivers/block/drbd/drbd_main.c 		if (req->rq_state & RQ_EXP_WRITE_ACK
req              1711 drivers/block/drbd/drbd_main.c 		t->size = cpu_to_be32(req->i.size);
req              1721 drivers/block/drbd/drbd_main.c 		wsame->size = cpu_to_be32(req->i.size);
req              1728 drivers/block/drbd/drbd_main.c 		drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest_out);
req              1733 drivers/block/drbd/drbd_main.c 				   bio_iovec(req->master_bio).bv_len);
req              1737 drivers/block/drbd/drbd_main.c 				   sizeof(*p) + digest_size, NULL, req->i.size);
req              1750 drivers/block/drbd/drbd_main.c 		if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || digest_size)
req              1751 drivers/block/drbd/drbd_main.c 			err = _drbd_send_bio(peer_device, req->master_bio);
req              1753 drivers/block/drbd/drbd_main.c 			err = _drbd_send_zc_bio(peer_device, req->master_bio);
req              1760 drivers/block/drbd/drbd_main.c 			drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest);
req              1764 drivers/block/drbd/drbd_main.c 					(unsigned long long)req->i.sector, req->i.size);
req              1811 drivers/block/drbd/drbd_main.c int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_request *req)
req              1820 drivers/block/drbd/drbd_main.c 	p->sector = cpu_to_be64(req->i.sector);
req              1821 drivers/block/drbd/drbd_main.c 	p->blksize = cpu_to_be32(req->i.size);
req              2286 drivers/block/drbd/drbd_main.c 	struct drbd_request *req, *tmp;
req              2292 drivers/block/drbd/drbd_main.c 	list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
req              2293 drivers/block/drbd/drbd_main.c 		struct drbd_device *device = req->device;
req              2294 drivers/block/drbd/drbd_main.c 		struct bio *bio = req->master_bio;
req              2295 drivers/block/drbd/drbd_main.c 		unsigned long start_jif = req->start_jif;
req              2299 drivers/block/drbd/drbd_main.c 			expect(atomic_read(&req->completion_ref) == 0) &&
req              2300 drivers/block/drbd/drbd_main.c 			expect(req->rq_state & RQ_POSTPONED) &&
req              2301 drivers/block/drbd/drbd_main.c 			expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
req              2302 drivers/block/drbd/drbd_main.c 				(req->rq_state & RQ_LOCAL_ABORTED) != 0);
req              2306 drivers/block/drbd/drbd_main.c 				req, atomic_read(&req->completion_ref),
req              2307 drivers/block/drbd/drbd_main.c 				req->rq_state);
req              2314 drivers/block/drbd/drbd_main.c 		kref_put(&req->kref, drbd_req_destroy);
req              2336 drivers/block/drbd/drbd_main.c void drbd_restart_request(struct drbd_request *req)
req              2340 drivers/block/drbd/drbd_main.c 	list_move_tail(&req->tl_requests, &retry.writes);
req              2346 drivers/block/drbd/drbd_main.c 	dec_ap_bio(req->device);
req              2008 drivers/block/drbd/drbd_receiver.c static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_request *req,
req              2031 drivers/block/drbd/drbd_receiver.c 	bio = req->master_bio;
req              2129 drivers/block/drbd/drbd_receiver.c 	struct drbd_request *req;
req              2132 drivers/block/drbd/drbd_receiver.c 	req = (struct drbd_request *)(unsigned long)id;
req              2133 drivers/block/drbd/drbd_receiver.c 	if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
req              2134 drivers/block/drbd/drbd_receiver.c 		return req;
req              2146 drivers/block/drbd/drbd_receiver.c 	struct drbd_request *req;
req              2159 drivers/block/drbd/drbd_receiver.c 	req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
req              2161 drivers/block/drbd/drbd_receiver.c 	if (unlikely(!req))
req              2167 drivers/block/drbd/drbd_receiver.c 	err = recv_dless_read(peer_device, req, sector, pi->size);
req              2169 drivers/block/drbd/drbd_receiver.c 		req_mod(req, DATA_RECEIVED);
req              2216 drivers/block/drbd/drbd_receiver.c 	struct drbd_request *req;
req              2221 drivers/block/drbd/drbd_receiver.c 		req = container_of(i, struct drbd_request, i);
req              2222 drivers/block/drbd/drbd_receiver.c 		if (req->rq_state & RQ_LOCAL_PENDING ||
req              2223 drivers/block/drbd/drbd_receiver.c 		    !(req->rq_state & RQ_POSTPONED))
req              2227 drivers/block/drbd/drbd_receiver.c 		__req_mod(req, CONFLICT_RESOLVED, NULL);
req              2458 drivers/block/drbd/drbd_receiver.c 		struct drbd_request *req;
req              2463 drivers/block/drbd/drbd_receiver.c 		req = container_of(i, struct drbd_request, i);
req              2464 drivers/block/drbd/drbd_receiver.c 		if (!(req->rq_state & RQ_POSTPONED))
req              2466 drivers/block/drbd/drbd_receiver.c 		req->rq_state &= ~RQ_POSTPONED;
req              2467 drivers/block/drbd/drbd_receiver.c 		__req_mod(req, NEG_ACKED, &m);
req              2539 drivers/block/drbd/drbd_receiver.c 			struct drbd_request *req =
req              2548 drivers/block/drbd/drbd_receiver.c 			if (req->rq_state & RQ_LOCAL_PENDING ||
req              2549 drivers/block/drbd/drbd_receiver.c 			    !(req->rq_state & RQ_POSTPONED)) {
req              2561 drivers/block/drbd/drbd_receiver.c 				err = drbd_wait_misc(device, &req->i);
req              5720 drivers/block/drbd/drbd_receiver.c 	struct drbd_request *req;
req              5724 drivers/block/drbd/drbd_receiver.c 	req = find_request(device, root, id, sector, missing_ok, func);
req              5725 drivers/block/drbd/drbd_receiver.c 	if (unlikely(!req)) {
req              5729 drivers/block/drbd/drbd_receiver.c 	__req_mod(req, what, &m);
req                25 drivers/block/drbd/drbd_req.c static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
req                29 drivers/block/drbd/drbd_req.c 	generic_start_io_acct(q, bio_op(req->master_bio),
req                30 drivers/block/drbd/drbd_req.c 				req->i.size >> 9, &device->vdisk->part0);
req                34 drivers/block/drbd/drbd_req.c static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
req                38 drivers/block/drbd/drbd_req.c 	generic_end_io_acct(q, bio_op(req->master_bio),
req                39 drivers/block/drbd/drbd_req.c 			    &device->vdisk->part0, req->start_jif);
req                44 drivers/block/drbd/drbd_req.c 	struct drbd_request *req;
req                46 drivers/block/drbd/drbd_req.c 	req = mempool_alloc(&drbd_request_mempool, GFP_NOIO);
req                47 drivers/block/drbd/drbd_req.c 	if (!req)
req                49 drivers/block/drbd/drbd_req.c 	memset(req, 0, sizeof(*req));
req                51 drivers/block/drbd/drbd_req.c 	drbd_req_make_private_bio(req, bio_src);
req                52 drivers/block/drbd/drbd_req.c 	req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
req                56 drivers/block/drbd/drbd_req.c 	req->device = device;
req                57 drivers/block/drbd/drbd_req.c 	req->master_bio = bio_src;
req                58 drivers/block/drbd/drbd_req.c 	req->epoch = 0;
req                60 drivers/block/drbd/drbd_req.c 	drbd_clear_interval(&req->i);
req                61 drivers/block/drbd/drbd_req.c 	req->i.sector     = bio_src->bi_iter.bi_sector;
req                62 drivers/block/drbd/drbd_req.c 	req->i.size      = bio_src->bi_iter.bi_size;
req                63 drivers/block/drbd/drbd_req.c 	req->i.local = true;
req                64 drivers/block/drbd/drbd_req.c 	req->i.waiting = false;
req                66 drivers/block/drbd/drbd_req.c 	INIT_LIST_HEAD(&req->tl_requests);
req                67 drivers/block/drbd/drbd_req.c 	INIT_LIST_HEAD(&req->w.list);
req                68 drivers/block/drbd/drbd_req.c 	INIT_LIST_HEAD(&req->req_pending_master_completion);
req                69 drivers/block/drbd/drbd_req.c 	INIT_LIST_HEAD(&req->req_pending_local);
req                72 drivers/block/drbd/drbd_req.c 	atomic_set(&req->completion_ref, 1);
req                74 drivers/block/drbd/drbd_req.c 	kref_init(&req->kref);
req                75 drivers/block/drbd/drbd_req.c 	return req;
req                79 drivers/block/drbd/drbd_req.c 					 struct drbd_request *req)
req                81 drivers/block/drbd/drbd_req.c 	struct drbd_device *device = req->device;
req                82 drivers/block/drbd/drbd_req.c 	struct drbd_interval *i = &req->i;
req                93 drivers/block/drbd/drbd_req.c 	struct drbd_request *req = container_of(kref, struct drbd_request, kref);
req                94 drivers/block/drbd/drbd_req.c 	struct drbd_device *device = req->device;
req                95 drivers/block/drbd/drbd_req.c 	const unsigned s = req->rq_state;
req                97 drivers/block/drbd/drbd_req.c 	if ((req->master_bio && !(s & RQ_POSTPONED)) ||
req                98 drivers/block/drbd/drbd_req.c 		atomic_read(&req->completion_ref) ||
req               102 drivers/block/drbd/drbd_req.c 				s, atomic_read(&req->completion_ref));
req               114 drivers/block/drbd/drbd_req.c 	list_del_init(&req->tl_requests);
req               118 drivers/block/drbd/drbd_req.c 	if (!drbd_interval_empty(&req->i)) {
req               125 drivers/block/drbd/drbd_req.c 		drbd_remove_request_interval(root, req);
req               126 drivers/block/drbd/drbd_req.c 	} else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0)
req               128 drivers/block/drbd/drbd_req.c 			s, (unsigned long long)req->i.sector, req->i.size);
req               147 drivers/block/drbd/drbd_req.c 				drbd_set_out_of_sync(device, req->i.sector, req->i.size);
req               150 drivers/block/drbd/drbd_req.c 				drbd_set_in_sync(device, req->i.sector, req->i.size);
req               165 drivers/block/drbd/drbd_req.c 				drbd_al_complete_io(device, &req->i);
req               170 drivers/block/drbd/drbd_req.c 					 (unsigned long long) req->i.sector, req->i.size);
req               175 drivers/block/drbd/drbd_req.c 	mempool_free(req, &drbd_request_mempool);
req               211 drivers/block/drbd/drbd_req.c void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
req               213 drivers/block/drbd/drbd_req.c 	const unsigned s = req->rq_state;
req               214 drivers/block/drbd/drbd_req.c 	struct drbd_device *device = req->device;
req               233 drivers/block/drbd/drbd_req.c 	if (!req->master_bio) {
req               252 drivers/block/drbd/drbd_req.c 	error = PTR_ERR(req->private_bio);
req               261 drivers/block/drbd/drbd_req.c 	if (op_is_write(bio_op(req->master_bio)) &&
req               262 drivers/block/drbd/drbd_req.c 	    req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
req               266 drivers/block/drbd/drbd_req.c 	_drbd_end_io_acct(device, req);
req               283 drivers/block/drbd/drbd_req.c 	    bio_op(req->master_bio) == REQ_OP_READ &&
req               284 drivers/block/drbd/drbd_req.c 	    !(req->master_bio->bi_opf & REQ_RAHEAD) &&
req               285 drivers/block/drbd/drbd_req.c 	    !list_empty(&req->tl_requests))
req               286 drivers/block/drbd/drbd_req.c 		req->rq_state |= RQ_POSTPONED;
req               288 drivers/block/drbd/drbd_req.c 	if (!(req->rq_state & RQ_POSTPONED)) {
req               290 drivers/block/drbd/drbd_req.c 		m->bio = req->master_bio;
req               291 drivers/block/drbd/drbd_req.c 		req->master_bio = NULL;
req               296 drivers/block/drbd/drbd_req.c 		req->i.completed = true;
req               299 drivers/block/drbd/drbd_req.c 	if (req->i.waiting)
req               306 drivers/block/drbd/drbd_req.c 	list_del_init(&req->req_pending_master_completion);
req               310 drivers/block/drbd/drbd_req.c static void drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
req               312 drivers/block/drbd/drbd_req.c 	struct drbd_device *device = req->device;
req               313 drivers/block/drbd/drbd_req.c 	D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));
req               318 drivers/block/drbd/drbd_req.c 	if (!atomic_sub_and_test(put, &req->completion_ref))
req               321 drivers/block/drbd/drbd_req.c 	drbd_req_complete(req, m);
req               325 drivers/block/drbd/drbd_req.c 	if (req->rq_state & RQ_LOCAL_ABORTED)
req               328 drivers/block/drbd/drbd_req.c 	if (req->rq_state & RQ_POSTPONED) {
req               331 drivers/block/drbd/drbd_req.c 		drbd_restart_request(req);
req               335 drivers/block/drbd/drbd_req.c 	kref_put(&req->kref, drbd_req_destroy);
req               338 drivers/block/drbd/drbd_req.c static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
req               344 drivers/block/drbd/drbd_req.c 		connection->req_next = req;
req               347 drivers/block/drbd/drbd_req.c static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
req               352 drivers/block/drbd/drbd_req.c 	if (connection->req_next != req)
req               354 drivers/block/drbd/drbd_req.c 	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
req               355 drivers/block/drbd/drbd_req.c 		const unsigned s = req->rq_state;
req               359 drivers/block/drbd/drbd_req.c 	if (&req->tl_requests == &connection->transfer_log)
req               360 drivers/block/drbd/drbd_req.c 		req = NULL;
req               361 drivers/block/drbd/drbd_req.c 	connection->req_next = req;
req               364 drivers/block/drbd/drbd_req.c static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
req               370 drivers/block/drbd/drbd_req.c 		connection->req_ack_pending = req;
req               373 drivers/block/drbd/drbd_req.c static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
req               378 drivers/block/drbd/drbd_req.c 	if (connection->req_ack_pending != req)
req               380 drivers/block/drbd/drbd_req.c 	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
req               381 drivers/block/drbd/drbd_req.c 		const unsigned s = req->rq_state;
req               385 drivers/block/drbd/drbd_req.c 	if (&req->tl_requests == &connection->transfer_log)
req               386 drivers/block/drbd/drbd_req.c 		req = NULL;
req               387 drivers/block/drbd/drbd_req.c 	connection->req_ack_pending = req;
req               390 drivers/block/drbd/drbd_req.c static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
req               396 drivers/block/drbd/drbd_req.c 		connection->req_not_net_done = req;
req               399 drivers/block/drbd/drbd_req.c static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
req               404 drivers/block/drbd/drbd_req.c 	if (connection->req_not_net_done != req)
req               406 drivers/block/drbd/drbd_req.c 	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
req               407 drivers/block/drbd/drbd_req.c 		const unsigned s = req->rq_state;
req               411 drivers/block/drbd/drbd_req.c 	if (&req->tl_requests == &connection->transfer_log)
req               412 drivers/block/drbd/drbd_req.c 		req = NULL;
req               413 drivers/block/drbd/drbd_req.c 	connection->req_not_net_done = req;
req               418 drivers/block/drbd/drbd_req.c static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
req               421 drivers/block/drbd/drbd_req.c 	struct drbd_device *device = req->device;
req               423 drivers/block/drbd/drbd_req.c 	unsigned s = req->rq_state;
req               431 drivers/block/drbd/drbd_req.c 	req->rq_state &= ~clear;
req               432 drivers/block/drbd/drbd_req.c 	req->rq_state |= set;
req               435 drivers/block/drbd/drbd_req.c 	if (req->rq_state == s)
req               440 drivers/block/drbd/drbd_req.c 	kref_get(&req->kref);
req               443 drivers/block/drbd/drbd_req.c 		atomic_inc(&req->completion_ref);
req               447 drivers/block/drbd/drbd_req.c 		atomic_inc(&req->completion_ref);
req               451 drivers/block/drbd/drbd_req.c 		atomic_inc(&req->completion_ref);
req               452 drivers/block/drbd/drbd_req.c 		set_if_null_req_next(peer_device, req);
req               456 drivers/block/drbd/drbd_req.c 		kref_get(&req->kref); /* wait for the DONE */
req               461 drivers/block/drbd/drbd_req.c 			atomic_add(req->i.size >> 9, &device->ap_in_flight);
req               462 drivers/block/drbd/drbd_req.c 			set_if_null_req_not_net_done(peer_device, req);
req               464 drivers/block/drbd/drbd_req.c 		if (req->rq_state & RQ_NET_PENDING)
req               465 drivers/block/drbd/drbd_req.c 			set_if_null_req_ack_pending(peer_device, req);
req               469 drivers/block/drbd/drbd_req.c 		atomic_inc(&req->completion_ref);
req               477 drivers/block/drbd/drbd_req.c 		D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
req               482 drivers/block/drbd/drbd_req.c 		if (req->rq_state & RQ_LOCAL_ABORTED)
req               483 drivers/block/drbd/drbd_req.c 			kref_put(&req->kref, drbd_req_destroy);
req               486 drivers/block/drbd/drbd_req.c 		list_del_init(&req->req_pending_local);
req               492 drivers/block/drbd/drbd_req.c 		req->acked_jif = jiffies;
req               493 drivers/block/drbd/drbd_req.c 		advance_conn_req_ack_pending(peer_device, req);
req               498 drivers/block/drbd/drbd_req.c 		advance_conn_req_next(peer_device, req);
req               503 drivers/block/drbd/drbd_req.c 			atomic_sub(req->i.size >> 9, &device->ap_in_flight);
req               505 drivers/block/drbd/drbd_req.c 			kref_put(&req->kref, drbd_req_destroy);
req               506 drivers/block/drbd/drbd_req.c 		req->net_done_jif = jiffies;
req               511 drivers/block/drbd/drbd_req.c 		advance_conn_req_next(peer_device, req);
req               512 drivers/block/drbd/drbd_req.c 		advance_conn_req_ack_pending(peer_device, req);
req               513 drivers/block/drbd/drbd_req.c 		advance_conn_req_not_net_done(peer_device, req);
req               519 drivers/block/drbd/drbd_req.c 	if (req->i.waiting)
req               522 drivers/block/drbd/drbd_req.c 	drbd_req_put_completion_ref(req, m, c_put);
req               523 drivers/block/drbd/drbd_req.c 	kref_put(&req->kref, drbd_req_destroy);
req               526 drivers/block/drbd/drbd_req.c static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
req               534 drivers/block/drbd/drbd_req.c 			(req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
req               535 drivers/block/drbd/drbd_req.c 			(unsigned long long)req->i.sector,
req               536 drivers/block/drbd/drbd_req.c 			req->i.size >> 9,
req               546 drivers/block/drbd/drbd_req.c static inline bool is_pending_write_protocol_A(struct drbd_request *req)
req               548 drivers/block/drbd/drbd_req.c 	return (req->rq_state &
req               565 drivers/block/drbd/drbd_req.c int __req_mod(struct drbd_request *req, enum drbd_req_event what,
req               568 drivers/block/drbd/drbd_req.c 	struct drbd_device *const device = req->device;
req               591 drivers/block/drbd/drbd_req.c 		D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
req               596 drivers/block/drbd/drbd_req.c 		req->rq_state |=
req               599 drivers/block/drbd/drbd_req.c 		mod_rq_state(req, m, 0, RQ_NET_PENDING);
req               604 drivers/block/drbd/drbd_req.c 		D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK));
req               605 drivers/block/drbd/drbd_req.c 		mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
req               609 drivers/block/drbd/drbd_req.c 		if (req->rq_state & RQ_WRITE)
req               610 drivers/block/drbd/drbd_req.c 			device->writ_cnt += req->i.size >> 9;
req               612 drivers/block/drbd/drbd_req.c 			device->read_cnt += req->i.size >> 9;
req               614 drivers/block/drbd/drbd_req.c 		mod_rq_state(req, m, RQ_LOCAL_PENDING,
req               619 drivers/block/drbd/drbd_req.c 		mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
req               623 drivers/block/drbd/drbd_req.c 		drbd_report_io_error(device, req);
req               625 drivers/block/drbd/drbd_req.c 		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
req               629 drivers/block/drbd/drbd_req.c 		drbd_set_out_of_sync(device, req->i.sector, req->i.size);
req               630 drivers/block/drbd/drbd_req.c 		drbd_report_io_error(device, req);
req               635 drivers/block/drbd/drbd_req.c 		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
req               642 drivers/block/drbd/drbd_req.c 		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
req               656 drivers/block/drbd/drbd_req.c 		D_ASSERT(device, drbd_interval_empty(&req->i));
req               657 drivers/block/drbd/drbd_req.c 		drbd_insert_interval(&device->read_requests, &req->i);
req               661 drivers/block/drbd/drbd_req.c 		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
req               662 drivers/block/drbd/drbd_req.c 		D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
req               663 drivers/block/drbd/drbd_req.c 		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
req               664 drivers/block/drbd/drbd_req.c 		req->w.cb = w_send_read_req;
req               666 drivers/block/drbd/drbd_req.c 				&req->w);
req               675 drivers/block/drbd/drbd_req.c 		D_ASSERT(device, drbd_interval_empty(&req->i));
req               676 drivers/block/drbd/drbd_req.c 		drbd_insert_interval(&device->write_requests, &req->i);
req               698 drivers/block/drbd/drbd_req.c 		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
req               699 drivers/block/drbd/drbd_req.c 		mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
req               700 drivers/block/drbd/drbd_req.c 		req->w.cb =  w_send_dblock;
req               702 drivers/block/drbd/drbd_req.c 				&req->w);
req               715 drivers/block/drbd/drbd_req.c 		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
req               716 drivers/block/drbd/drbd_req.c 		req->w.cb =  w_send_out_of_sync;
req               718 drivers/block/drbd/drbd_req.c 				&req->w);
req               726 drivers/block/drbd/drbd_req.c 		mod_rq_state(req, m, RQ_NET_QUEUED, 0);
req               731 drivers/block/drbd/drbd_req.c 		if (is_pending_write_protocol_A(req))
req               734 drivers/block/drbd/drbd_req.c 			mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING,
req               737 drivers/block/drbd/drbd_req.c 			mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
req               746 drivers/block/drbd/drbd_req.c 		mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
req               751 drivers/block/drbd/drbd_req.c 		mod_rq_state(req, m,
req               764 drivers/block/drbd/drbd_req.c 		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
req               765 drivers/block/drbd/drbd_req.c 		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
req               766 drivers/block/drbd/drbd_req.c 		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
req               770 drivers/block/drbd/drbd_req.c 		req->rq_state |= RQ_NET_SIS;
req               781 drivers/block/drbd/drbd_req.c 		D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK);
req               786 drivers/block/drbd/drbd_req.c 		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
req               790 drivers/block/drbd/drbd_req.c 		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
req               795 drivers/block/drbd/drbd_req.c 		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
req               796 drivers/block/drbd/drbd_req.c 		req->rq_state |= RQ_POSTPONED;
req               797 drivers/block/drbd/drbd_req.c 		if (req->i.waiting)
req               805 drivers/block/drbd/drbd_req.c 		mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0);
req               809 drivers/block/drbd/drbd_req.c 		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
req               811 drivers/block/drbd/drbd_req.c 		mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
req               815 drivers/block/drbd/drbd_req.c 		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
req               818 drivers/block/drbd/drbd_req.c 		mod_rq_state(req, m,
req               823 drivers/block/drbd/drbd_req.c 		if (bio_data_dir(req->master_bio) == WRITE)
req               827 drivers/block/drbd/drbd_req.c 		req->w.cb = w_restart_disk_io;
req               829 drivers/block/drbd/drbd_req.c 				&req->w);
req               834 drivers/block/drbd/drbd_req.c 		if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
req               835 drivers/block/drbd/drbd_req.c 			mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
req               844 drivers/block/drbd/drbd_req.c 		if (!(req->rq_state & RQ_NET_OK)) {
req               848 drivers/block/drbd/drbd_req.c 			mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
req               849 drivers/block/drbd/drbd_req.c 			if (req->w.cb) {
req               852 drivers/block/drbd/drbd_req.c 						&req->w);
req               853 drivers/block/drbd/drbd_req.c 				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
req               861 drivers/block/drbd/drbd_req.c 		if (!(req->rq_state & RQ_WRITE))
req               864 drivers/block/drbd/drbd_req.c 		if (req->rq_state & RQ_NET_PENDING) {
req               874 drivers/block/drbd/drbd_req.c 		mod_rq_state(req, m, RQ_COMPLETION_SUSP,
req               875 drivers/block/drbd/drbd_req.c 				(req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0);
req               879 drivers/block/drbd/drbd_req.c 		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
req               880 drivers/block/drbd/drbd_req.c 		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
req               885 drivers/block/drbd/drbd_req.c 		mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
req               959 drivers/block/drbd/drbd_req.c static void complete_conflicting_writes(struct drbd_request *req)
req               962 drivers/block/drbd/drbd_req.c 	struct drbd_device *device = req->device;
req               964 drivers/block/drbd/drbd_req.c 	sector_t sector = req->i.sector;
req               965 drivers/block/drbd/drbd_req.c 	int size = req->i.size;
req              1047 drivers/block/drbd/drbd_req.c static bool do_remote_read(struct drbd_request *req)
req              1049 drivers/block/drbd/drbd_req.c 	struct drbd_device *device = req->device;
req              1052 drivers/block/drbd/drbd_req.c 	if (req->private_bio) {
req              1054 drivers/block/drbd/drbd_req.c 					req->i.sector, req->i.size)) {
req              1055 drivers/block/drbd/drbd_req.c 			bio_put(req->private_bio);
req              1056 drivers/block/drbd/drbd_req.c 			req->private_bio = NULL;
req              1064 drivers/block/drbd/drbd_req.c 	if (req->private_bio == NULL)
req              1074 drivers/block/drbd/drbd_req.c 	if (rbm == RB_PREFER_LOCAL && req->private_bio)
req              1077 drivers/block/drbd/drbd_req.c 	if (remote_due_to_read_balancing(device, req->i.sector, rbm)) {
req              1078 drivers/block/drbd/drbd_req.c 		if (req->private_bio) {
req              1079 drivers/block/drbd/drbd_req.c 			bio_put(req->private_bio);
req              1080 drivers/block/drbd/drbd_req.c 			req->private_bio = NULL;
req              1110 drivers/block/drbd/drbd_req.c static int drbd_process_write_request(struct drbd_request *req)
req              1112 drivers/block/drbd/drbd_req.c 	struct drbd_device *device = req->device;
req              1124 drivers/block/drbd/drbd_req.c 	if (unlikely(req->i.size == 0)) {
req              1126 drivers/block/drbd/drbd_req.c 		D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
req              1128 drivers/block/drbd/drbd_req.c 			_req_mod(req, QUEUE_AS_DRBD_BARRIER);
req              1138 drivers/block/drbd/drbd_req.c 		_req_mod(req, TO_BE_SENT);
req              1139 drivers/block/drbd/drbd_req.c 		_req_mod(req, QUEUE_FOR_NET_WRITE);
req              1140 drivers/block/drbd/drbd_req.c 	} else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size))
req              1141 drivers/block/drbd/drbd_req.c 		_req_mod(req, QUEUE_FOR_SEND_OOS);
req              1146 drivers/block/drbd/drbd_req.c static void drbd_process_discard_or_zeroes_req(struct drbd_request *req, int flags)
req              1148 drivers/block/drbd/drbd_req.c 	int err = drbd_issue_discard_or_zero_out(req->device,
req              1149 drivers/block/drbd/drbd_req.c 				req->i.sector, req->i.size >> 9, flags);
req              1151 drivers/block/drbd/drbd_req.c 		req->private_bio->bi_status = BLK_STS_IOERR;
req              1152 drivers/block/drbd/drbd_req.c 	bio_endio(req->private_bio);
req              1156 drivers/block/drbd/drbd_req.c drbd_submit_req_private_bio(struct drbd_request *req)
req              1158 drivers/block/drbd/drbd_req.c 	struct drbd_device *device = req->device;
req              1159 drivers/block/drbd/drbd_req.c 	struct bio *bio = req->private_bio;
req              1180 drivers/block/drbd/drbd_req.c 			drbd_process_discard_or_zeroes_req(req, EE_ZEROOUT |
req              1183 drivers/block/drbd/drbd_req.c 			drbd_process_discard_or_zeroes_req(req, EE_TRIM);
req              1191 drivers/block/drbd/drbd_req.c static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
req              1194 drivers/block/drbd/drbd_req.c 	list_add_tail(&req->tl_requests, &device->submit.writes);
req              1195 drivers/block/drbd/drbd_req.c 	list_add_tail(&req->req_pending_master_completion,
req              1212 drivers/block/drbd/drbd_req.c 	struct drbd_request *req;
req              1215 drivers/block/drbd/drbd_req.c 	req = drbd_req_new(device, bio);
req              1216 drivers/block/drbd/drbd_req.c 	if (!req) {
req              1225 drivers/block/drbd/drbd_req.c 	req->start_jif = start_jif;
req              1228 drivers/block/drbd/drbd_req.c 		bio_put(req->private_bio);
req              1229 drivers/block/drbd/drbd_req.c 		req->private_bio = NULL;
req              1233 drivers/block/drbd/drbd_req.c 	_drbd_start_io_acct(device, req);
req              1240 drivers/block/drbd/drbd_req.c 	if (rw == WRITE && req->private_bio && req->i.size
req              1242 drivers/block/drbd/drbd_req.c 		if (!drbd_al_begin_io_fastpath(device, &req->i))
req              1244 drivers/block/drbd/drbd_req.c 		req->rq_state |= RQ_IN_ACT_LOG;
req              1245 drivers/block/drbd/drbd_req.c 		req->in_actlog_jif = jiffies;
req              1247 drivers/block/drbd/drbd_req.c 	return req;
req              1251 drivers/block/drbd/drbd_req.c 	drbd_queue_write(device, req);
req              1281 drivers/block/drbd/drbd_req.c 	struct drbd_request *req = plug->most_recent_req;
req              1284 drivers/block/drbd/drbd_req.c 	if (!req)
req              1290 drivers/block/drbd/drbd_req.c 	req->rq_state |= RQ_UNPLUG;
req              1292 drivers/block/drbd/drbd_req.c 	drbd_queue_unplug(req->device);
req              1293 drivers/block/drbd/drbd_req.c 	kref_put(&req->kref, drbd_req_destroy);
req              1311 drivers/block/drbd/drbd_req.c static void drbd_update_plug(struct drbd_plug_cb *plug, struct drbd_request *req)
req              1316 drivers/block/drbd/drbd_req.c 	kref_get(&req->kref);
req              1317 drivers/block/drbd/drbd_req.c 	plug->most_recent_req = req;
req              1322 drivers/block/drbd/drbd_req.c static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
req              1325 drivers/block/drbd/drbd_req.c 	const int rw = bio_data_dir(req->master_bio);
req              1335 drivers/block/drbd/drbd_req.c 		complete_conflicting_writes(req);
req              1346 drivers/block/drbd/drbd_req.c 		req->rq_state |= RQ_POSTPONED;
req              1347 drivers/block/drbd/drbd_req.c 		if (req->private_bio) {
req              1348 drivers/block/drbd/drbd_req.c 			bio_put(req->private_bio);
req              1349 drivers/block/drbd/drbd_req.c 			req->private_bio = NULL;
req              1359 drivers/block/drbd/drbd_req.c 		if (!do_remote_read(req) && !req->private_bio)
req              1364 drivers/block/drbd/drbd_req.c 	req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr);
req              1368 drivers/block/drbd/drbd_req.c 	if (likely(req->i.size!=0)) {
req              1372 drivers/block/drbd/drbd_req.c 		list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log);
req              1376 drivers/block/drbd/drbd_req.c 		if (req->private_bio && !may_do_writes(device)) {
req              1377 drivers/block/drbd/drbd_req.c 			bio_put(req->private_bio);
req              1378 drivers/block/drbd/drbd_req.c 			req->private_bio = NULL;
req              1382 drivers/block/drbd/drbd_req.c 		if (!drbd_process_write_request(req))
req              1387 drivers/block/drbd/drbd_req.c 		if (req->private_bio == NULL) {
req              1388 drivers/block/drbd/drbd_req.c 			_req_mod(req, TO_BE_SENT);
req              1389 drivers/block/drbd/drbd_req.c 			_req_mod(req, QUEUE_FOR_NET_READ);
req              1397 drivers/block/drbd/drbd_req.c 			drbd_update_plug(plug, req);
req              1402 drivers/block/drbd/drbd_req.c 	if (list_empty(&req->req_pending_master_completion))
req              1403 drivers/block/drbd/drbd_req.c 		list_add_tail(&req->req_pending_master_completion,
req              1405 drivers/block/drbd/drbd_req.c 	if (req->private_bio) {
req              1407 drivers/block/drbd/drbd_req.c 		req->pre_submit_jif = jiffies;
req              1408 drivers/block/drbd/drbd_req.c 		list_add_tail(&req->req_pending_local,
req              1410 drivers/block/drbd/drbd_req.c 		_req_mod(req, TO_BE_SUBMITTED);
req              1417 drivers/block/drbd/drbd_req.c 					(unsigned long long)req->i.sector, req->i.size >> 9);
req              1423 drivers/block/drbd/drbd_req.c 	drbd_req_put_completion_ref(req, &m, 1);
req              1433 drivers/block/drbd/drbd_req.c 		drbd_submit_req_private_bio(req);
req              1440 drivers/block/drbd/drbd_req.c 	struct drbd_request *req = drbd_request_prepare(device, bio, start_jif);
req              1441 drivers/block/drbd/drbd_req.c 	if (IS_ERR_OR_NULL(req))
req              1443 drivers/block/drbd/drbd_req.c 	drbd_send_and_submit(device, req);
req              1449 drivers/block/drbd/drbd_req.c 	struct drbd_request *req, *tmp;
req              1452 drivers/block/drbd/drbd_req.c 	list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
req              1453 drivers/block/drbd/drbd_req.c 		const int rw = bio_data_dir(req->master_bio);
req              1456 drivers/block/drbd/drbd_req.c 		&& req->private_bio && req->i.size
req              1458 drivers/block/drbd/drbd_req.c 			if (!drbd_al_begin_io_fastpath(device, &req->i))
req              1461 drivers/block/drbd/drbd_req.c 			req->rq_state |= RQ_IN_ACT_LOG;
req              1462 drivers/block/drbd/drbd_req.c 			req->in_actlog_jif = jiffies;
req              1466 drivers/block/drbd/drbd_req.c 		list_del_init(&req->tl_requests);
req              1467 drivers/block/drbd/drbd_req.c 		drbd_send_and_submit(device, req);
req              1477 drivers/block/drbd/drbd_req.c 	struct drbd_request *req;
req              1482 drivers/block/drbd/drbd_req.c 	while ((req = list_first_entry_or_null(incoming, struct drbd_request, tl_requests))) {
req              1483 drivers/block/drbd/drbd_req.c 		err = drbd_al_begin_io_nonblock(device, &req->i);
req              1489 drivers/block/drbd/drbd_req.c 			list_move_tail(&req->tl_requests, later);
req              1491 drivers/block/drbd/drbd_req.c 			list_move_tail(&req->tl_requests, pending);
req              1502 drivers/block/drbd/drbd_req.c 	struct drbd_request *req;
req              1505 drivers/block/drbd/drbd_req.c 	while ((req = list_first_entry_or_null(pending, struct drbd_request, tl_requests))) {
req              1506 drivers/block/drbd/drbd_req.c 		req->rq_state |= RQ_IN_ACT_LOG;
req              1507 drivers/block/drbd/drbd_req.c 		req->in_actlog_jif = jiffies;
req              1509 drivers/block/drbd/drbd_req.c 		list_del_init(&req->tl_requests);
req              1510 drivers/block/drbd/drbd_req.c 		drbd_send_and_submit(device, req);
req               259 drivers/block/drbd/drbd_req.h static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
req               264 drivers/block/drbd/drbd_req.h 	req->private_bio = bio;
req               266 drivers/block/drbd/drbd_req.h 	bio->bi_private  = req;
req               281 drivers/block/drbd/drbd_req.h extern void _req_may_be_done(struct drbd_request *req,
req               283 drivers/block/drbd/drbd_req.h extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
req               293 drivers/block/drbd/drbd_req.h extern void drbd_restart_request(struct drbd_request *req);
req               297 drivers/block/drbd/drbd_req.h static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
req               299 drivers/block/drbd/drbd_req.h 	struct drbd_device *device = req->device;
req               304 drivers/block/drbd/drbd_req.h 	rv = __req_mod(req, what, &m);
req               315 drivers/block/drbd/drbd_req.h static inline int req_mod(struct drbd_request *req,
req               319 drivers/block/drbd/drbd_req.h 	struct drbd_device *device = req->device;
req               324 drivers/block/drbd/drbd_req.h 	rv = __req_mod(req, what, &m);
req               208 drivers/block/drbd/drbd_worker.c 	struct drbd_request *req = bio->bi_private;
req               209 drivers/block/drbd/drbd_worker.c 	struct drbd_device *device = req->device;
req               241 drivers/block/drbd/drbd_worker.c 	if (unlikely(req->rq_state & RQ_LOCAL_ABORTED)) {
req               273 drivers/block/drbd/drbd_worker.c 	req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
req               278 drivers/block/drbd/drbd_worker.c 	__req_mod(req, what, &m);
req              1425 drivers/block/drbd/drbd_worker.c 	struct drbd_request *req = container_of(w, struct drbd_request, w);
req              1426 drivers/block/drbd/drbd_worker.c 	struct drbd_device *device = req->device;
req              1432 drivers/block/drbd/drbd_worker.c 		req_mod(req, SEND_CANCELED);
req              1435 drivers/block/drbd/drbd_worker.c 	req->pre_send_jif = jiffies;
req              1441 drivers/block/drbd/drbd_worker.c 	maybe_send_barrier(connection, req->epoch);
req              1443 drivers/block/drbd/drbd_worker.c 	err = drbd_send_out_of_sync(peer_device, req);
req              1444 drivers/block/drbd/drbd_worker.c 	req_mod(req, OOS_HANDED_TO_NETWORK);
req              1456 drivers/block/drbd/drbd_worker.c 	struct drbd_request *req = container_of(w, struct drbd_request, w);
req              1457 drivers/block/drbd/drbd_worker.c 	struct drbd_device *device = req->device;
req              1460 drivers/block/drbd/drbd_worker.c 	bool do_send_unplug = req->rq_state & RQ_UNPLUG;
req              1464 drivers/block/drbd/drbd_worker.c 		req_mod(req, SEND_CANCELED);
req              1467 drivers/block/drbd/drbd_worker.c 	req->pre_send_jif = jiffies;
req              1469 drivers/block/drbd/drbd_worker.c 	re_init_if_first_write(connection, req->epoch);
req              1470 drivers/block/drbd/drbd_worker.c 	maybe_send_barrier(connection, req->epoch);
req              1473 drivers/block/drbd/drbd_worker.c 	err = drbd_send_dblock(peer_device, req);
req              1474 drivers/block/drbd/drbd_worker.c 	req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
req              1489 drivers/block/drbd/drbd_worker.c 	struct drbd_request *req = container_of(w, struct drbd_request, w);
req              1490 drivers/block/drbd/drbd_worker.c 	struct drbd_device *device = req->device;
req              1493 drivers/block/drbd/drbd_worker.c 	bool do_send_unplug = req->rq_state & RQ_UNPLUG;
req              1497 drivers/block/drbd/drbd_worker.c 		req_mod(req, SEND_CANCELED);
req              1500 drivers/block/drbd/drbd_worker.c 	req->pre_send_jif = jiffies;
req              1504 drivers/block/drbd/drbd_worker.c 	maybe_send_barrier(connection, req->epoch);
req              1506 drivers/block/drbd/drbd_worker.c 	err = drbd_send_drequest(peer_device, P_DATA_REQUEST, req->i.sector, req->i.size,
req              1507 drivers/block/drbd/drbd_worker.c 				 (unsigned long)req);
req              1509 drivers/block/drbd/drbd_worker.c 	req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
req              1519 drivers/block/drbd/drbd_worker.c 	struct drbd_request *req = container_of(w, struct drbd_request, w);
req              1520 drivers/block/drbd/drbd_worker.c 	struct drbd_device *device = req->device;
req              1522 drivers/block/drbd/drbd_worker.c 	if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
req              1523 drivers/block/drbd/drbd_worker.c 		drbd_al_begin_io(device, &req->i);
req              1525 drivers/block/drbd/drbd_worker.c 	drbd_req_make_private_bio(req, req->master_bio);
req              1526 drivers/block/drbd/drbd_worker.c 	bio_set_dev(req->private_bio, device->ldev->backing_bdev);
req              1527 drivers/block/drbd/drbd_worker.c 	generic_make_request(req->private_bio);
req              2218 drivers/block/floppy.c static void floppy_end_request(struct request *req, blk_status_t error)
req              2221 drivers/block/floppy.c 	unsigned int drive = (unsigned long)req->rq_disk->private_data;
req              2225 drivers/block/floppy.c 		nr_sectors = blk_rq_cur_sectors(req);
req              2226 drivers/block/floppy.c 	if (blk_update_request(req, error, nr_sectors << 9))
req              2228 drivers/block/floppy.c 	__blk_mq_end_request(req, error);
req              2239 drivers/block/floppy.c 	struct request *req = current_req;
req              2247 drivers/block/floppy.c 	if (!req) {
req              2255 drivers/block/floppy.c 		block = current_count_sectors + blk_rq_pos(req);
req              2260 drivers/block/floppy.c 		floppy_end_request(req, 0);
req              2262 drivers/block/floppy.c 		if (rq_data_dir(req) == WRITE) {
req              2266 drivers/block/floppy.c 				DRWE->first_error_sector = blk_rq_pos(req);
req              2269 drivers/block/floppy.c 			DRWE->last_error_sector = blk_rq_pos(req);
req              2272 drivers/block/floppy.c 		floppy_end_request(req, BLK_STS_IOERR);
req               492 drivers/block/mtip32xx/mtip32xx.c 	struct request *req = blk_mq_rq_from_pdu(cmd);
req               495 drivers/block/mtip32xx/mtip32xx.c 	blk_mq_complete_request(req);
req              2599 drivers/block/mtip32xx/mtip32xx.c static bool mtip_abort_cmd(struct request *req, void *data, bool reserved)
req              2601 drivers/block/mtip32xx/mtip32xx.c 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
req              2604 drivers/block/mtip32xx/mtip32xx.c 	dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
req              2606 drivers/block/mtip32xx/mtip32xx.c 	clear_bit(req->tag, dd->port->cmds_to_issue);
req              2608 drivers/block/mtip32xx/mtip32xx.c 	mtip_softirq_done_fn(req);
req              2612 drivers/block/mtip32xx/mtip32xx.c static bool mtip_queue_cmd(struct request *req, void *data, bool reserved)
req              2616 drivers/block/mtip32xx/mtip32xx.c 	set_bit(req->tag, dd->port->cmds_to_issue);
req              2617 drivers/block/mtip32xx/mtip32xx.c 	blk_abort_request(req);
req              3533 drivers/block/mtip32xx/mtip32xx.c static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
req              3536 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = req->q->queuedata;
req              3539 drivers/block/mtip32xx/mtip32xx.c 		struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
req              3542 drivers/block/mtip32xx/mtip32xx.c 		blk_mq_complete_request(req);
req              3546 drivers/block/mtip32xx/mtip32xx.c 	if (test_bit(req->tag, dd->port->cmds_to_issue))
req               167 drivers/block/nbd.c 	struct request *req = blk_mq_rq_from_pdu(cmd);
req               170 drivers/block/nbd.c 		blk_mq_requeue_request(req, true);
req               177 drivers/block/nbd.c 	struct request *req = blk_mq_rq_from_pdu(cmd);
req               178 drivers/block/nbd.c 	u32 tag = blk_mq_unique_tag(req);
req               333 drivers/block/nbd.c static void nbd_complete_rq(struct request *req)
req               335 drivers/block/nbd.c 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
req               337 drivers/block/nbd.c 	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
req               340 drivers/block/nbd.c 	blk_mq_end_request(req, cmd->status);
req               365 drivers/block/nbd.c static u32 req_to_nbd_cmd_type(struct request *req)
req               367 drivers/block/nbd.c 	switch (req_op(req)) {
req               381 drivers/block/nbd.c static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
req               384 drivers/block/nbd.c 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
req               436 drivers/block/nbd.c 			req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
req               437 drivers/block/nbd.c 			(unsigned long long)blk_rq_pos(req) << 9,
req               438 drivers/block/nbd.c 			blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);
req               452 drivers/block/nbd.c 	blk_mq_complete_request(req);
req               517 drivers/block/nbd.c 	struct request *req = blk_mq_rq_from_pdu(cmd);
req               524 drivers/block/nbd.c 	unsigned long size = blk_rq_bytes(req);
req               533 drivers/block/nbd.c 	type = req_to_nbd_cmd_type(req);
req               537 drivers/block/nbd.c 	if (rq_data_dir(req) == WRITE &&
req               544 drivers/block/nbd.c 	if (req->cmd_flags & REQ_FUA)
req               569 drivers/block/nbd.c 		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
req               578 drivers/block/nbd.c 		req, nbdcmd_to_ascii(type),
req               579 drivers/block/nbd.c 		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
req               582 drivers/block/nbd.c 	trace_nbd_header_sent(req, handle);
req               591 drivers/block/nbd.c 				nsock->pending = req;
req               605 drivers/block/nbd.c 	bio = req->bio;
req               616 drivers/block/nbd.c 				req, bvec.bv_len);
req               633 drivers/block/nbd.c 					nsock->pending = req;
req               655 drivers/block/nbd.c 	trace_nbd_payload_sent(req, handle);
req               668 drivers/block/nbd.c 	struct request *req = NULL;
req               696 drivers/block/nbd.c 		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
req               698 drivers/block/nbd.c 	if (!req || !blk_mq_request_started(req)) {
req               700 drivers/block/nbd.c 			tag, req);
req               703 drivers/block/nbd.c 	trace_nbd_header_received(req, handle);
req               704 drivers/block/nbd.c 	cmd = blk_mq_rq_to_pdu(req);
req               709 drivers/block/nbd.c 			req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
req               715 drivers/block/nbd.c 			req);
req               721 drivers/block/nbd.c 			req);
req               732 drivers/block/nbd.c 	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
req               733 drivers/block/nbd.c 	if (rq_data_dir(req) != WRITE) {
req               737 drivers/block/nbd.c 		rq_for_each_segment(bvec, req, iter) {
req               759 drivers/block/nbd.c 				req, bvec.bv_len);
req               763 drivers/block/nbd.c 	trace_nbd_payload_received(req, handle);
req               796 drivers/block/nbd.c static bool nbd_clear_req(struct request *req, void *data, bool reserved)
req               798 drivers/block/nbd.c 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
req               804 drivers/block/nbd.c 	blk_mq_complete_request(req);
req               873 drivers/block/nbd.c 	struct request *req = blk_mq_rq_from_pdu(cmd);
req               882 drivers/block/nbd.c 		blk_mq_start_request(req);
req               891 drivers/block/nbd.c 		blk_mq_start_request(req);
req               915 drivers/block/nbd.c 			blk_mq_start_request(req);
req               926 drivers/block/nbd.c 	blk_mq_start_request(req);
req               927 drivers/block/nbd.c 	if (unlikely(nsock->pending && nsock->pending != req)) {
req               510 drivers/block/paride/pd.c 	struct pd_req *req = blk_mq_rq_to_pdu(pd_req);
req               512 drivers/block/paride/pd.c 	return req->func(pd_current);
req               776 drivers/block/paride/pd.c 	struct pd_req *req;
req               781 drivers/block/paride/pd.c 	req = blk_mq_rq_to_pdu(rq);
req               783 drivers/block/paride/pd.c 	req->func = func;
req                36 drivers/block/ps3disk.c 	struct request *req;
req                82 drivers/block/ps3disk.c 				   struct request *req, int gather)
req                91 drivers/block/ps3disk.c 	rq_for_each_segment(bvec, req, iter) {
req               111 drivers/block/ps3disk.c 					      struct request *req)
req               114 drivers/block/ps3disk.c 	int write = rq_data_dir(req), res;
req               124 drivers/block/ps3disk.c 	rq_for_each_segment(bv, req, iter)
req               128 drivers/block/ps3disk.c 		__func__, __LINE__, op, n, blk_rq_sectors(req));
req               131 drivers/block/ps3disk.c 	start_sector = blk_rq_pos(req) * priv->blocking_factor;
req               132 drivers/block/ps3disk.c 	sectors = blk_rq_sectors(req) * priv->blocking_factor;
req               137 drivers/block/ps3disk.c 		ps3disk_scatter_gather(dev, req, 1);
req               153 drivers/block/ps3disk.c 	priv->req = req;
req               158 drivers/block/ps3disk.c 						 struct request *req)
req               174 drivers/block/ps3disk.c 	priv->req = req;
req               179 drivers/block/ps3disk.c 				       struct request *req)
req               183 drivers/block/ps3disk.c 	switch (req_op(req)) {
req               185 drivers/block/ps3disk.c 		return ps3disk_submit_flush_request(dev, req);
req               188 drivers/block/ps3disk.c 		return ps3disk_submit_request_sg(dev, req);
req               190 drivers/block/ps3disk.c 		blk_dump_rq_flags(req, DEVICE_NAME " bad request");
req               216 drivers/block/ps3disk.c 	struct request *req;
req               236 drivers/block/ps3disk.c 	req = priv->req;
req               237 drivers/block/ps3disk.c 	if (!req) {
req               246 drivers/block/ps3disk.c 	if (req_op(req) == REQ_OP_FLUSH) {
req               250 drivers/block/ps3disk.c 		read = !rq_data_dir(req);
req               262 drivers/block/ps3disk.c 			ps3disk_scatter_gather(dev, req, 0);
req               266 drivers/block/ps3disk.c 	priv->req = NULL;
req               267 drivers/block/ps3disk.c 	blk_mq_end_request(req, error);
req              1570 drivers/block/rbd.c 	struct ceph_osd_request *req;
req              1575 drivers/block/rbd.c 	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
req              1576 drivers/block/rbd.c 	if (!req)
req              1579 drivers/block/rbd.c 	list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
req              1580 drivers/block/rbd.c 	req->r_callback = rbd_osd_req_callback;
req              1581 drivers/block/rbd.c 	req->r_priv = obj_req;
req              1587 drivers/block/rbd.c 	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
req              1588 drivers/block/rbd.c 	req->r_base_oloc.pool = rbd_dev->layout.pool_id;
req              1590 drivers/block/rbd.c 	ret = ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
req              1596 drivers/block/rbd.c 	return req;
req              2153 drivers/block/rbd.c static int rbd_cls_object_map_update(struct ceph_osd_request *req,
req              2161 drivers/block/rbd.c 	ret = osd_req_op_cls_init(req, which, "rbd", "object_map_update");
req              2180 drivers/block/rbd.c 	osd_req_op_cls_request_data_pages(req, which, pages, p - start, 0,
req              2196 drivers/block/rbd.c 	struct ceph_osd_request *req;
req              2208 drivers/block/rbd.c 	req = ceph_osdc_alloc_request(osdc, NULL, num_ops, false, GFP_NOIO);
req              2209 drivers/block/rbd.c 	if (!req)
req              2212 drivers/block/rbd.c 	list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
req              2213 drivers/block/rbd.c 	req->r_callback = rbd_object_map_callback;
req              2214 drivers/block/rbd.c 	req->r_priv = obj_req;
req              2216 drivers/block/rbd.c 	rbd_object_map_name(rbd_dev, snap_id, &req->r_base_oid);
req              2217 drivers/block/rbd.c 	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
req              2218 drivers/block/rbd.c 	req->r_flags = CEPH_OSD_FLAG_WRITE;
req              2219 drivers/block/rbd.c 	ktime_get_real_ts64(&req->r_mtime);
req              2226 drivers/block/rbd.c 		ret = ceph_cls_assert_locked(req, which++, RBD_LOCK_NAME,
req              2232 drivers/block/rbd.c 	ret = rbd_cls_object_map_update(req, which, obj_req->ex.oe_objno,
req              2237 drivers/block/rbd.c 	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
req              2241 drivers/block/rbd.c 	ceph_osdc_start_request(osdc, req, false);
req              4935 drivers/block/rbd.c 	struct ceph_osd_request *req;
req              4940 drivers/block/rbd.c 	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
req              4941 drivers/block/rbd.c 	if (!req)
req              4944 drivers/block/rbd.c 	ceph_oid_copy(&req->r_base_oid, oid);
req              4945 drivers/block/rbd.c 	ceph_oloc_copy(&req->r_base_oloc, oloc);
req              4946 drivers/block/rbd.c 	req->r_flags = CEPH_OSD_FLAG_READ;
req              4954 drivers/block/rbd.c 	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
req              4955 drivers/block/rbd.c 	osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
req              4958 drivers/block/rbd.c 	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
req              4962 drivers/block/rbd.c 	ceph_osdc_start_request(osdc, req, false);
req              4963 drivers/block/rbd.c 	ret = ceph_osdc_wait_request(osdc, req);
req              4968 drivers/block/rbd.c 	ceph_osdc_put_request(req);
req               188 drivers/block/skd_main.c 	struct skd_request_context req;
req               481 drivers/block/skd_main.c 	struct request *const req = mqd->rq;
req               482 drivers/block/skd_main.c 	struct request_queue *const q = req->q;
req               486 drivers/block/skd_main.c 	const u32 tag = blk_mq_unique_tag(req);
req               487 drivers/block/skd_main.c 	struct skd_request_context *const skreq = blk_mq_rq_to_pdu(req);
req               490 drivers/block/skd_main.c 	const u32 lba = blk_rq_pos(req);
req               491 drivers/block/skd_main.c 	const u32 count = blk_rq_sectors(req);
req               492 drivers/block/skd_main.c 	const int data_dir = rq_data_dir(req);
req               497 drivers/block/skd_main.c 	if (!(req->rq_flags & RQF_DONTPREP)) {
req               499 drivers/block/skd_main.c 		req->rq_flags |= RQF_DONTPREP;
req               502 drivers/block/skd_main.c 	blk_mq_start_request(req);
req               510 drivers/block/skd_main.c 		"new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba,
req               522 drivers/block/skd_main.c 	if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
req               525 drivers/block/skd_main.c 		blk_mq_complete_request(req);
req               563 drivers/block/skd_main.c 	if (req_op(req) == REQ_OP_FLUSH) {
req               570 drivers/block/skd_main.c 	if (req->cmd_flags & REQ_FUA)
req               601 drivers/block/skd_main.c static enum blk_eh_timer_return skd_timed_out(struct request *req,
req               604 drivers/block/skd_main.c 	struct skd_device *skdev = req->q->queuedata;
req               607 drivers/block/skd_main.c 		blk_mq_unique_tag(req));
req               612 drivers/block/skd_main.c static void skd_complete_rq(struct request *req)
req               614 drivers/block/skd_main.c 	struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
req               616 drivers/block/skd_main.c 	blk_mq_end_request(req, skreq->status);
req               622 drivers/block/skd_main.c 	struct request *req = blk_mq_rq_from_pdu(skreq);
req               632 drivers/block/skd_main.c 	n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
req               887 drivers/block/skd_main.c 	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
req               898 drivers/block/skd_main.c 	dma_address = skspcl->req.sksg_dma_address;
req               900 drivers/block/skd_main.c 	skspcl->req.n_sg = 1;
req               916 drivers/block/skd_main.c 	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
req               921 drivers/block/skd_main.c 	if (skspcl->req.state != SKD_REQ_STATE_IDLE)
req               928 drivers/block/skd_main.c 	skspcl->req.state = SKD_REQ_STATE_BUSY;
req               931 drivers/block/skd_main.c 	scsi->hdr.tag = skspcl->req.id;
req              1044 drivers/block/skd_main.c 				skspcl->req.sksg_list[0].byte_count,
req              1047 drivers/block/skd_main.c 	skspcl->req.completion = *skcomp;
req              1048 drivers/block/skd_main.c 	skspcl->req.state = SKD_REQ_STATE_IDLE;
req              1050 drivers/block/skd_main.c 	status = skspcl->req.completion.status;
req              1245 drivers/block/skd_main.c 	WARN_ON_ONCE(skspcl->req.n_sg != 1);
req              1260 drivers/block/skd_main.c 			skspcl, skspcl->req.id, skspcl->req.sksg_list,
req              1261 drivers/block/skd_main.c 			&skspcl->req.sksg_dma_address);
req              1262 drivers/block/skd_main.c 		for (i = 0; i < skspcl->req.n_sg; i++) {
req              1264 drivers/block/skd_main.c 				&skspcl->req.sksg_list[i];
req              1283 drivers/block/skd_main.c 				   skspcl->req.sksg_dma_address,
req              1288 drivers/block/skd_main.c 				   skspcl->req.sksg_list[0].byte_count,
req              1412 drivers/block/skd_main.c 				      struct request *req)
req              1420 drivers/block/skd_main.c 		blk_mq_complete_request(req);
req              1425 drivers/block/skd_main.c 		blk_mq_requeue_request(req, true);
req              1435 drivers/block/skd_main.c 			blk_mq_requeue_request(req, true);
req              1443 drivers/block/skd_main.c 		blk_mq_complete_request(req);
req              1621 drivers/block/skd_main.c 			if (skspcl->req.id == req_id &&
req              1622 drivers/block/skd_main.c 			    skspcl->req.state == SKD_REQ_STATE_BUSY) {
req              1895 drivers/block/skd_main.c static bool skd_recover_request(struct request *req, void *data, bool reserved)
req              1898 drivers/block/skd_main.c 	struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
req              1911 drivers/block/skd_main.c 	blk_mq_complete_request(req);
req              2189 drivers/block/skd_main.c 	if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
req              2770 drivers/block/skd_main.c 	skspcl->req.id = 0 + SKD_ID_INTERNAL;
req              2771 drivers/block/skd_main.c 	skspcl->req.state = SKD_REQ_STATE_IDLE;
req              2790 drivers/block/skd_main.c 	skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
req              2791 drivers/block/skd_main.c 						 &skspcl->req.sksg_dma_address);
req              2792 drivers/block/skd_main.c 	if (skspcl->req.sksg_list == NULL) {
req              3027 drivers/block/skd_main.c 	skd_free_sg_list(skdev, skspcl->req.sksg_list,
req              3028 drivers/block/skd_main.c 			 skspcl->req.sksg_dma_address);
req              3030 drivers/block/skd_main.c 	skspcl->req.sksg_list = NULL;
req              3031 drivers/block/skd_main.c 	skspcl->req.sksg_dma_address = 0;
req              3580 drivers/block/skd_main.c 	struct request *req = blk_mq_rq_from_pdu(skreq);
req              3581 drivers/block/skd_main.c 	u32 lba = blk_rq_pos(req);
req              3582 drivers/block/skd_main.c 	u32 count = blk_rq_sectors(req);
req              3592 drivers/block/skd_main.c 		"req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, lba,
req              3593 drivers/block/skd_main.c 		count, count, (int)rq_data_dir(req));
req                54 drivers/block/sunvdc.c 	struct request		*req;
req               309 drivers/block/sunvdc.c 	struct request *req;
req               318 drivers/block/sunvdc.c 	req = rqe->req;
req               319 drivers/block/sunvdc.c 	if (req == NULL) {
req               324 drivers/block/sunvdc.c 	rqe->req = NULL;
req               326 drivers/block/sunvdc.c 	blk_mq_end_request(req, desc->status ? BLK_STS_IOERR : 0);
req               459 drivers/block/sunvdc.c static int __send_request(struct request *req)
req               461 drivers/block/sunvdc.c 	struct vdc_port *port = req->rq_disk->private_data;
req               476 drivers/block/sunvdc.c 	if (rq_data_dir(req) == READ) {
req               485 drivers/block/sunvdc.c 	nsg = blk_rq_map_sg(req->q, req, sg);
req               502 drivers/block/sunvdc.c 	rqe->req = req;
req               513 drivers/block/sunvdc.c 	desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
req              1107 drivers/block/sunvdc.c 		struct request *req;
req              1113 drivers/block/sunvdc.c 		req = rqe->req;
req              1114 drivers/block/sunvdc.c 		if (req == NULL) {
req              1119 drivers/block/sunvdc.c 		rqe->req = NULL;
req              1120 drivers/block/sunvdc.c 		blk_mq_requeue_request(req, false);
req               529 drivers/block/swim.c 	struct request *req = bd->rq;
req               535 drivers/block/swim.c 	blk_mq_start_request(req);
req               537 drivers/block/swim.c 	if (!fs->disk_in || rq_data_dir(req) == WRITE) {
req               543 drivers/block/swim.c 		err = floppy_read_sectors(fs, blk_rq_pos(req),
req               544 drivers/block/swim.c 					  blk_rq_cur_sectors(req),
req               545 drivers/block/swim.c 					  bio_data(req->bio));
req               546 drivers/block/swim.c 	} while (blk_update_request(req, err, blk_rq_cur_bytes(req)));
req               547 drivers/block/swim.c 	__blk_mq_end_request(req, err);
req               259 drivers/block/swim3.c 	struct request *req = fs->cur_req;
req               262 drivers/block/swim3.c 		  err, nr_bytes, req);
req               265 drivers/block/swim3.c 		nr_bytes = blk_rq_cur_bytes(req);
req               266 drivers/block/swim3.c 	if (blk_update_request(req, err, nr_bytes))
req               268 drivers/block/swim3.c 	__blk_mq_end_request(req, err);
req               312 drivers/block/swim3.c 	struct request *req = bd->rq;
req               320 drivers/block/swim3.c 	blk_mq_start_request(req);
req               321 drivers/block/swim3.c 	fs->cur_req = req;
req               333 drivers/block/swim3.c 	if (rq_data_dir(req) == WRITE) {
req               348 drivers/block/swim3.c 	fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
req               349 drivers/block/swim3.c 	x = ((long)blk_rq_pos(req)) % fs->secpercyl;
req               422 drivers/block/swim3.c 	struct request *req = fs->cur_req;
req               424 drivers/block/swim3.c 	if (blk_rq_cur_sectors(req) <= 0) {
req               428 drivers/block/swim3.c 	if (rq_data_dir(req) == WRITE)
req               432 drivers/block/swim3.c 		if (n > blk_rq_cur_sectors(req))
req               433 drivers/block/swim3.c 			n = blk_rq_cur_sectors(req);
req               445 drivers/block/swim3.c 	if (rq_data_dir(req) == WRITE) {
req               449 drivers/block/swim3.c 		init_dma(cp, OUTPUT_MORE, bio_data(req->bio), 512);
req               453 drivers/block/swim3.c 		init_dma(cp, INPUT_LAST, bio_data(req->bio), n * 512);
req               460 drivers/block/swim3.c 	if (rq_data_dir(req) == WRITE)
req               647 drivers/block/swim3.c 	struct request *req = fs->cur_req;
req               656 drivers/block/swim3.c 			  fs->state, rq_data_dir(req), intr, err);
req               716 drivers/block/swim3.c 		if (rq_data_dir(req) == WRITE)
req               741 drivers/block/swim3.c 				blk_update_request(req, 0, n << 9);
req               749 drivers/block/swim3.c 				       rq_data_dir(req) == WRITE? "writ": "read",
req               750 drivers/block/swim3.c 				       (long)blk_rq_pos(req), err);
req               759 drivers/block/swim3.c 					  fs->state, rq_data_dir(req), intr, err);
req               132 drivers/block/virtio_blk.c static inline void virtblk_scsi_request_done(struct request *req)
req               134 drivers/block/virtio_blk.c 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
req               135 drivers/block/virtio_blk.c 	struct virtio_blk *vblk = req->q->queuedata;
req               165 drivers/block/virtio_blk.c static inline void virtblk_scsi_request_done(struct request *req)
req               193 drivers/block/virtio_blk.c static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
req               195 drivers/block/virtio_blk.c 	unsigned short segments = blk_rq_nr_discard_segments(req);
req               208 drivers/block/virtio_blk.c 	__rq_for_each_bio(bio, req) {
req               218 drivers/block/virtio_blk.c 	req->special_vec.bv_page = virt_to_page(range);
req               219 drivers/block/virtio_blk.c 	req->special_vec.bv_offset = offset_in_page(range);
req               220 drivers/block/virtio_blk.c 	req->special_vec.bv_len = sizeof(*range) * segments;
req               221 drivers/block/virtio_blk.c 	req->rq_flags |= RQF_SPECIAL_PAYLOAD;
req               226 drivers/block/virtio_blk.c static inline void virtblk_request_done(struct request *req)
req               228 drivers/block/virtio_blk.c 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
req               230 drivers/block/virtio_blk.c 	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
req               231 drivers/block/virtio_blk.c 		kfree(page_address(req->special_vec.bv_page) +
req               232 drivers/block/virtio_blk.c 		      req->special_vec.bv_offset);
req               235 drivers/block/virtio_blk.c 	switch (req_op(req)) {
req               238 drivers/block/virtio_blk.c 		virtblk_scsi_request_done(req);
req               242 drivers/block/virtio_blk.c 	blk_mq_end_request(req, virtblk_result(vbr));
req               258 drivers/block/virtio_blk.c 			struct request *req = blk_mq_rq_from_pdu(vbr);
req               260 drivers/block/virtio_blk.c 			blk_mq_complete_request(req);
req               291 drivers/block/virtio_blk.c 	struct request *req = bd->rq;
req               292 drivers/block/virtio_blk.c 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
req               301 drivers/block/virtio_blk.c 	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
req               303 drivers/block/virtio_blk.c 	switch (req_op(req)) {
req               316 drivers/block/virtio_blk.c 		unmap = !(req->cmd_flags & REQ_NOUNMAP);
req               332 drivers/block/virtio_blk.c 		0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
req               333 drivers/block/virtio_blk.c 	vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));
req               335 drivers/block/virtio_blk.c 	blk_mq_start_request(req);
req               338 drivers/block/virtio_blk.c 		err = virtblk_setup_discard_write_zeroes(req, unmap);
req               343 drivers/block/virtio_blk.c 	num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
req               345 drivers/block/virtio_blk.c 		if (rq_data_dir(req) == WRITE)
req               352 drivers/block/virtio_blk.c 	if (blk_rq_is_scsi(req))
req               389 drivers/block/virtio_blk.c 	struct request *req;
req               392 drivers/block/virtio_blk.c 	req = blk_get_request(q, REQ_OP_DRV_IN, 0);
req               393 drivers/block/virtio_blk.c 	if (IS_ERR(req))
req               394 drivers/block/virtio_blk.c 		return PTR_ERR(req);
req               396 drivers/block/virtio_blk.c 	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
req               400 drivers/block/virtio_blk.c 	blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
req               401 drivers/block/virtio_blk.c 	err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
req               403 drivers/block/virtio_blk.c 	blk_put_request(req);
req               774 drivers/block/virtio_blk.c static void virtblk_initialize_rq(struct request *req)
req               776 drivers/block/virtio_blk.c 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
req               207 drivers/block/xen-blkback/blkback.c 				struct blkif_request *req,
req               472 drivers/block/xen-blkback/blkback.c 	struct pending_req *req = NULL;
req               477 drivers/block/xen-blkback/blkback.c 		req = list_entry(ring->pending_free.next, struct pending_req,
req               479 drivers/block/xen-blkback/blkback.c 		list_del(&req->free_list);
req               482 drivers/block/xen-blkback/blkback.c 	return req;
req               489 drivers/block/xen-blkback/blkback.c static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
req               496 drivers/block/xen-blkback/blkback.c 	list_add(&req->free_list, &ring->pending_free);
req               505 drivers/block/xen-blkback/blkback.c static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
req               514 drivers/block/xen-blkback/blkback.c 	if (likely(req->nr_sects)) {
req               515 drivers/block/xen-blkback/blkback.c 		blkif_sector_t end = req->sector_number + req->nr_sects;
req               517 drivers/block/xen-blkback/blkback.c 		if (unlikely(end < req->sector_number))
req               523 drivers/block/xen-blkback/blkback.c 	req->dev  = vbd->pdevice;
req               524 drivers/block/xen-blkback/blkback.c 	req->bdev = vbd->bdev;
req               752 drivers/block/xen-blkback/blkback.c static void xen_blkbk_unmap_and_respond(struct pending_req *req)
req               754 drivers/block/xen-blkback/blkback.c 	struct gntab_unmap_queue_data* work = &req->gnttab_unmap_data;
req               755 drivers/block/xen-blkback/blkback.c 	struct xen_blkif_ring *ring = req->ring;
req               756 drivers/block/xen-blkback/blkback.c 	struct grant_page **pages = req->segments;
req               759 drivers/block/xen-blkback/blkback.c 	invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
req               760 drivers/block/xen-blkback/blkback.c 					   req->unmap, req->unmap_pages);
req               762 drivers/block/xen-blkback/blkback.c 	work->data = req;
req               764 drivers/block/xen-blkback/blkback.c 	work->unmap_ops = req->unmap;
req               766 drivers/block/xen-blkback/blkback.c 	work->pages = req->unmap_pages;
req               769 drivers/block/xen-blkback/blkback.c 	gnttab_unmap_refs_async(&req->gnttab_unmap_data);
req               955 drivers/block/xen-blkback/blkback.c static int xen_blkbk_parse_indirect(struct blkif_request *req,
req               970 drivers/block/xen-blkback/blkback.c 		pages[i]->gref = req->u.indirect.indirect_grefs[i];
req              1009 drivers/block/xen-blkback/blkback.c 				struct blkif_request *req)
req              1020 drivers/block/xen-blkback/blkback.c 	preq.sector_number = req->u.discard.sector_number;
req              1021 drivers/block/xen-blkback/blkback.c 	preq.nr_sects      = req->u.discard.nr_sectors;
req              1033 drivers/block/xen-blkback/blkback.c 		 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
req              1036 drivers/block/xen-blkback/blkback.c 	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
req              1037 drivers/block/xen-blkback/blkback.c 				   req->u.discard.nr_sectors,
req              1046 drivers/block/xen-blkback/blkback.c 	make_response(ring, req->u.discard.id, req->operation, status);
req              1052 drivers/block/xen-blkback/blkback.c 			     struct blkif_request *req,
req              1056 drivers/block/xen-blkback/blkback.c 	make_response(ring, req->u.other.id, req->operation,
req              1127 drivers/block/xen-blkback/blkback.c 	struct blkif_request req;
req              1161 drivers/block/xen-blkback/blkback.c 			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
req              1164 drivers/block/xen-blkback/blkback.c 			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
req              1167 drivers/block/xen-blkback/blkback.c 			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
req              1177 drivers/block/xen-blkback/blkback.c 		switch (req.operation) {
req              1183 drivers/block/xen-blkback/blkback.c 			if (dispatch_rw_block_io(ring, &req, pending_req))
req              1188 drivers/block/xen-blkback/blkback.c 			if (dispatch_discard_io(ring, &req))
req              1192 drivers/block/xen-blkback/blkback.c 			if (dispatch_other_io(ring, &req, pending_req))
req              1225 drivers/block/xen-blkback/blkback.c 				struct blkif_request *req,
req              1241 drivers/block/xen-blkback/blkback.c 	req_operation = req->operation == BLKIF_OP_INDIRECT ?
req              1242 drivers/block/xen-blkback/blkback.c 			req->u.indirect.indirect_op : req->operation;
req              1244 drivers/block/xen-blkback/blkback.c 	if ((req->operation == BLKIF_OP_INDIRECT) &&
req              1276 drivers/block/xen-blkback/blkback.c 	nseg = req->operation == BLKIF_OP_INDIRECT ?
req              1277 drivers/block/xen-blkback/blkback.c 	       req->u.indirect.nr_segments : req->u.rw.nr_segments;
req              1280 drivers/block/xen-blkback/blkback.c 	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
req              1282 drivers/block/xen-blkback/blkback.c 	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
req              1292 drivers/block/xen-blkback/blkback.c 	pending_req->id        = req->u.rw.id;
req              1297 drivers/block/xen-blkback/blkback.c 	if (req->operation != BLKIF_OP_INDIRECT) {
req              1298 drivers/block/xen-blkback/blkback.c 		preq.dev               = req->u.rw.handle;
req              1299 drivers/block/xen-blkback/blkback.c 		preq.sector_number     = req->u.rw.sector_number;
req              1301 drivers/block/xen-blkback/blkback.c 			pages[i]->gref = req->u.rw.seg[i].gref;
req              1302 drivers/block/xen-blkback/blkback.c 			seg[i].nsec = req->u.rw.seg[i].last_sect -
req              1303 drivers/block/xen-blkback/blkback.c 				req->u.rw.seg[i].first_sect + 1;
req              1304 drivers/block/xen-blkback/blkback.c 			seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
req              1305 drivers/block/xen-blkback/blkback.c 			if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
req              1306 drivers/block/xen-blkback/blkback.c 			    (req->u.rw.seg[i].last_sect <
req              1307 drivers/block/xen-blkback/blkback.c 			     req->u.rw.seg[i].first_sect))
req              1312 drivers/block/xen-blkback/blkback.c 		preq.dev               = req->u.indirect.handle;
req              1313 drivers/block/xen-blkback/blkback.c 		preq.sector_number     = req->u.indirect.sector_number;
req              1314 drivers/block/xen-blkback/blkback.c 		if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
req              1421 drivers/block/xen-blkback/blkback.c 	make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
req               247 drivers/block/xen-blkback/xenbus.c 	struct pending_req *req, *n;
req               286 drivers/block/xen-blkback/xenbus.c 		list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
req               287 drivers/block/xen-blkback/xenbus.c 			list_del(&req->free_list);
req               290 drivers/block/xen-blkback/xenbus.c 				kfree(req->segments[j]);
req               293 drivers/block/xen-blkback/xenbus.c 				kfree(req->indirect_pages[j]);
req               295 drivers/block/xen-blkback/xenbus.c 			kfree(req);
req               927 drivers/block/xen-blkback/xenbus.c 	struct pending_req *req, *n;
req               980 drivers/block/xen-blkback/xenbus.c 		req = kzalloc(sizeof(*req), GFP_KERNEL);
req               981 drivers/block/xen-blkback/xenbus.c 		if (!req)
req               983 drivers/block/xen-blkback/xenbus.c 		list_add_tail(&req->free_list, &ring->pending_free);
req               985 drivers/block/xen-blkback/xenbus.c 			req->segments[j] = kzalloc(sizeof(*req->segments[0]), GFP_KERNEL);
req               986 drivers/block/xen-blkback/xenbus.c 			if (!req->segments[j])
req               990 drivers/block/xen-blkback/xenbus.c 			req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
req               992 drivers/block/xen-blkback/xenbus.c 			if (!req->indirect_pages[j])
req              1007 drivers/block/xen-blkback/xenbus.c 	list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
req              1008 drivers/block/xen-blkback/xenbus.c 		list_del(&req->free_list);
req              1010 drivers/block/xen-blkback/xenbus.c 			if (!req->segments[j])
req              1012 drivers/block/xen-blkback/xenbus.c 			kfree(req->segments[j]);
req              1015 drivers/block/xen-blkback/xenbus.c 			if (!req->indirect_pages[j])
req              1017 drivers/block/xen-blkback/xenbus.c 			kfree(req->indirect_pages[j]);
req              1019 drivers/block/xen-blkback/xenbus.c 		kfree(req);
req                99 drivers/block/xen-blkfront.c 	struct blkif_request req;
req               271 drivers/block/xen-blkfront.c 	rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
req               272 drivers/block/xen-blkfront.c 	rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
req               279 drivers/block/xen-blkfront.c 	if (rinfo->shadow[id].req.u.rw.id != id)
req               283 drivers/block/xen-blkfront.c 	rinfo->shadow[id].req.u.rw.id  = rinfo->shadow_free;
req               526 drivers/block/xen-blkfront.c 					    struct request *req,
req               535 drivers/block/xen-blkfront.c 	rinfo->shadow[id].request = req;
req               544 drivers/block/xen-blkfront.c static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
req               551 drivers/block/xen-blkfront.c 	id = blkif_ring_get_request(rinfo, req, &ring_req);
req               554 drivers/block/xen-blkfront.c 	ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
req               556 drivers/block/xen-blkfront.c 	ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
req               557 drivers/block/xen-blkfront.c 	if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)
req               563 drivers/block/xen-blkfront.c 	rinfo->shadow[id].req = *ring_req;
req               692 drivers/block/xen-blkfront.c static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
req               703 drivers/block/xen-blkfront.c 		.need_copy = rq_data_dir(req) && info->feature_persistent,
req               715 drivers/block/xen-blkfront.c 	max_grefs = req->nr_phys_segments * GRANTS_PER_PSEG;
req               740 drivers/block/xen-blkfront.c 	id = blkif_ring_get_request(rinfo, req, &ring_req);
req               742 drivers/block/xen-blkfront.c 	num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
req               759 drivers/block/xen-blkfront.c 		BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
req               761 drivers/block/xen-blkfront.c 		ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
req               763 drivers/block/xen-blkfront.c 		ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
req               767 drivers/block/xen-blkfront.c 		ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
req               769 drivers/block/xen-blkfront.c 		ring_req->operation = rq_data_dir(req) ?
req               771 drivers/block/xen-blkfront.c 		if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
req               790 drivers/block/xen-blkfront.c 			extra_id = blkif_ring_get_request(rinfo, req,
req               834 drivers/block/xen-blkfront.c 	rinfo->shadow[id].req = *ring_req;
req               836 drivers/block/xen-blkfront.c 		rinfo->shadow[extra_id].req = *extra_ring_req;
req               850 drivers/block/xen-blkfront.c static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo)
req               855 drivers/block/xen-blkfront.c 	if (unlikely(req_op(req) == REQ_OP_DISCARD ||
req               856 drivers/block/xen-blkfront.c 		     req_op(req) == REQ_OP_SECURE_ERASE))
req               857 drivers/block/xen-blkfront.c 		return blkif_queue_discard_req(req, rinfo);
req               859 drivers/block/xen-blkfront.c 		return blkif_queue_rw_req(req, rinfo);
req               872 drivers/block/xen-blkfront.c static inline bool blkif_request_flush_invalid(struct request *req,
req               875 drivers/block/xen-blkfront.c 	return (blk_rq_is_passthrough(req) ||
req               876 drivers/block/xen-blkfront.c 		((req_op(req) == REQ_OP_FLUSH) &&
req               878 drivers/block/xen-blkfront.c 		((req->cmd_flags & REQ_FUA) &&
req              1288 drivers/block/xen-blkfront.c 		segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
req              1289 drivers/block/xen-blkfront.c 		       rinfo->shadow[i].req.u.indirect.nr_segments :
req              1290 drivers/block/xen-blkfront.c 		       rinfo->shadow[i].req.u.rw.nr_segments;
req              1299 drivers/block/xen-blkfront.c 		if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT)
req              1432 drivers/block/xen-blkfront.c 	num_grant = s->req.operation == BLKIF_OP_INDIRECT ?
req              1433 drivers/block/xen-blkfront.c 		s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
req              1453 drivers/block/xen-blkfront.c 		num_grant += s2->req.u.rw.nr_segments;
req              1519 drivers/block/xen-blkfront.c 	if (s->req.operation == BLKIF_OP_INDIRECT) {
req              1550 drivers/block/xen-blkfront.c 	struct request *req;
req              1582 drivers/block/xen-blkfront.c 		req  = rinfo->shadow[id].request;
req              1600 drivers/block/xen-blkfront.c 			blkif_req(req)->error = BLK_STS_OK;
req              1602 drivers/block/xen-blkfront.c 			blkif_req(req)->error = BLK_STS_IOERR;
req              1610 drivers/block/xen-blkfront.c 				blkif_req(req)->error = BLK_STS_NOTSUPP;
req              1622 drivers/block/xen-blkfront.c 				blkif_req(req)->error = BLK_STS_NOTSUPP;
req              1625 drivers/block/xen-blkfront.c 				     rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
req              1628 drivers/block/xen-blkfront.c 				blkif_req(req)->error = BLK_STS_NOTSUPP;
req              1630 drivers/block/xen-blkfront.c 			if (unlikely(blkif_req(req)->error)) {
req              1631 drivers/block/xen-blkfront.c 				if (blkif_req(req)->error == BLK_STS_NOTSUPP)
req              1632 drivers/block/xen-blkfront.c 					blkif_req(req)->error = BLK_STS_OK;
req              1649 drivers/block/xen-blkfront.c 		blk_mq_complete_request(req);
req              1880 drivers/block/xen-blkfront.c 			rinfo->shadow[j].req.u.rw.id = j + 1;
req              1881 drivers/block/xen-blkfront.c 		rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
req              2019 drivers/block/xen-blkfront.c 	struct request *req, *n;
req              2050 drivers/block/xen-blkfront.c 	list_for_each_entry_safe(req, n, &info->requests, queuelist) {
req              2052 drivers/block/xen-blkfront.c 		list_del_init(&req->queuelist);
req              2053 drivers/block/xen-blkfront.c 		BUG_ON(req->nr_phys_segments > segs);
req              2054 drivers/block/xen-blkfront.c 		blk_mq_requeue_request(req, false);
req               186 drivers/block/xsysace.c 	struct request *req;	/* request being processed */
req               488 drivers/block/xsysace.c 	struct request *req;
req               508 drivers/block/xsysace.c 		if (ace->req) {
req               509 drivers/block/xsysace.c 			blk_mq_end_request(ace->req, BLK_STS_IOERR);
req               510 drivers/block/xsysace.c 			ace->req = NULL;
req               512 drivers/block/xsysace.c 		while ((req = ace_get_next_request(ace->queue)) != NULL)
req               513 drivers/block/xsysace.c 			blk_mq_end_request(req, BLK_STS_IOERR);
req               656 drivers/block/xsysace.c 		req = ace_get_next_request(ace->queue);
req               657 drivers/block/xsysace.c 		if (!req) {
req               665 drivers/block/xsysace.c 			(unsigned long long)blk_rq_pos(req),
req               666 drivers/block/xsysace.c 			blk_rq_sectors(req), blk_rq_cur_sectors(req),
req               667 drivers/block/xsysace.c 			rq_data_dir(req));
req               669 drivers/block/xsysace.c 		ace->req = req;
req               670 drivers/block/xsysace.c 		ace->data_ptr = bio_data(req->bio);
req               671 drivers/block/xsysace.c 		ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
req               672 drivers/block/xsysace.c 		ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);
req               674 drivers/block/xsysace.c 		count = blk_rq_sectors(req);
req               675 drivers/block/xsysace.c 		if (rq_data_dir(req)) {
req               708 drivers/block/xsysace.c 				blk_rq_cur_sectors(ace->req) * 16,
req               717 drivers/block/xsysace.c 				blk_rq_cur_sectors(ace->req) * 16,
req               737 drivers/block/xsysace.c 		if (blk_update_request(ace->req, BLK_STS_OK,
req               738 drivers/block/xsysace.c 		    blk_rq_cur_bytes(ace->req))) {
req               743 drivers/block/xsysace.c 			ace->data_ptr = bio_data(ace->req->bio);
req               744 drivers/block/xsysace.c 			ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
req               753 drivers/block/xsysace.c 		ace->req = NULL;
req               868 drivers/block/xsysace.c 	struct request *req = bd->rq;
req               870 drivers/block/xsysace.c 	if (blk_rq_is_passthrough(req)) {
req               871 drivers/block/xsysace.c 		blk_mq_start_request(req);
req               876 drivers/block/xsysace.c 	list_add_tail(&req->queuelist, &ace->rq_list);
req                72 drivers/block/z2ram.c 	struct request *req = bd->rq;
req                73 drivers/block/z2ram.c 	unsigned long start = blk_rq_pos(req) << 9;
req                74 drivers/block/z2ram.c 	unsigned long len  = blk_rq_cur_bytes(req);
req                76 drivers/block/z2ram.c 	blk_mq_start_request(req);
req                81 drivers/block/z2ram.c 		       (unsigned long long)blk_rq_pos(req),
req                82 drivers/block/z2ram.c 		       blk_rq_cur_sectors(req));
req                91 drivers/block/z2ram.c 		void *buffer = bio_data(req->bio);
req                96 drivers/block/z2ram.c 		if (rq_data_dir(req) == READ)
req               105 drivers/block/z2ram.c 	blk_mq_end_request(req, BLK_STS_OK);
req               247 drivers/bluetooth/bpa10x.c 	static const u8 req[] = { 0x07 };
req               253 drivers/bluetooth/bpa10x.c 	skb = __hci_cmd_sync(hdev, 0xfc0e, sizeof(req), req, HCI_INIT_TIMEOUT);
req               345 drivers/bluetooth/bpa10x.c 	const u8 req[] = { 0x00, enable };
req               354 drivers/bluetooth/bpa10x.c 	skb = __hci_cmd_sync(hdev, 0xfc0e, sizeof(req), req, HCI_INIT_TIMEOUT);
req              2178 drivers/cdrom/cdrom.c 	struct scsi_request *req;
req              2208 drivers/cdrom/cdrom.c 		req = scsi_req(rq);
req              2216 drivers/cdrom/cdrom.c 		req->cmd[0] = GPCMD_READ_CD;
req              2217 drivers/cdrom/cdrom.c 		req->cmd[1] = 1 << 2;
req              2218 drivers/cdrom/cdrom.c 		req->cmd[2] = (lba >> 24) & 0xff;
req              2219 drivers/cdrom/cdrom.c 		req->cmd[3] = (lba >> 16) & 0xff;
req              2220 drivers/cdrom/cdrom.c 		req->cmd[4] = (lba >>  8) & 0xff;
req              2221 drivers/cdrom/cdrom.c 		req->cmd[5] = lba & 0xff;
req              2222 drivers/cdrom/cdrom.c 		req->cmd[6] = (nr >> 16) & 0xff;
req              2223 drivers/cdrom/cdrom.c 		req->cmd[7] = (nr >>  8) & 0xff;
req              2224 drivers/cdrom/cdrom.c 		req->cmd[8] = nr & 0xff;
req              2225 drivers/cdrom/cdrom.c 		req->cmd[9] = 0xf8;
req              2227 drivers/cdrom/cdrom.c 		req->cmd_len = 12;
req              2236 drivers/cdrom/cdrom.c 			scsi_normalize_sense(req->sense, req->sense_len,
req               568 drivers/cdrom/gdrom.c static blk_status_t gdrom_readdisk_dma(struct request *req)
req               581 drivers/cdrom/gdrom.c 	block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
req               582 drivers/cdrom/gdrom.c 	block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
req               583 drivers/cdrom/gdrom.c 	__raw_writel(virt_to_phys(bio_data(req->bio)), GDROM_DMA_STARTADDR_REG);
req               626 drivers/cdrom/gdrom.c 	blk_mq_end_request(req, err);
req               141 drivers/char/ipmi/ipmi_devintf.c 			   struct ipmi_req *req,
req               149 drivers/char/ipmi/ipmi_devintf.c 	if (req->addr_len > sizeof(struct ipmi_addr))
req               152 drivers/char/ipmi/ipmi_devintf.c 	if (copy_from_user(&addr, req->addr, req->addr_len))
req               155 drivers/char/ipmi/ipmi_devintf.c 	msg.netfn = req->msg.netfn;
req               156 drivers/char/ipmi/ipmi_devintf.c 	msg.cmd = req->msg.cmd;
req               157 drivers/char/ipmi/ipmi_devintf.c 	msg.data_len = req->msg.data_len;
req               165 drivers/char/ipmi/ipmi_devintf.c 	rv = ipmi_validate_addr(&addr, req->addr_len);
req               169 drivers/char/ipmi/ipmi_devintf.c 	if (req->msg.data != NULL) {
req               170 drivers/char/ipmi/ipmi_devintf.c 		if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
req               176 drivers/char/ipmi/ipmi_devintf.c 				   req->msg.data,
req               177 drivers/char/ipmi/ipmi_devintf.c 				   req->msg.data_len)) {
req               187 drivers/char/ipmi/ipmi_devintf.c 				  req->msgid,
req               304 drivers/char/ipmi/ipmi_devintf.c 		struct ipmi_req req;
req               308 drivers/char/ipmi/ipmi_devintf.c 		if (copy_from_user(&req, arg, sizeof(req))) {
req               318 drivers/char/ipmi/ipmi_devintf.c 		rv = handle_send_req(priv->user, &req, retries, retry_time_ms);
req               324 drivers/char/ipmi/ipmi_devintf.c 		struct ipmi_req_settime req;
req               326 drivers/char/ipmi/ipmi_devintf.c 		if (copy_from_user(&req, arg, sizeof(req))) {
req               332 drivers/char/ipmi/ipmi_devintf.c 				     &req.req,
req               333 drivers/char/ipmi/ipmi_devintf.c 				     req.retries,
req               334 drivers/char/ipmi/ipmi_devintf.c 				     req.retry_time_ms);
req               655 drivers/char/ipmi/ipmi_devintf.c 	struct compat_ipmi_req	req;
req               684 drivers/char/ipmi/ipmi_devintf.c 	get_compat_ipmi_req(&p64->req, &p32->req);
req               754 drivers/char/ipmi/ipmi_devintf.c 		return handle_send_req(priv->user, &sp.req,
req                36 drivers/char/tpm/tpm_ppi.c static bool tpm_ppi_req_has_parameter(u64 req)
req                38 drivers/char/tpm/tpm_ppi.c 	return req == 23;
req                65 drivers/char/tpm/tpm_ppi.c 	u64 req;
req                88 drivers/char/tpm/tpm_ppi.c 			req = obj->package.elements[1].integer.value;
req                89 drivers/char/tpm/tpm_ppi.c 			if (tpm_ppi_req_has_parameter(req))
req                91 drivers/char/tpm/tpm_ppi.c 				    "%llu %llu\n", req,
req                95 drivers/char/tpm/tpm_ppi.c 						"%llu\n", req);
req               116 drivers/char/tpm/tpm_ppi.c 	u32 req;
req               147 drivers/char/tpm/tpm_ppi.c 		if (sscanf(buf, "%d", &req) != 1)
req               150 drivers/char/tpm/tpm_ppi.c 		argv4.buffer.length = sizeof(req);
req               151 drivers/char/tpm/tpm_ppi.c 		argv4.buffer.pointer = (u8 *)&req;
req               227 drivers/char/tpm/tpm_ppi.c 	u64 req, res;
req               254 drivers/char/tpm/tpm_ppi.c 	req = ret_obj[1].integer.value;
req               256 drivers/char/tpm/tpm_ppi.c 	if (req) {
req               258 drivers/char/tpm/tpm_ppi.c 			status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
req               261 drivers/char/tpm/tpm_ppi.c 			status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
req               264 drivers/char/tpm/tpm_ppi.c 			status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
req               268 drivers/char/tpm/tpm_ppi.c 					   req, res, "Corresponding TPM error");
req               271 drivers/char/tpm/tpm_ppi.c 					   req, res, "Error");
req               274 drivers/char/tpm/tpm_ppi.c 				   req, "No Recent Request");
req               245 drivers/clk/at91/clk-audio-pll.c 					     struct clk_rate_request *req)
req               251 drivers/clk/at91/clk-audio-pll.c 		 req->rate, req->best_parent_rate);
req               253 drivers/clk/at91/clk-audio-pll.c 	req->rate = clamp(req->rate, AUDIO_PLL_FOUT_MIN, AUDIO_PLL_FOUT_MAX);
req               255 drivers/clk/at91/clk-audio-pll.c 	req->min_rate = max(req->min_rate, AUDIO_PLL_FOUT_MIN);
req               256 drivers/clk/at91/clk-audio-pll.c 	req->max_rate = min(req->max_rate, AUDIO_PLL_FOUT_MAX);
req               258 drivers/clk/at91/clk-audio-pll.c 	ret = clk_audio_pll_frac_compute_frac(req->rate, req->best_parent_rate,
req               263 drivers/clk/at91/clk-audio-pll.c 	req->rate = clk_audio_pll_fout(req->best_parent_rate, nd, fracr);
req               265 drivers/clk/at91/clk-audio-pll.c 	req->best_parent_hw = clk_hw_get_parent(hw);
req               268 drivers/clk/at91/clk-audio-pll.c 		 __func__, req->rate, nd, fracr);
req                98 drivers/clk/at91/clk-generated.c static void clk_generated_best_diff(struct clk_rate_request *req,
req               110 drivers/clk/at91/clk-generated.c 	tmp_diff = abs(req->rate - tmp_rate);
req               115 drivers/clk/at91/clk-generated.c 		req->best_parent_rate = parent_rate;
req               116 drivers/clk/at91/clk-generated.c 		req->best_parent_hw = parent;
req               121 drivers/clk/at91/clk-generated.c 					struct clk_rate_request *req)
req               125 drivers/clk/at91/clk-generated.c 	struct clk_rate_request req_parent = *req;
req               143 drivers/clk/at91/clk-generated.c 		div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
req               147 drivers/clk/at91/clk-generated.c 		clk_generated_best_diff(req, parent, parent_rate, div,
req               172 drivers/clk/at91/clk-generated.c 		req_parent.rate = req->rate * div;
req               174 drivers/clk/at91/clk-generated.c 		clk_generated_best_diff(req, parent, req_parent.rate, div,
req               184 drivers/clk/at91/clk-generated.c 		 __clk_get_name((req->best_parent_hw)->clk),
req               185 drivers/clk/at91/clk-generated.c 		 req->best_parent_rate);
req               190 drivers/clk/at91/clk-generated.c 	req->rate = best_rate;
req                49 drivers/clk/at91/clk-programmable.c 					   struct clk_rate_request *req)
req                69 drivers/clk/at91/clk-programmable.c 				if (tmp_rate <= req->rate)
req                75 drivers/clk/at91/clk-programmable.c 				if (tmp_rate <= req->rate)
req                80 drivers/clk/at91/clk-programmable.c 		if (tmp_rate > req->rate)
req                84 drivers/clk/at91/clk-programmable.c 		    (req->rate - tmp_rate) < (req->rate - best_rate)) {
req                86 drivers/clk/at91/clk-programmable.c 			req->best_parent_rate = parent_rate;
req                87 drivers/clk/at91/clk-programmable.c 			req->best_parent_hw = parent;
req                97 drivers/clk/at91/clk-programmable.c 	req->rate = best_rate;
req                56 drivers/clk/at91/clk-usb.c 					     struct clk_rate_request *req)
req                75 drivers/clk/at91/clk-usb.c 			tmp_parent_rate = req->rate * div;
req                82 drivers/clk/at91/clk-usb.c 			if (tmp_rate < req->rate)
req                83 drivers/clk/at91/clk-usb.c 				tmp_diff = req->rate - tmp_rate;
req                85 drivers/clk/at91/clk-usb.c 				tmp_diff = tmp_rate - req->rate;
req                90 drivers/clk/at91/clk-usb.c 				req->best_parent_rate = tmp_parent_rate;
req                91 drivers/clk/at91/clk-usb.c 				req->best_parent_hw = parent;
req                94 drivers/clk/at91/clk-usb.c 			if (!best_diff || tmp_rate < req->rate)
req               105 drivers/clk/at91/clk-usb.c 	req->rate = best_rate;
req              1170 drivers/clk/bcm/clk-bcm2835.c 					struct clk_rate_request *req)
req              1200 drivers/clk/bcm/clk-bcm2835.c 		rate = bcm2835_clock_choose_div_and_prate(hw, i, req->rate,
req              1203 drivers/clk/bcm/clk-bcm2835.c 		if (rate > best_rate && rate <= req->rate) {
req              1214 drivers/clk/bcm/clk-bcm2835.c 	req->best_parent_hw = best_parent;
req              1215 drivers/clk/bcm/clk-bcm2835.c 	req->best_parent_rate = best_prate;
req              1217 drivers/clk/bcm/clk-bcm2835.c 	req->rate = best_avgrate;
req               507 drivers/clk/bcm/clk-iproc-pll.c 		struct clk_rate_request *req)
req               517 drivers/clk/bcm/clk-iproc-pll.c 	if (req->rate == 0 || req->best_parent_rate == 0)
req               523 drivers/clk/bcm/clk-iproc-pll.c 		ret = pll_calc_param(req->rate, req->best_parent_rate,
req               528 drivers/clk/bcm/clk-iproc-pll.c 		req->rate = vco_param.rate;
req               537 drivers/clk/bcm/clk-iproc-pll.c 		diff = abs(req->rate - pll->vco_param[i].rate);
req               547 drivers/clk/bcm/clk-iproc-pll.c 	req->rate = pll->vco_param[best_idx].rate;
req               647 drivers/clk/bcm/clk-iproc-pll.c 		struct clk_rate_request *req)
req               651 drivers/clk/bcm/clk-iproc-pll.c 	if (req->rate == 0)
req               653 drivers/clk/bcm/clk-iproc-pll.c 	if (req->rate == req->best_parent_rate)
req               656 drivers/clk/bcm/clk-iproc-pll.c 	bestdiv = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
req               658 drivers/clk/bcm/clk-iproc-pll.c 		req->rate = req->best_parent_rate;
req               663 drivers/clk/bcm/clk-iproc-pll.c 	req->rate = req->best_parent_rate / bestdiv;
req              1023 drivers/clk/bcm/clk-kona.c 					struct clk_rate_request *req)
req              1041 drivers/clk/bcm/clk-kona.c 		rate = kona_peri_clk_round_rate(hw, req->rate,
req              1042 drivers/clk/bcm/clk-kona.c 						&req->best_parent_rate);
req              1046 drivers/clk/bcm/clk-kona.c 		req->rate = rate;
req              1053 drivers/clk/bcm/clk-kona.c 	best_rate = kona_peri_clk_round_rate(hw, req->rate, &parent_rate);
req              1054 drivers/clk/bcm/clk-kona.c 	best_delta = abs(best_rate - req->rate);
req              1068 drivers/clk/bcm/clk-kona.c 		other_rate = kona_peri_clk_round_rate(hw, req->rate,
req              1070 drivers/clk/bcm/clk-kona.c 		delta = abs(other_rate - req->rate);
req              1074 drivers/clk/bcm/clk-kona.c 			req->best_parent_hw = parent;
req              1075 drivers/clk/bcm/clk-kona.c 			req->best_parent_rate = parent_rate;
req              1079 drivers/clk/bcm/clk-kona.c 	req->rate = best_rate;
req               145 drivers/clk/bcm/clk-raspberrypi.c 					  struct clk_rate_request *req)
req               153 drivers/clk/bcm/clk-raspberrypi.c 	final_rate = clamp(req->rate, rpi->min_rate, rpi->max_rate);
req               156 drivers/clk/bcm/clk-raspberrypi.c 	do_div(div, req->best_parent_rate);
req               161 drivers/clk/bcm/clk-raspberrypi.c 	final_rate = ((u64)req->best_parent_rate *
req               164 drivers/clk/bcm/clk-raspberrypi.c 	req->rate = final_rate >> A2W_PLL_FRAC_BITS;
req                45 drivers/clk/clk-composite.c 					struct clk_rate_request *req)
req                62 drivers/clk/clk-composite.c 		return rate_ops->determine_rate(rate_hw, req);
req                65 drivers/clk/clk-composite.c 		req->best_parent_hw = NULL;
req                69 drivers/clk/clk-composite.c 			req->best_parent_hw = parent;
req                70 drivers/clk/clk-composite.c 			req->best_parent_rate = clk_hw_get_rate(parent);
req                72 drivers/clk/clk-composite.c 			rate = rate_ops->round_rate(rate_hw, req->rate,
req                73 drivers/clk/clk-composite.c 						    &req->best_parent_rate);
req                77 drivers/clk/clk-composite.c 			req->rate = rate;
req                88 drivers/clk/clk-composite.c 			tmp_rate = rate_ops->round_rate(rate_hw, req->rate,
req                93 drivers/clk/clk-composite.c 			rate_diff = abs(req->rate - tmp_rate);
req                95 drivers/clk/clk-composite.c 			if (!rate_diff || !req->best_parent_hw
req                97 drivers/clk/clk-composite.c 				req->best_parent_hw = parent;
req                98 drivers/clk/clk-composite.c 				req->best_parent_rate = parent_rate;
req               107 drivers/clk/clk-composite.c 		req->rate = best_rate;
req               111 drivers/clk/clk-composite.c 		return mux_ops->determine_rate(mux_hw, req);
req               129 drivers/clk/clk-mux.c 				  struct clk_rate_request *req)
req               133 drivers/clk/clk-mux.c 	return clk_mux_determine_rate_flags(hw, req, mux->flags);
req               552 drivers/clk/clk.c 				 struct clk_rate_request *req,
req               558 drivers/clk/clk.c 	struct clk_rate_request parent_req = *req;
req               587 drivers/clk/clk.c 			parent_req = *req;
req               595 drivers/clk/clk.c 		if (mux_is_better_rate(req->rate, parent_req.rate,
req               607 drivers/clk/clk.c 		req->best_parent_hw = best_parent->hw;
req               608 drivers/clk/clk.c 	req->best_parent_rate = best;
req               609 drivers/clk/clk.c 	req->rate = best;
req               660 drivers/clk/clk.c 			     struct clk_rate_request *req)
req               662 drivers/clk/clk.c 	return clk_mux_determine_rate_flags(hw, req, 0);
req               667 drivers/clk/clk.c 				     struct clk_rate_request *req)
req               669 drivers/clk/clk.c 	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
req              1308 drivers/clk/clk.c 					   struct clk_rate_request *req)
req              1324 drivers/clk/clk.c 		req->rate = core->rate;
req              1326 drivers/clk/clk.c 		return core->ops->determine_rate(core->hw, req);
req              1328 drivers/clk/clk.c 		rate = core->ops->round_rate(core->hw, req->rate,
req              1329 drivers/clk/clk.c 					     &req->best_parent_rate);
req              1333 drivers/clk/clk.c 		req->rate = rate;
req              1342 drivers/clk/clk.c 				   struct clk_rate_request *req)
req              1346 drivers/clk/clk.c 	if (WARN_ON(!core || !req))
req              1351 drivers/clk/clk.c 		req->best_parent_hw = parent->hw;
req              1352 drivers/clk/clk.c 		req->best_parent_rate = parent->rate;
req              1354 drivers/clk/clk.c 		req->best_parent_hw = NULL;
req              1355 drivers/clk/clk.c 		req->best_parent_rate = 0;
req              1365 drivers/clk/clk.c 				      struct clk_rate_request *req)
req              1370 drivers/clk/clk.c 		req->rate = 0;
req              1374 drivers/clk/clk.c 	clk_core_init_rate_req(core, req);
req              1377 drivers/clk/clk.c 		return clk_core_determine_round_nolock(core, req);
req              1379 drivers/clk/clk.c 		return clk_core_round_rate_nolock(core->parent, req);
req              1381 drivers/clk/clk.c 	req->rate = core->rate;
req              1392 drivers/clk/clk.c int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
req              1395 drivers/clk/clk.c 		req->rate = 0;
req              1399 drivers/clk/clk.c 	return clk_core_round_rate_nolock(hw->core, req);
req              1406 drivers/clk/clk.c 	struct clk_rate_request req;
req              1408 drivers/clk/clk.c 	clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
req              1409 drivers/clk/clk.c 	req.rate = rate;
req              1411 drivers/clk/clk.c 	ret = clk_core_round_rate_nolock(hw->core, &req);
req              1415 drivers/clk/clk.c 	return req.rate;
req              1430 drivers/clk/clk.c 	struct clk_rate_request req;
req              1441 drivers/clk/clk.c 	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
req              1442 drivers/clk/clk.c 	req.rate = rate;
req              1444 drivers/clk/clk.c 	ret = clk_core_round_rate_nolock(clk->core, &req);
req              1454 drivers/clk/clk.c 	return req.rate;
req              1910 drivers/clk/clk.c 		struct clk_rate_request req;
req              1912 drivers/clk/clk.c 		req.rate = rate;
req              1913 drivers/clk/clk.c 		req.min_rate = min_rate;
req              1914 drivers/clk/clk.c 		req.max_rate = max_rate;
req              1916 drivers/clk/clk.c 		clk_core_init_rate_req(core, &req);
req              1918 drivers/clk/clk.c 		ret = clk_core_determine_round_nolock(core, &req);
req              1922 drivers/clk/clk.c 		best_parent_rate = req.best_parent_rate;
req              1923 drivers/clk/clk.c 		new_rate = req.rate;
req              1924 drivers/clk/clk.c 		parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
req              2111 drivers/clk/clk.c 	struct clk_rate_request req;
req              2123 drivers/clk/clk.c 	clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
req              2124 drivers/clk/clk.c 	req.rate = req_rate;
req              2126 drivers/clk/clk.c 	ret = clk_core_round_rate_nolock(core, &req);
req              2131 drivers/clk/clk.c 	return ret ? 0 : req.rate;
req               129 drivers/clk/davinci/pll.c 				      struct clk_rate_request *req)
req               132 drivers/clk/davinci/pll.c 	struct clk_hw *parent = req->best_parent_hw;
req               133 drivers/clk/davinci/pll.c 	unsigned long parent_rate = req->best_parent_rate;
req               134 drivers/clk/davinci/pll.c 	unsigned long rate = req->rate;
req               139 drivers/clk/davinci/pll.c 	if (rate < req->min_rate)
req               142 drivers/clk/davinci/pll.c 	rate = min(rate, req->max_rate);
req               148 drivers/clk/davinci/pll.c 		if (best_rate < req->min_rate)
req               154 drivers/clk/davinci/pll.c 		req->rate = best_rate;
req               165 drivers/clk/davinci/pll.c 		if (r < req->min_rate)
req               167 drivers/clk/davinci/pll.c 		if (r > rate || r > req->max_rate)
req               171 drivers/clk/davinci/pll.c 			req->rate = best_rate;
req               172 drivers/clk/davinci/pll.c 			req->best_parent_rate = parent_rate;
req               282 drivers/clk/hisilicon/clk-hi3620.c 				  struct clk_rate_request *req)
req               286 drivers/clk/hisilicon/clk-hi3620.c 	if ((req->rate <= 13000000) && (mclk->id == HI3620_MMC_CIUCLK1)) {
req               287 drivers/clk/hisilicon/clk-hi3620.c 		req->rate = 13000000;
req               288 drivers/clk/hisilicon/clk-hi3620.c 		req->best_parent_rate = 26000000;
req               289 drivers/clk/hisilicon/clk-hi3620.c 	} else if (req->rate <= 26000000) {
req               290 drivers/clk/hisilicon/clk-hi3620.c 		req->rate = 25000000;
req               291 drivers/clk/hisilicon/clk-hi3620.c 		req->best_parent_rate = 180000000;
req               292 drivers/clk/hisilicon/clk-hi3620.c 	} else if (req->rate <= 52000000) {
req               293 drivers/clk/hisilicon/clk-hi3620.c 		req->rate = 50000000;
req               294 drivers/clk/hisilicon/clk-hi3620.c 		req->best_parent_rate = 360000000;
req               295 drivers/clk/hisilicon/clk-hi3620.c 	} else if (req->rate <= 100000000) {
req               296 drivers/clk/hisilicon/clk-hi3620.c 		req->rate = 100000000;
req               297 drivers/clk/hisilicon/clk-hi3620.c 		req->best_parent_rate = 720000000;
req               300 drivers/clk/hisilicon/clk-hi3620.c 		req->rate = 180000000;
req               301 drivers/clk/hisilicon/clk-hi3620.c 		req->best_parent_rate = 1440000000;
req               421 drivers/clk/imx/clk-sccg-pll.c 					struct clk_rate_request *req,
req               433 drivers/clk/imx/clk-sccg-pll.c 	req->max_rate = max;
req               434 drivers/clk/imx/clk-sccg-pll.c 	req->min_rate = min;
req               449 drivers/clk/imx/clk-sccg-pll.c 	ret = __clk_determine_rate(parent_hw, req);
req               451 drivers/clk/imx/clk-sccg-pll.c 		ret = clk_sccg_pll_find_setup(setup, req->rate,
req               455 drivers/clk/imx/clk-sccg-pll.c 	req->best_parent_hw = parent_hw;
req               456 drivers/clk/imx/clk-sccg-pll.c 	req->best_parent_rate = req->rate;
req               457 drivers/clk/imx/clk-sccg-pll.c 	req->rate = setup->fout;
req               463 drivers/clk/imx/clk-sccg-pll.c 				       struct clk_rate_request *req)
req               467 drivers/clk/imx/clk-sccg-pll.c 	uint64_t rate = req->rate;
req               468 drivers/clk/imx/clk-sccg-pll.c 	uint64_t min = req->min_rate;
req               469 drivers/clk/imx/clk-sccg-pll.c 	uint64_t max = req->max_rate;
req               475 drivers/clk/imx/clk-sccg-pll.c 	ret = __clk_sccg_pll_determine_rate(hw, req, req->rate, req->rate,
req               480 drivers/clk/imx/clk-sccg-pll.c 	ret = __clk_sccg_pll_determine_rate(hw, req, PLL_STAGE1_REF_MIN_FREQ,
req               486 drivers/clk/imx/clk-sccg-pll.c 	ret = __clk_sccg_pll_determine_rate(hw, req, PLL_REF_MIN_FREQ,
req                68 drivers/clk/imx/clk-scu.c 		struct req_get_clock_rate req;
req                87 drivers/clk/imx/clk-scu.c 		} __packed __aligned(4) req;
req               157 drivers/clk/imx/clk-scu.c 	msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
req               158 drivers/clk/imx/clk-scu.c 	msg.data.req.clk = clk->clk_type;
req               247 drivers/clk/imx/clk-scu.c 	msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
req               248 drivers/clk/imx/clk-scu.c 	msg.data.req.clk = clk->clk_type;
req               172 drivers/clk/keystone/sci-clk.c 				  struct clk_rate_request *req)
req               181 drivers/clk/keystone/sci-clk.c 						      req->min_rate,
req               182 drivers/clk/keystone/sci-clk.c 						      req->rate,
req               183 drivers/clk/keystone/sci-clk.c 						      req->max_rate,
req               192 drivers/clk/keystone/sci-clk.c 	req->rate = new_rate;
req               165 drivers/clk/meson/clk-regmap.c 					 struct clk_rate_request *req)
req               170 drivers/clk/meson/clk-regmap.c 	return clk_mux_determine_rate_flags(hw, req, mux->flags);
req               386 drivers/clk/microchip/clk-core.c 				struct clk_rate_request *req)
req               402 drivers/clk/microchip/clk-core.c 		if (req->rate > parent_rate)
req               405 drivers/clk/microchip/clk-core.c 		nearest_rate = roclk_round_rate(hw, req->rate, &parent_rate);
req               406 drivers/clk/microchip/clk-core.c 		delta = abs(nearest_rate - req->rate);
req               407 drivers/clk/microchip/clk-core.c 		if ((nearest_rate >= req->rate) && (delta < best_delta)) {
req               421 drivers/clk/microchip/clk-core.c 		       __func__, clk_hw_get_name(hw), req->rate);
req               426 drivers/clk/microchip/clk-core.c 		 clk_hw_get_name(hw), req->rate,
req               430 drivers/clk/microchip/clk-core.c 	if (req->best_parent_rate)
req               431 drivers/clk/microchip/clk-core.c 		req->best_parent_rate = best_parent_rate;
req               433 drivers/clk/microchip/clk-core.c 	if (req->best_parent_hw)
req               434 drivers/clk/microchip/clk-core.c 		req->best_parent_hw = best_parent_clk;
req               205 drivers/clk/mmp/clk-mix.c 				      struct clk_rate_request *req)
req               231 drivers/clk/mmp/clk-mix.c 			gap = abs(mix_rate - req->rate);
req               249 drivers/clk/mmp/clk-mix.c 				gap = abs(mix_rate - req->rate);
req               266 drivers/clk/mmp/clk-mix.c 	req->best_parent_rate = parent_rate_best;
req               267 drivers/clk/mmp/clk-mix.c 	req->best_parent_hw = parent_best;
req               268 drivers/clk/mmp/clk-mix.c 	req->rate = mix_rate_best;
req               207 drivers/clk/pxa/clk-pxa.c int pxa2xx_determine_rate(struct clk_rate_request *req,
req               215 drivers/clk/pxa/clk-pxa.c 		if (rate == req->rate)
req               217 drivers/clk/pxa/clk-pxa.c 		if (rate < req->min_rate)
req               219 drivers/clk/pxa/clk-pxa.c 		if (rate > req->max_rate)
req               221 drivers/clk/pxa/clk-pxa.c 		if (rate <= req->rate)
req               223 drivers/clk/pxa/clk-pxa.c 		if ((rate >= req->rate) && (closest_above == -1))
req               227 drivers/clk/pxa/clk-pxa.c 	req->best_parent_hw = NULL;
req               230 drivers/clk/pxa/clk-pxa.c 		rate = req->rate;
req               236 drivers/clk/pxa/clk-pxa.c 		pr_debug("%s(rate=%lu) no match\n", __func__, req->rate);
req               240 drivers/clk/pxa/clk-pxa.c 	pr_debug("%s(rate=%lu) rate=%lu\n", __func__, req->rate, rate);
req               241 drivers/clk/pxa/clk-pxa.c 	req->rate = rate;
req               156 drivers/clk/pxa/clk-pxa.h int pxa2xx_determine_rate(struct clk_rate_request *req,
req               217 drivers/clk/pxa/clk-pxa25x.c 					  struct clk_rate_request *req)
req               219 drivers/clk/pxa/clk-pxa25x.c 	return __clk_mux_determine_rate(hw, req);
req               252 drivers/clk/pxa/clk-pxa25x.c 					  struct clk_rate_request *req)
req               254 drivers/clk/pxa/clk-pxa25x.c 	return pxa2xx_determine_rate(req, pxa25x_freqs,
req               244 drivers/clk/pxa/clk-pxa27x.c 					  struct clk_rate_request *req)
req               246 drivers/clk/pxa/clk-pxa27x.c 	return pxa2xx_determine_rate(req, pxa27x_freqs,
req               351 drivers/clk/pxa/clk-pxa27x.c 					  struct clk_rate_request *req)
req               353 drivers/clk/pxa/clk-pxa27x.c 	return __clk_mux_determine_rate(hw, req);
req               126 drivers/clk/qcom/clk-pll.c clk_pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
req               131 drivers/clk/qcom/clk-pll.c 	f = find_freq(pll->freq_tbl, req->rate);
req               133 drivers/clk/qcom/clk-pll.c 		req->rate = clk_pll_recalc_rate(hw, req->best_parent_rate);
req               135 drivers/clk/qcom/clk-pll.c 		req->rate = f->freq;
req               400 drivers/clk/qcom/clk-rcg.c 		struct clk_rate_request *req,
req               403 drivers/clk/qcom/clk-rcg.c 	unsigned long clk_flags, rate = req->rate;
req               428 drivers/clk/qcom/clk-rcg.c 	req->best_parent_hw = p;
req               429 drivers/clk/qcom/clk-rcg.c 	req->best_parent_rate = rate;
req               430 drivers/clk/qcom/clk-rcg.c 	req->rate = f->freq;
req               436 drivers/clk/qcom/clk-rcg.c 				  struct clk_rate_request *req)
req               440 drivers/clk/qcom/clk-rcg.c 	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req,
req               445 drivers/clk/qcom/clk-rcg.c 				      struct clk_rate_request *req)
req               456 drivers/clk/qcom/clk-rcg.c 	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, s->parent_map);
req               460 drivers/clk/qcom/clk-rcg.c 					 struct clk_rate_request *req)
req               467 drivers/clk/qcom/clk-rcg.c 	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
req               468 drivers/clk/qcom/clk-rcg.c 	req->best_parent_rate = clk_hw_round_rate(p, req->rate);
req               469 drivers/clk/qcom/clk-rcg.c 	req->rate = req->best_parent_rate;
req               538 drivers/clk/qcom/clk-rcg.c 				struct clk_rate_request *req)
req               542 drivers/clk/qcom/clk-rcg.c 	p = req->best_parent_hw;
req               543 drivers/clk/qcom/clk-rcg.c 	req->best_parent_rate = clk_hw_round_rate(p, req->rate);
req               544 drivers/clk/qcom/clk-rcg.c 	req->rate = req->best_parent_rate;
req               594 drivers/clk/qcom/clk-rcg.c 		struct clk_rate_request *req)
req               601 drivers/clk/qcom/clk-rcg.c 		request = (req->rate * frac->den) / frac->num;
req               603 drivers/clk/qcom/clk-rcg.c 		src_rate = clk_hw_round_rate(req->best_parent_hw, request);
req               609 drivers/clk/qcom/clk-rcg.c 		req->best_parent_rate = src_rate;
req               610 drivers/clk/qcom/clk-rcg.c 		req->rate = (src_rate * frac->num) / frac->den;
req               668 drivers/clk/qcom/clk-rcg.c 		struct clk_rate_request *req)
req               675 drivers/clk/qcom/clk-rcg.c 	if (req->rate == 0)
req               678 drivers/clk/qcom/clk-rcg.c 	src_rate = clk_hw_get_rate(req->best_parent_hw);
req               680 drivers/clk/qcom/clk-rcg.c 	div = src_rate / req->rate;
req               683 drivers/clk/qcom/clk-rcg.c 		req->best_parent_rate = src_rate;
req               684 drivers/clk/qcom/clk-rcg.c 		req->rate = src_rate / div;
req               192 drivers/clk/qcom/clk-rcg2.c 				    struct clk_rate_request *req,
req               195 drivers/clk/qcom/clk-rcg2.c 	unsigned long clk_flags, rate = req->rate;
req               227 drivers/clk/qcom/clk-rcg2.c 				rate = req->rate;
req               241 drivers/clk/qcom/clk-rcg2.c 	req->best_parent_hw = p;
req               242 drivers/clk/qcom/clk-rcg2.c 	req->best_parent_rate = rate;
req               243 drivers/clk/qcom/clk-rcg2.c 	req->rate = f->freq;
req               249 drivers/clk/qcom/clk-rcg2.c 				   struct clk_rate_request *req)
req               253 drivers/clk/qcom/clk-rcg2.c 	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
req               257 drivers/clk/qcom/clk-rcg2.c 					 struct clk_rate_request *req)
req               261 drivers/clk/qcom/clk-rcg2.c 	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
req               455 drivers/clk/qcom/clk-rcg2.c 					struct clk_rate_request *req)
req               467 drivers/clk/qcom/clk-rcg2.c 	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
req               468 drivers/clk/qcom/clk-rcg2.c 	req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);
req               470 drivers/clk/qcom/clk-rcg2.c 	if (req->best_parent_rate == 810000000)
req               476 drivers/clk/qcom/clk-rcg2.c 		request = req->rate;
req               479 drivers/clk/qcom/clk-rcg2.c 		if ((req->best_parent_rate < (request - delta)) ||
req               480 drivers/clk/qcom/clk-rcg2.c 		    (req->best_parent_rate > (request + delta)))
req               488 drivers/clk/qcom/clk-rcg2.c 		req->rate = calc_rate(req->best_parent_rate,
req               509 drivers/clk/qcom/clk-rcg2.c 				   struct clk_rate_request *req)
req               518 drivers/clk/qcom/clk-rcg2.c 	if (req->rate == 0)
req               521 drivers/clk/qcom/clk-rcg2.c 	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
req               522 drivers/clk/qcom/clk-rcg2.c 	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);
req               524 drivers/clk/qcom/clk-rcg2.c 	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
req               527 drivers/clk/qcom/clk-rcg2.c 	req->rate = calc_rate(parent_rate, 0, 0, 0, div);
req               567 drivers/clk/qcom/clk-rcg2.c 				    struct clk_rate_request *req)
req               573 drivers/clk/qcom/clk-rcg2.c 	unsigned long rate = req->rate;
req               578 drivers/clk/qcom/clk-rcg2.c 	p = req->best_parent_hw;
req               579 drivers/clk/qcom/clk-rcg2.c 	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);
req               584 drivers/clk/qcom/clk-rcg2.c 	req->rate = calc_rate(parent_rate, 0, 0, 0, div);
req               645 drivers/clk/qcom/clk-rcg2.c 				    struct clk_rate_request *req)
req               652 drivers/clk/qcom/clk-rcg2.c 		request = (req->rate * frac->den) / frac->num;
req               654 drivers/clk/qcom/clk-rcg2.c 		src_rate = clk_hw_round_rate(req->best_parent_hw, request);
req               659 drivers/clk/qcom/clk-rcg2.c 		req->best_parent_rate = src_rate;
req               660 drivers/clk/qcom/clk-rcg2.c 		req->rate = (src_rate * frac->num) / frac->den;
req               727 drivers/clk/qcom/clk-rcg2.c 				    struct clk_rate_request *req)
req               735 drivers/clk/qcom/clk-rcg2.c 	if (req->rate == clk_hw_get_rate(xo)) {
req               736 drivers/clk/qcom/clk-rcg2.c 		req->best_parent_hw = xo;
req               747 drivers/clk/qcom/clk-rcg2.c 	parent_req.rate = req->rate = min(req->rate, p9_rate);
req               748 drivers/clk/qcom/clk-rcg2.c 	if (req->rate == p9_rate) {
req               749 drivers/clk/qcom/clk-rcg2.c 		req->rate = req->best_parent_rate = p9_rate;
req               750 drivers/clk/qcom/clk-rcg2.c 		req->best_parent_hw = p9;
req               754 drivers/clk/qcom/clk-rcg2.c 	if (req->best_parent_hw == p9) {
req               756 drivers/clk/qcom/clk-rcg2.c 		if (clk_hw_get_rate(p8) == req->rate)
req               757 drivers/clk/qcom/clk-rcg2.c 			req->best_parent_hw = p8;
req               759 drivers/clk/qcom/clk-rcg2.c 			req->best_parent_hw = p2;
req               760 drivers/clk/qcom/clk-rcg2.c 	} else if (req->best_parent_hw == p8) {
req               761 drivers/clk/qcom/clk-rcg2.c 		req->best_parent_hw = p2;
req               763 drivers/clk/qcom/clk-rcg2.c 		req->best_parent_hw = p8;
req               766 drivers/clk/qcom/clk-rcg2.c 	ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
req               770 drivers/clk/qcom/clk-rcg2.c 	req->rate = req->best_parent_rate = parent_req.rate;
req              1018 drivers/clk/qcom/clk-rcg2.c 				   struct clk_rate_request *req)
req              1032 drivers/clk/qcom/clk-rcg2.c 	return clk_rcg2_determine_rate(hw, req);
req                82 drivers/clk/qcom/clk-regmap-mux-div.c static inline bool is_better_rate(unsigned long req, unsigned long best,
req                85 drivers/clk/qcom/clk-regmap-mux-div.c 	return (req <= new && new < best) || (best < req && best < new);
req                89 drivers/clk/qcom/clk-regmap-mux-div.c 				  struct clk_rate_request *req)
req                94 drivers/clk/qcom/clk-regmap-mux-div.c 	unsigned long req_rate = req->rate;
req               108 drivers/clk/qcom/clk-regmap-mux-div.c 				req->rate = best_rate;
req               109 drivers/clk/qcom/clk-regmap-mux-div.c 				req->best_parent_rate = parent_rate;
req               110 drivers/clk/qcom/clk-regmap-mux-div.c 				req->best_parent_hw = parent;
req               157 drivers/clk/qcom/clk-smd-rpm.c 	struct clk_smd_rpm_req req = {
req               164 drivers/clk/qcom/clk-smd-rpm.c 				 r->rpm_res_type, r->rpm_clk_id, &req,
req               165 drivers/clk/qcom/clk-smd-rpm.c 				 sizeof(req));
req               169 drivers/clk/qcom/clk-smd-rpm.c 				 r->rpm_res_type, r->rpm_clk_id, &req,
req               170 drivers/clk/qcom/clk-smd-rpm.c 				 sizeof(req));
req               180 drivers/clk/qcom/clk-smd-rpm.c 	struct clk_smd_rpm_req req = {
req               187 drivers/clk/qcom/clk-smd-rpm.c 				  r->rpm_res_type, r->rpm_clk_id, &req,
req               188 drivers/clk/qcom/clk-smd-rpm.c 				  sizeof(req));
req               194 drivers/clk/qcom/clk-smd-rpm.c 	struct clk_smd_rpm_req req = {
req               201 drivers/clk/qcom/clk-smd-rpm.c 				  r->rpm_res_type, r->rpm_clk_id, &req,
req               202 drivers/clk/qcom/clk-smd-rpm.c 				  sizeof(req));
req               370 drivers/clk/qcom/clk-smd-rpm.c 	struct clk_smd_rpm_req req = {
req               378 drivers/clk/qcom/clk-smd-rpm.c 				 QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
req               386 drivers/clk/qcom/clk-smd-rpm.c 				 QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
req                80 drivers/clk/sunxi-ng/ccu_div.c 				struct clk_rate_request *req)
req                85 drivers/clk/sunxi-ng/ccu_div.c 					     req, ccu_div_round_rate, cd);
req               177 drivers/clk/sunxi-ng/ccu_mp.c 				 struct clk_rate_request *req)
req               182 drivers/clk/sunxi-ng/ccu_mp.c 					     req, ccu_mp_round_rate, cmp);
req               279 drivers/clk/sunxi-ng/ccu_mp.c 				     struct clk_rate_request *req)
req               287 drivers/clk/sunxi-ng/ccu_mp.c 		req->rate *= 2;
req               288 drivers/clk/sunxi-ng/ccu_mp.c 		req->min_rate *= 2;
req               289 drivers/clk/sunxi-ng/ccu_mp.c 		req->max_rate *= 2;
req               292 drivers/clk/sunxi-ng/ccu_mp.c 	ret = ccu_mp_determine_rate(hw, req);
req               296 drivers/clk/sunxi-ng/ccu_mp.c 		req->rate /= 2;
req               297 drivers/clk/sunxi-ng/ccu_mp.c 		req->min_rate /= 2;
req               298 drivers/clk/sunxi-ng/ccu_mp.c 		req->max_rate /= 2;
req                95 drivers/clk/sunxi-ng/ccu_mult.c 				struct clk_rate_request *req)
req               100 drivers/clk/sunxi-ng/ccu_mult.c 					     req, ccu_mult_round_rate, cm);
req                78 drivers/clk/sunxi-ng/ccu_mux.c 				  struct clk_rate_request *req,
req                99 drivers/clk/sunxi-ng/ccu_mux.c 				  req->rate, data);
req               123 drivers/clk/sunxi-ng/ccu_mux.c 		tmp_rate = round(cm, parent, &parent_rate, req->rate, data);
req               132 drivers/clk/sunxi-ng/ccu_mux.c 		if (tmp_rate == req->rate) {
req               139 drivers/clk/sunxi-ng/ccu_mux.c 		if ((req->rate - tmp_rate) < (req->rate - best_rate)) {
req               150 drivers/clk/sunxi-ng/ccu_mux.c 	req->best_parent_hw = best_parent;
req               151 drivers/clk/sunxi-ng/ccu_mux.c 	req->best_parent_rate = best_parent_rate;
req               152 drivers/clk/sunxi-ng/ccu_mux.c 	req->rate = best_rate;
req                91 drivers/clk/sunxi-ng/ccu_mux.h 				  struct clk_rate_request *req,
req               136 drivers/clk/sunxi-ng/ccu_nkm.c 				  struct clk_rate_request *req)
req               141 drivers/clk/sunxi-ng/ccu_nkm.c 					     req, ccu_nkm_round_rate, nkm);
req                89 drivers/clk/sunxi/clk-factors.c 				      struct clk_rate_request *req)
req               100 drivers/clk/sunxi/clk-factors.c 			.rate = req->rate,
req               107 drivers/clk/sunxi/clk-factors.c 			parent_rate = clk_hw_round_rate(parent, req->rate);
req               115 drivers/clk/sunxi/clk-factors.c 		if (child_rate <= req->rate && child_rate > best_child_rate) {
req               125 drivers/clk/sunxi/clk-factors.c 	req->best_parent_hw = best_parent;
req               126 drivers/clk/sunxi/clk-factors.c 	req->best_parent_rate = best;
req               127 drivers/clk/sunxi/clk-factors.c 	req->rate = best_child_rate;
req               135 drivers/clk/sunxi/clk-factors.c 	struct factors_request req = {
req               144 drivers/clk/sunxi/clk-factors.c 	factors->get_factors(&req);
req               153 drivers/clk/sunxi/clk-factors.c 	reg = FACTOR_SET(config->nshift, config->nwidth, reg, req.n);
req               154 drivers/clk/sunxi/clk-factors.c 	reg = FACTOR_SET(config->kshift, config->kwidth, reg, req.k);
req               155 drivers/clk/sunxi/clk-factors.c 	reg = FACTOR_SET(config->mshift, config->mwidth, reg, req.m);
req               156 drivers/clk/sunxi/clk-factors.c 	reg = FACTOR_SET(config->pshift, config->pwidth, reg, req.p);
req                37 drivers/clk/sunxi/clk-factors.h 	void (*getter)(struct factors_request *req);
req                38 drivers/clk/sunxi/clk-factors.h 	void (*recalc)(struct factors_request *req);
req                46 drivers/clk/sunxi/clk-factors.h 	void (*get_factors)(struct factors_request *req);
req                47 drivers/clk/sunxi/clk-factors.h 	void (*recalc)(struct factors_request *req);
req                23 drivers/clk/sunxi/clk-mod0.c static void sun4i_a10_get_mod0_factors(struct factors_request *req)
req                29 drivers/clk/sunxi/clk-mod0.c 	if (req->rate > req->parent_rate)
req                30 drivers/clk/sunxi/clk-mod0.c 		req->rate = req->parent_rate;
req                32 drivers/clk/sunxi/clk-mod0.c 	div = DIV_ROUND_UP(req->parent_rate, req->rate);
req                45 drivers/clk/sunxi/clk-mod0.c 	req->rate = (req->parent_rate >> calcp) / calcm;
req                46 drivers/clk/sunxi/clk-mod0.c 	req->m = calcm - 1;
req                47 drivers/clk/sunxi/clk-mod0.c 	req->p = calcp;
req               136 drivers/clk/sunxi/clk-sun4i-tcon-ch1.c 				   struct clk_rate_request *req)
req               152 drivers/clk/sunxi/clk-sun4i-tcon-ch1.c 		tmp_rate = tcon_ch1_calc_divider(req->rate, parent_rate,
req               156 drivers/clk/sunxi/clk-sun4i-tcon-ch1.c 		    (req->rate - tmp_rate) < (req->rate - best_rate)) {
req               158 drivers/clk/sunxi/clk-sun4i-tcon-ch1.c 			req->best_parent_rate = parent_rate;
req               159 drivers/clk/sunxi/clk-sun4i-tcon-ch1.c 			req->best_parent_hw = parent;
req               166 drivers/clk/sunxi/clk-sun4i-tcon-ch1.c 	req->rate = best_rate;
req                25 drivers/clk/sunxi/clk-sun6i-ar100.c static void sun6i_get_ar100_factors(struct factors_request *req)
req                31 drivers/clk/sunxi/clk-sun6i-ar100.c 	if (req->rate > req->parent_rate)
req                32 drivers/clk/sunxi/clk-sun6i-ar100.c 		req->rate = req->parent_rate;
req                34 drivers/clk/sunxi/clk-sun6i-ar100.c 	div = DIV_ROUND_UP(req->parent_rate, req->rate);
req                50 drivers/clk/sunxi/clk-sun6i-ar100.c 	req->rate = (req->parent_rate >> shift) / div;
req                51 drivers/clk/sunxi/clk-sun6i-ar100.c 	req->m = div - 1;
req                52 drivers/clk/sunxi/clk-sun6i-ar100.c 	req->p = shift;
req                26 drivers/clk/sunxi/clk-sun9i-core.c static void sun9i_a80_get_pll4_factors(struct factors_request *req)
req                33 drivers/clk/sunxi/clk-sun9i-core.c 	n = DIV_ROUND_UP(req->rate, 6000000);
req                53 drivers/clk/sunxi/clk-sun9i-core.c 	req->rate = ((24000000 * n) >> p) / (m + 1);
req                54 drivers/clk/sunxi/clk-sun9i-core.c 	req->n = n;
req                55 drivers/clk/sunxi/clk-sun9i-core.c 	req->m = m;
req                56 drivers/clk/sunxi/clk-sun9i-core.c 	req->p = p;
req                99 drivers/clk/sunxi/clk-sun9i-core.c static void sun9i_a80_get_gt_factors(struct factors_request *req)
req               103 drivers/clk/sunxi/clk-sun9i-core.c 	if (req->parent_rate < req->rate)
req               104 drivers/clk/sunxi/clk-sun9i-core.c 		req->rate = req->parent_rate;
req               106 drivers/clk/sunxi/clk-sun9i-core.c 	div = DIV_ROUND_UP(req->parent_rate, req->rate);
req               112 drivers/clk/sunxi/clk-sun9i-core.c 	req->rate = req->parent_rate / div;
req               113 drivers/clk/sunxi/clk-sun9i-core.c 	req->m = div;
req               154 drivers/clk/sunxi/clk-sun9i-core.c static void sun9i_a80_get_ahb_factors(struct factors_request *req)
req               158 drivers/clk/sunxi/clk-sun9i-core.c 	if (req->parent_rate < req->rate)
req               159 drivers/clk/sunxi/clk-sun9i-core.c 		req->rate = req->parent_rate;
req               161 drivers/clk/sunxi/clk-sun9i-core.c 	_p = order_base_2(DIV_ROUND_UP(req->parent_rate, req->rate));
req               167 drivers/clk/sunxi/clk-sun9i-core.c 	req->rate = req->parent_rate >> _p;
req               168 drivers/clk/sunxi/clk-sun9i-core.c 	req->p = _p;
req               234 drivers/clk/sunxi/clk-sun9i-core.c static void sun9i_a80_get_apb1_factors(struct factors_request *req)
req               238 drivers/clk/sunxi/clk-sun9i-core.c 	if (req->parent_rate < req->rate)
req               239 drivers/clk/sunxi/clk-sun9i-core.c 		req->rate = req->parent_rate;
req               241 drivers/clk/sunxi/clk-sun9i-core.c 	div = DIV_ROUND_UP(req->parent_rate, req->rate);
req               247 drivers/clk/sunxi/clk-sun9i-core.c 	req->p = order_base_2(div);
req               248 drivers/clk/sunxi/clk-sun9i-core.c 	req->m = (req->parent_rate >> req->p) - 1;
req               249 drivers/clk/sunxi/clk-sun9i-core.c 	req->rate = (req->parent_rate >> req->p) / (req->m + 1);
req               114 drivers/clk/sunxi/clk-sun9i-cpus.c 					     struct clk_rate_request *req)
req               119 drivers/clk/sunxi/clk-sun9i-cpus.c 	unsigned long rate = req->rate;
req               145 drivers/clk/sunxi/clk-sun9i-cpus.c 	req->best_parent_hw = best_parent;
req               146 drivers/clk/sunxi/clk-sun9i-cpus.c 	req->best_parent_rate = best;
req               147 drivers/clk/sunxi/clk-sun9i-cpus.c 	req->rate = best_child_rate;
req                33 drivers/clk/sunxi/clk-sunxi.c static void sun4i_get_pll1_factors(struct factors_request *req)
req                38 drivers/clk/sunxi/clk-sunxi.c 	div = req->rate / 6000000;
req                39 drivers/clk/sunxi/clk-sunxi.c 	req->rate = 6000000 * div;
req                42 drivers/clk/sunxi/clk-sunxi.c 	req->m = 0;
req                45 drivers/clk/sunxi/clk-sunxi.c 	if (req->rate >= 768000000 || req->rate == 42000000 ||
req                46 drivers/clk/sunxi/clk-sunxi.c 			req->rate == 54000000)
req                47 drivers/clk/sunxi/clk-sunxi.c 		req->k = 1;
req                49 drivers/clk/sunxi/clk-sunxi.c 		req->k = 0;
req                53 drivers/clk/sunxi/clk-sunxi.c 		req->p = 3;
req                57 drivers/clk/sunxi/clk-sunxi.c 		req->p = 2;
req                62 drivers/clk/sunxi/clk-sunxi.c 		req->p = 1;
req                66 drivers/clk/sunxi/clk-sunxi.c 		req->p = 0;
req                69 drivers/clk/sunxi/clk-sunxi.c 	div <<= req->p;
req                70 drivers/clk/sunxi/clk-sunxi.c 	div /= (req->k + 1);
req                71 drivers/clk/sunxi/clk-sunxi.c 	req->n = div / 4;
req                80 drivers/clk/sunxi/clk-sunxi.c static void sun6i_a31_get_pll1_factors(struct factors_request *req)
req                86 drivers/clk/sunxi/clk-sunxi.c 	u32 freq_mhz = req->rate / 1000000;
req                87 drivers/clk/sunxi/clk-sunxi.c 	u32 parent_freq_mhz = req->parent_rate / 1000000;
req               101 drivers/clk/sunxi/clk-sunxi.c 	req->rate = freq_mhz * 1000000;
req               105 drivers/clk/sunxi/clk-sunxi.c 		req->k = 3;
req               108 drivers/clk/sunxi/clk-sunxi.c 		req->k = 2;
req               111 drivers/clk/sunxi/clk-sunxi.c 		req->k = 1;
req               114 drivers/clk/sunxi/clk-sunxi.c 		req->k = 0;
req               125 drivers/clk/sunxi/clk-sunxi.c 		req->m = 2;
req               131 drivers/clk/sunxi/clk-sunxi.c 		req->m = 3;
req               134 drivers/clk/sunxi/clk-sunxi.c 		req->m = 1;
req               137 drivers/clk/sunxi/clk-sunxi.c 	req->n = freq_mhz * (req->m + 1) / ((req->k + 1) * parent_freq_mhz)
req               144 drivers/clk/sunxi/clk-sunxi.c 	if ((req->n + 1) > 31 && (req->m + 1) > 1) {
req               145 drivers/clk/sunxi/clk-sunxi.c 		req->n = (req->n + 1) / 2 - 1;
req               146 drivers/clk/sunxi/clk-sunxi.c 		req->m = (req->m + 1) / 2 - 1;
req               157 drivers/clk/sunxi/clk-sunxi.c static void sun8i_a23_get_pll1_factors(struct factors_request *req)
req               162 drivers/clk/sunxi/clk-sunxi.c 	div = req->rate / 6000000;
req               163 drivers/clk/sunxi/clk-sunxi.c 	req->rate = 6000000 * div;
req               166 drivers/clk/sunxi/clk-sunxi.c 	req->m = 0;
req               169 drivers/clk/sunxi/clk-sunxi.c 	if (req->rate >= 768000000 || req->rate == 42000000 ||
req               170 drivers/clk/sunxi/clk-sunxi.c 			req->rate == 54000000)
req               171 drivers/clk/sunxi/clk-sunxi.c 		req->k = 1;
req               173 drivers/clk/sunxi/clk-sunxi.c 		req->k = 0;
req               177 drivers/clk/sunxi/clk-sunxi.c 		req->p = 2;
req               182 drivers/clk/sunxi/clk-sunxi.c 		req->p = 1;
req               186 drivers/clk/sunxi/clk-sunxi.c 		req->p = 0;
req               189 drivers/clk/sunxi/clk-sunxi.c 	div <<= req->p;
req               190 drivers/clk/sunxi/clk-sunxi.c 	div /= (req->k + 1);
req               191 drivers/clk/sunxi/clk-sunxi.c 	req->n = div / 4 - 1;
req               201 drivers/clk/sunxi/clk-sunxi.c static void sun4i_get_pll5_factors(struct factors_request *req)
req               206 drivers/clk/sunxi/clk-sunxi.c 	div = req->rate / req->parent_rate;
req               207 drivers/clk/sunxi/clk-sunxi.c 	req->rate = req->parent_rate * div;
req               210 drivers/clk/sunxi/clk-sunxi.c 		req->k = 0;
req               212 drivers/clk/sunxi/clk-sunxi.c 		req->k = 1;
req               214 drivers/clk/sunxi/clk-sunxi.c 		req->k = 2;
req               216 drivers/clk/sunxi/clk-sunxi.c 		req->k = 3;
req               218 drivers/clk/sunxi/clk-sunxi.c 	req->n = DIV_ROUND_UP(div, (req->k + 1));
req               228 drivers/clk/sunxi/clk-sunxi.c static void sun6i_a31_get_pll6_factors(struct factors_request *req)
req               233 drivers/clk/sunxi/clk-sunxi.c 	div = req->rate / req->parent_rate;
req               234 drivers/clk/sunxi/clk-sunxi.c 	req->rate = req->parent_rate * div;
req               236 drivers/clk/sunxi/clk-sunxi.c 	req->k = div / 32;
req               237 drivers/clk/sunxi/clk-sunxi.c 	if (req->k > 3)
req               238 drivers/clk/sunxi/clk-sunxi.c 		req->k = 3;
req               240 drivers/clk/sunxi/clk-sunxi.c 	req->n = DIV_ROUND_UP(div, (req->k + 1)) - 1;
req               249 drivers/clk/sunxi/clk-sunxi.c static void sun5i_a13_get_ahb_factors(struct factors_request *req)
req               254 drivers/clk/sunxi/clk-sunxi.c 	if (req->parent_rate < req->rate)
req               255 drivers/clk/sunxi/clk-sunxi.c 		req->rate = req->parent_rate;
req               261 drivers/clk/sunxi/clk-sunxi.c 	if (req->rate < 8000)
req               262 drivers/clk/sunxi/clk-sunxi.c 		req->rate = 8000;
req               263 drivers/clk/sunxi/clk-sunxi.c 	if (req->rate > 300000000)
req               264 drivers/clk/sunxi/clk-sunxi.c 		req->rate = 300000000;
req               266 drivers/clk/sunxi/clk-sunxi.c 	div = order_base_2(DIV_ROUND_UP(req->parent_rate, req->rate));
req               272 drivers/clk/sunxi/clk-sunxi.c 	req->rate = req->parent_rate >> div;
req               274 drivers/clk/sunxi/clk-sunxi.c 	req->p = div;
req               288 drivers/clk/sunxi/clk-sunxi.c static void sun6i_get_ahb1_factors(struct factors_request *req)
req               296 drivers/clk/sunxi/clk-sunxi.c 	if (req->parent_rate && req->rate > req->parent_rate)
req               297 drivers/clk/sunxi/clk-sunxi.c 		req->rate = req->parent_rate;
req               299 drivers/clk/sunxi/clk-sunxi.c 	div = DIV_ROUND_UP(req->parent_rate, req->rate);
req               302 drivers/clk/sunxi/clk-sunxi.c 	if (req->parent_index == SUN6I_AHB1_PARENT_PLL6) {
req               318 drivers/clk/sunxi/clk-sunxi.c 	req->rate = (req->parent_rate / calcm) >> calcp;
req               319 drivers/clk/sunxi/clk-sunxi.c 	req->p = calcp;
req               320 drivers/clk/sunxi/clk-sunxi.c 	req->m = calcm - 1;
req               327 drivers/clk/sunxi/clk-sunxi.c static void sun6i_ahb1_recalc(struct factors_request *req)
req               329 drivers/clk/sunxi/clk-sunxi.c 	req->rate = req->parent_rate;
req               332 drivers/clk/sunxi/clk-sunxi.c 	if (req->parent_index == SUN6I_AHB1_PARENT_PLL6)
req               333 drivers/clk/sunxi/clk-sunxi.c 		req->rate /= req->m + 1;
req               336 drivers/clk/sunxi/clk-sunxi.c 	req->rate >>= req->p;
req               345 drivers/clk/sunxi/clk-sunxi.c static void sun4i_get_apb1_factors(struct factors_request *req)
req               350 drivers/clk/sunxi/clk-sunxi.c 	if (req->parent_rate < req->rate)
req               351 drivers/clk/sunxi/clk-sunxi.c 		req->rate = req->parent_rate;
req               353 drivers/clk/sunxi/clk-sunxi.c 	div = DIV_ROUND_UP(req->parent_rate, req->rate);
req               370 drivers/clk/sunxi/clk-sunxi.c 	req->rate = (req->parent_rate >> calcp) / (calcm + 1);
req               371 drivers/clk/sunxi/clk-sunxi.c 	req->m = calcm;
req               372 drivers/clk/sunxi/clk-sunxi.c 	req->p = calcp;
req               384 drivers/clk/sunxi/clk-sunxi.c static void sun7i_a20_get_out_factors(struct factors_request *req)
req               390 drivers/clk/sunxi/clk-sunxi.c 	if (req->rate > req->parent_rate)
req               391 drivers/clk/sunxi/clk-sunxi.c 		req->rate = req->parent_rate;
req               393 drivers/clk/sunxi/clk-sunxi.c 	div = DIV_ROUND_UP(req->parent_rate, req->rate);
req               406 drivers/clk/sunxi/clk-sunxi.c 	req->rate = (req->parent_rate >> calcp) / calcm;
req               407 drivers/clk/sunxi/clk-sunxi.c 	req->m = calcm - 1;
req               408 drivers/clk/sunxi/clk-sunxi.c 	req->p = calcp;
req              1136 drivers/clk/sunxi/clk-sunxi.c static void sun6i_display_factors(struct factors_request *req)
req              1140 drivers/clk/sunxi/clk-sunxi.c 	if (req->rate > req->parent_rate)
req              1141 drivers/clk/sunxi/clk-sunxi.c 		req->rate = req->parent_rate;
req              1143 drivers/clk/sunxi/clk-sunxi.c 	m = DIV_ROUND_UP(req->parent_rate, req->rate);
req              1145 drivers/clk/sunxi/clk-sunxi.c 	req->rate = req->parent_rate / m;
req              1146 drivers/clk/sunxi/clk-sunxi.c 	req->m = m - 1;
req                64 drivers/clk/tegra/clk-bpmp.c 	void *req = &request;
req                77 drivers/clk/tegra/clk-bpmp.c 	memcpy(req + 4, clk->tx.data, clk->tx.size);
req               827 drivers/clk/tegra/clk-dfll.c 				       struct dfll_rate_req *req,
req               838 drivers/clk/tegra/clk-dfll.c 	req->scale_bits = DFLL_FREQ_REQ_SCALE_MAX - 1;
req               849 drivers/clk/tegra/clk-dfll.c 		req->scale_bits = scale - 1;
req               860 drivers/clk/tegra/clk-dfll.c 	req->mult_bits = val;
req               861 drivers/clk/tegra/clk-dfll.c 	req->dvco_target_rate = MULT_TO_DVCO_RATE(req->mult_bits, td->ref_rate);
req               862 drivers/clk/tegra/clk-dfll.c 	req->rate = dfll_scale_dvco_rate(req->scale_bits,
req               863 drivers/clk/tegra/clk-dfll.c 					 req->dvco_target_rate);
req               864 drivers/clk/tegra/clk-dfll.c 	req->lut_index = find_lut_index_for_rate(td, req->dvco_target_rate);
req               865 drivers/clk/tegra/clk-dfll.c 	if (req->lut_index < 0)
req               866 drivers/clk/tegra/clk-dfll.c 		return req->lut_index;
req               880 drivers/clk/tegra/clk-dfll.c 				       struct dfll_rate_req *req)
req               886 drivers/clk/tegra/clk-dfll.c 	force_val = (req->lut_index - td->lut_safe) * coef / td->cg;
req               889 drivers/clk/tegra/clk-dfll.c 	val |= req->mult_bits << DFLL_FREQ_REQ_MULT_SHIFT;
req               890 drivers/clk/tegra/clk-dfll.c 	val |= req->scale_bits << DFLL_FREQ_REQ_SCALE_SHIFT;
req               914 drivers/clk/tegra/clk-dfll.c 	struct dfll_rate_req req;
req               922 drivers/clk/tegra/clk-dfll.c 	ret = dfll_calculate_rate_request(td, &req, rate);
req               927 drivers/clk/tegra/clk-dfll.c 	td->last_req = req;
req              1017 drivers/clk/tegra/clk-dfll.c 	struct dfll_rate_req *req = &td->last_req;
req              1024 drivers/clk/tegra/clk-dfll.c 		if (req->rate == 0) {
req              1036 drivers/clk/tegra/clk-dfll.c 		dfll_set_frequency_request(td, req);
req              1133 drivers/clk/tegra/clk-dfll.c 	struct dfll_rate_req req;
req              1136 drivers/clk/tegra/clk-dfll.c 	ret = dfll_calculate_rate_request(td, &req, clk_req->rate);
req               112 drivers/clk/tegra/clk-emc.c static int emc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
req               134 drivers/clk/tegra/clk-emc.c 		if (timing->rate < req->rate && i != t - 1)
req               137 drivers/clk/tegra/clk-emc.c 		if (timing->rate > req->max_rate) {
req               139 drivers/clk/tegra/clk-emc.c 			req->rate = tegra->timings[i - 1].rate;
req               143 drivers/clk/tegra/clk-emc.c 		if (timing->rate < req->min_rate)
req               146 drivers/clk/tegra/clk-emc.c 		req->rate = timing->rate;
req               151 drivers/clk/tegra/clk-emc.c 		req->rate = timing->rate;
req               155 drivers/clk/tegra/clk-emc.c 	req->rate = clk_hw_get_rate(hw);
req               108 drivers/clk/tegra/clk-sdmmc-mux.c 					struct clk_rate_request *req)
req               112 drivers/clk/tegra/clk-sdmmc-mux.c 	unsigned long output_rate = req->best_parent_rate;
req               114 drivers/clk/tegra/clk-sdmmc-mux.c 	req->rate = max(req->rate, req->min_rate);
req               115 drivers/clk/tegra/clk-sdmmc-mux.c 	req->rate = min(req->rate, req->max_rate);
req               117 drivers/clk/tegra/clk-sdmmc-mux.c 	if (!req->rate)
req               120 drivers/clk/tegra/clk-sdmmc-mux.c 	div = div_frac_get(req->rate, output_rate, 8, 1, sdmmc_mux->div_flags);
req               125 drivers/clk/tegra/clk-sdmmc-mux.c 		req->rate =  DIV_ROUND_UP(output_rate * SDMMC_MUL,
req               128 drivers/clk/tegra/clk-sdmmc-mux.c 		req->rate =  output_rate * SDMMC_MUL / (div + SDMMC_MUL);
req               283 drivers/clk/ti/clock.h 				      struct clk_rate_request *req);
req               311 drivers/clk/ti/clock.h 				       struct clk_rate_request *req);
req               500 drivers/clk/ti/dpll3xxx.c 				      struct clk_rate_request *req)
req               505 drivers/clk/ti/dpll3xxx.c 	if (!req->rate)
req               512 drivers/clk/ti/dpll3xxx.c 	if (clk_hw_get_rate(dd->clk_bypass) == req->rate &&
req               514 drivers/clk/ti/dpll3xxx.c 		req->best_parent_hw = dd->clk_bypass;
req               516 drivers/clk/ti/dpll3xxx.c 		req->rate = omap2_dpll_round_rate(hw, req->rate,
req               517 drivers/clk/ti/dpll3xxx.c 					  &req->best_parent_rate);
req               518 drivers/clk/ti/dpll3xxx.c 		req->best_parent_hw = dd->clk_ref;
req               521 drivers/clk/ti/dpll3xxx.c 	req->best_parent_rate = req->rate;
req               200 drivers/clk/ti/dpll44xx.c 				       struct clk_rate_request *req)
req               205 drivers/clk/ti/dpll44xx.c 	if (!req->rate)
req               212 drivers/clk/ti/dpll44xx.c 	if (clk_hw_get_rate(dd->clk_bypass) == req->rate &&
req               214 drivers/clk/ti/dpll44xx.c 		req->best_parent_hw = dd->clk_bypass;
req               216 drivers/clk/ti/dpll44xx.c 		req->rate = omap4_dpll_regm4xen_round_rate(hw, req->rate,
req               217 drivers/clk/ti/dpll44xx.c 						&req->best_parent_rate);
req               218 drivers/clk/ti/dpll44xx.c 		req->best_parent_hw = dd->clk_ref;
req               221 drivers/clk/ti/dpll44xx.c 	req->best_parent_rate = req->rate;
req               108 drivers/cpufreq/ia64-acpi-cpufreq.c 	struct cpufreq_acpi_req *req = arg;
req               109 drivers/cpufreq/ia64-acpi-cpufreq.c 	unsigned int		cpu = req->cpu;
req               132 drivers/cpufreq/ia64-acpi-cpufreq.c 	struct cpufreq_acpi_req *req = arg;
req               133 drivers/cpufreq/ia64-acpi-cpufreq.c 	unsigned int		cpu = req->cpu;
req               135 drivers/cpufreq/ia64-acpi-cpufreq.c 	int			ret, state = req->state;
req               178 drivers/cpufreq/ia64-acpi-cpufreq.c 	struct cpufreq_acpi_req req;
req               181 drivers/cpufreq/ia64-acpi-cpufreq.c 	req.cpu = cpu;
req               182 drivers/cpufreq/ia64-acpi-cpufreq.c 	ret = work_on_cpu(cpu, processor_get_freq, &req);
req               193 drivers/cpufreq/ia64-acpi-cpufreq.c 	struct cpufreq_acpi_req req;
req               195 drivers/cpufreq/ia64-acpi-cpufreq.c 	req.cpu = policy->cpu;
req               196 drivers/cpufreq/ia64-acpi-cpufreq.c 	req.state = index;
req               198 drivers/cpufreq/ia64-acpi-cpufreq.c 	return work_on_cpu(req.cpu, processor_set_freq, &req);
req              1092 drivers/cpufreq/intel_pstate.c 	struct freq_qos_request *req;
req              1102 drivers/cpufreq/intel_pstate.c 		req = policy->driver_data;
req              1105 drivers/cpufreq/intel_pstate.c 		if (!req)
req              1116 drivers/cpufreq/intel_pstate.c 			req++;
req              1123 drivers/cpufreq/intel_pstate.c 		if (freq_qos_update_request(req, freq) < 0)
req              2378 drivers/cpufreq/intel_pstate.c 	struct freq_qos_request *req;
req              2395 drivers/cpufreq/intel_pstate.c 	req = kcalloc(2, sizeof(*req), GFP_KERNEL);
req              2396 drivers/cpufreq/intel_pstate.c 	if (!req) {
req              2413 drivers/cpufreq/intel_pstate.c 	ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN,
req              2420 drivers/cpufreq/intel_pstate.c 	ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX,
req              2427 drivers/cpufreq/intel_pstate.c 	policy->driver_data = req;
req              2432 drivers/cpufreq/intel_pstate.c 	freq_qos_remove_request(req);
req              2434 drivers/cpufreq/intel_pstate.c 	kfree(req);
req              2443 drivers/cpufreq/intel_pstate.c 	struct freq_qos_request *req;
req              2445 drivers/cpufreq/intel_pstate.c 	req = policy->driver_data;
req              2447 drivers/cpufreq/intel_pstate.c 	freq_qos_remove_request(req + 1);
req              2448 drivers/cpufreq/intel_pstate.c 	freq_qos_remove_request(req);
req              2449 drivers/cpufreq/intel_pstate.c 	kfree(req);
req               234 drivers/cpufreq/pmac32-cpufreq.c 	struct adb_request req;
req               276 drivers/cpufreq/pmac32-cpufreq.c 	pmu_request(&req, NULL, 6, PMU_CPU_SPEED, 'W', 'O', 'O', 'F', low_speed);
req               277 drivers/cpufreq/pmac32-cpufreq.c 	while (!req.complete)
req                68 drivers/cpufreq/ppc_cbe_cpufreq_pmi.c 	struct freq_qos_request *req;
req                87 drivers/cpufreq/ppc_cbe_cpufreq_pmi.c 	req = policy->driver_data;
req                89 drivers/cpufreq/ppc_cbe_cpufreq_pmi.c 	ret = freq_qos_update_request(req,
req               106 drivers/cpufreq/ppc_cbe_cpufreq_pmi.c 	struct freq_qos_request *req;
req               112 drivers/cpufreq/ppc_cbe_cpufreq_pmi.c 	req = kzalloc(sizeof(*req), GFP_KERNEL);
req               113 drivers/cpufreq/ppc_cbe_cpufreq_pmi.c 	if (!req)
req               116 drivers/cpufreq/ppc_cbe_cpufreq_pmi.c 	ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX,
req               120 drivers/cpufreq/ppc_cbe_cpufreq_pmi.c 		kfree(req);
req               124 drivers/cpufreq/ppc_cbe_cpufreq_pmi.c 	policy->driver_data = req;
req               130 drivers/cpufreq/ppc_cbe_cpufreq_pmi.c 	struct freq_qos_request *req = policy->driver_data;
req               133 drivers/cpufreq/ppc_cbe_cpufreq_pmi.c 		freq_qos_remove_request(req);
req               134 drivers/cpufreq/ppc_cbe_cpufreq_pmi.c 		kfree(req);
req               108 drivers/cpufreq/tegra186-cpufreq.c 	struct mrq_cpu_vhint_request req;
req               122 drivers/cpufreq/tegra186-cpufreq.c 	memset(&req, 0, sizeof(req));
req               123 drivers/cpufreq/tegra186-cpufreq.c 	req.addr = phys;
req               124 drivers/cpufreq/tegra186-cpufreq.c 	req.cluster_id = cluster_id;
req               128 drivers/cpufreq/tegra186-cpufreq.c 	msg.tx.data = &req;
req               129 drivers/cpufreq/tegra186-cpufreq.c 	msg.tx.size = sizeof(req);
req                69 drivers/crypto/amcc/crypto4xx_alg.c static inline int crypto4xx_crypt(struct skcipher_request *req,
req                73 drivers/crypto/amcc/crypto4xx_alg.c 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
req                77 drivers/crypto/amcc/crypto4xx_alg.c 	if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
req                81 drivers/crypto/amcc/crypto4xx_alg.c 		crypto4xx_memcpy_to_le32(iv, req->iv, ivlen);
req                83 drivers/crypto/amcc/crypto4xx_alg.c 	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
req                84 drivers/crypto/amcc/crypto4xx_alg.c 		req->cryptlen, iv, ivlen, decrypt ? ctx->sa_in : ctx->sa_out,
req                88 drivers/crypto/amcc/crypto4xx_alg.c int crypto4xx_encrypt_noiv_block(struct skcipher_request *req)
req                90 drivers/crypto/amcc/crypto4xx_alg.c 	return crypto4xx_crypt(req, 0, false, true);
req                93 drivers/crypto/amcc/crypto4xx_alg.c int crypto4xx_encrypt_iv_stream(struct skcipher_request *req)
req                95 drivers/crypto/amcc/crypto4xx_alg.c 	return crypto4xx_crypt(req, AES_IV_SIZE, false, false);
req                98 drivers/crypto/amcc/crypto4xx_alg.c int crypto4xx_decrypt_noiv_block(struct skcipher_request *req)
req               100 drivers/crypto/amcc/crypto4xx_alg.c 	return crypto4xx_crypt(req, 0, true, true);
req               103 drivers/crypto/amcc/crypto4xx_alg.c int crypto4xx_decrypt_iv_stream(struct skcipher_request *req)
req               105 drivers/crypto/amcc/crypto4xx_alg.c 	return crypto4xx_crypt(req, AES_IV_SIZE, true, false);
req               108 drivers/crypto/amcc/crypto4xx_alg.c int crypto4xx_encrypt_iv_block(struct skcipher_request *req)
req               110 drivers/crypto/amcc/crypto4xx_alg.c 	return crypto4xx_crypt(req, AES_IV_SIZE, false, true);
req               113 drivers/crypto/amcc/crypto4xx_alg.c int crypto4xx_decrypt_iv_block(struct skcipher_request *req)
req               115 drivers/crypto/amcc/crypto4xx_alg.c 	return crypto4xx_crypt(req, AES_IV_SIZE, true, true);
req               225 drivers/crypto/amcc/crypto4xx_alg.c int crypto4xx_rfc3686_encrypt(struct skcipher_request *req)
req               227 drivers/crypto/amcc/crypto4xx_alg.c 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
req               231 drivers/crypto/amcc/crypto4xx_alg.c 		cpu_to_le32p((u32 *) req->iv),
req               232 drivers/crypto/amcc/crypto4xx_alg.c 		cpu_to_le32p((u32 *) (req->iv + 4)),
req               235 drivers/crypto/amcc/crypto4xx_alg.c 	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
req               236 drivers/crypto/amcc/crypto4xx_alg.c 				  req->cryptlen, iv, AES_IV_SIZE,
req               240 drivers/crypto/amcc/crypto4xx_alg.c int crypto4xx_rfc3686_decrypt(struct skcipher_request *req)
req               242 drivers/crypto/amcc/crypto4xx_alg.c 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
req               246 drivers/crypto/amcc/crypto4xx_alg.c 		cpu_to_le32p((u32 *) req->iv),
req               247 drivers/crypto/amcc/crypto4xx_alg.c 		cpu_to_le32p((u32 *) (req->iv + 4)),
req               250 drivers/crypto/amcc/crypto4xx_alg.c 	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
req               251 drivers/crypto/amcc/crypto4xx_alg.c 				  req->cryptlen, iv, AES_IV_SIZE,
req               256 drivers/crypto/amcc/crypto4xx_alg.c crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
req               258 drivers/crypto/amcc/crypto4xx_alg.c 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
req               261 drivers/crypto/amcc/crypto4xx_alg.c 	unsigned int counter = be32_to_cpup((__be32 *)(req->iv + iv_len - 4));
req               262 drivers/crypto/amcc/crypto4xx_alg.c 	unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
req               276 drivers/crypto/amcc/crypto4xx_alg.c 		skcipher_request_set_callback(subreq, req->base.flags,
req               278 drivers/crypto/amcc/crypto4xx_alg.c 		skcipher_request_set_crypt(subreq, req->src, req->dst,
req               279 drivers/crypto/amcc/crypto4xx_alg.c 			req->cryptlen, req->iv);
req               286 drivers/crypto/amcc/crypto4xx_alg.c 	return encrypt ? crypto4xx_encrypt_iv_stream(req)
req               287 drivers/crypto/amcc/crypto4xx_alg.c 		       : crypto4xx_decrypt_iv_stream(req);
req               324 drivers/crypto/amcc/crypto4xx_alg.c int crypto4xx_encrypt_ctr(struct skcipher_request *req)
req               326 drivers/crypto/amcc/crypto4xx_alg.c 	return crypto4xx_ctr_crypt(req, true);
req               329 drivers/crypto/amcc/crypto4xx_alg.c int crypto4xx_decrypt_ctr(struct skcipher_request *req)
req               331 drivers/crypto/amcc/crypto4xx_alg.c 	return crypto4xx_ctr_crypt(req, false);
req               334 drivers/crypto/amcc/crypto4xx_alg.c static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
req               338 drivers/crypto/amcc/crypto4xx_alg.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               352 drivers/crypto/amcc/crypto4xx_alg.c 	if (req->assoclen & 0x3 || req->assoclen > 1020)
req               356 drivers/crypto/amcc/crypto4xx_alg.c 	if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3))
req               362 drivers/crypto/amcc/crypto4xx_alg.c static int crypto4xx_aead_fallback(struct aead_request *req,
req               365 drivers/crypto/amcc/crypto4xx_alg.c 	struct aead_request *subreq = aead_request_ctx(req);
req               368 drivers/crypto/amcc/crypto4xx_alg.c 	aead_request_set_callback(subreq, req->base.flags,
req               369 drivers/crypto/amcc/crypto4xx_alg.c 				  req->base.complete, req->base.data);
req               370 drivers/crypto/amcc/crypto4xx_alg.c 	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req               371 drivers/crypto/amcc/crypto4xx_alg.c 			       req->iv);
req               372 drivers/crypto/amcc/crypto4xx_alg.c 	aead_request_set_ad(subreq, req->assoclen);
req               460 drivers/crypto/amcc/crypto4xx_alg.c static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt)
req               462 drivers/crypto/amcc/crypto4xx_alg.c 	struct crypto4xx_ctx *ctx  = crypto_tfm_ctx(req->base.tfm);
req               463 drivers/crypto/amcc/crypto4xx_alg.c 	struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req);
req               464 drivers/crypto/amcc/crypto4xx_alg.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               468 drivers/crypto/amcc/crypto4xx_alg.c 	unsigned int len = req->cryptlen;
req               473 drivers/crypto/amcc/crypto4xx_alg.c 	if (crypto4xx_aead_need_fallback(req, len, true, decrypt))
req               474 drivers/crypto/amcc/crypto4xx_alg.c 		return crypto4xx_aead_fallback(req, ctx, decrypt);
req               479 drivers/crypto/amcc/crypto4xx_alg.c 	if (req->iv[0] == 1) {
req               485 drivers/crypto/amcc/crypto4xx_alg.c 	crypto4xx_memcpy_to_le32(iv, req->iv, 16 - (req->iv[0] + 1));
req               487 drivers/crypto/amcc/crypto4xx_alg.c 	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
req               489 drivers/crypto/amcc/crypto4xx_alg.c 				  sa, ctx->sa_len, req->assoclen, rctx->dst);
req               492 drivers/crypto/amcc/crypto4xx_alg.c int crypto4xx_encrypt_aes_ccm(struct aead_request *req)
req               494 drivers/crypto/amcc/crypto4xx_alg.c 	return crypto4xx_crypt_aes_ccm(req, false);
req               497 drivers/crypto/amcc/crypto4xx_alg.c int crypto4xx_decrypt_aes_ccm(struct aead_request *req)
req               499 drivers/crypto/amcc/crypto4xx_alg.c 	return crypto4xx_crypt_aes_ccm(req, true);
req               608 drivers/crypto/amcc/crypto4xx_alg.c static inline int crypto4xx_crypt_aes_gcm(struct aead_request *req,
req               611 drivers/crypto/amcc/crypto4xx_alg.c 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
req               612 drivers/crypto/amcc/crypto4xx_alg.c 	struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req);
req               614 drivers/crypto/amcc/crypto4xx_alg.c 	unsigned int len = req->cryptlen;
req               617 drivers/crypto/amcc/crypto4xx_alg.c 		len -= crypto_aead_authsize(crypto_aead_reqtfm(req));
req               619 drivers/crypto/amcc/crypto4xx_alg.c 	if (crypto4xx_aead_need_fallback(req, len, false, decrypt))
req               620 drivers/crypto/amcc/crypto4xx_alg.c 		return crypto4xx_aead_fallback(req, ctx, decrypt);
req               622 drivers/crypto/amcc/crypto4xx_alg.c 	crypto4xx_memcpy_to_le32(iv, req->iv, GCM_AES_IV_SIZE);
req               625 drivers/crypto/amcc/crypto4xx_alg.c 	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
req               628 drivers/crypto/amcc/crypto4xx_alg.c 				  ctx->sa_len, req->assoclen, rctx->dst);
req               631 drivers/crypto/amcc/crypto4xx_alg.c int crypto4xx_encrypt_aes_gcm(struct aead_request *req)
req               633 drivers/crypto/amcc/crypto4xx_alg.c 	return crypto4xx_crypt_aes_gcm(req, false);
req               636 drivers/crypto/amcc/crypto4xx_alg.c int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
req               638 drivers/crypto/amcc/crypto4xx_alg.c 	return crypto4xx_crypt_aes_gcm(req, true);
req               687 drivers/crypto/amcc/crypto4xx_alg.c int crypto4xx_hash_init(struct ahash_request *req)
req               689 drivers/crypto/amcc/crypto4xx_alg.c 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
req               695 drivers/crypto/amcc/crypto4xx_alg.c 			__crypto_ahash_cast(req->base.tfm));
req               702 drivers/crypto/amcc/crypto4xx_alg.c int crypto4xx_hash_update(struct ahash_request *req)
req               704 drivers/crypto/amcc/crypto4xx_alg.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req               705 drivers/crypto/amcc/crypto4xx_alg.c 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
req               709 drivers/crypto/amcc/crypto4xx_alg.c 	sg_init_one(&dst, req->result, ds);
req               711 drivers/crypto/amcc/crypto4xx_alg.c 	return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
req               712 drivers/crypto/amcc/crypto4xx_alg.c 				  req->nbytes, NULL, 0, ctx->sa_in,
req               716 drivers/crypto/amcc/crypto4xx_alg.c int crypto4xx_hash_final(struct ahash_request *req)
req               721 drivers/crypto/amcc/crypto4xx_alg.c int crypto4xx_hash_digest(struct ahash_request *req)
req               723 drivers/crypto/amcc/crypto4xx_alg.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req               724 drivers/crypto/amcc/crypto4xx_alg.c 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
req               728 drivers/crypto/amcc/crypto4xx_alg.c 	sg_init_one(&dst, req->result, ds);
req               730 drivers/crypto/amcc/crypto4xx_alg.c 	return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
req               731 drivers/crypto/amcc/crypto4xx_alg.c 				  req->nbytes, NULL, 0, ctx->sa_in,
req               522 drivers/crypto/amcc/crypto4xx_core.c 	struct skcipher_request *req;
req               526 drivers/crypto/amcc/crypto4xx_core.c 	req = skcipher_request_cast(pd_uinfo->async_req);
req               530 drivers/crypto/amcc/crypto4xx_core.c 					  req->cryptlen, req->dst);
req               538 drivers/crypto/amcc/crypto4xx_core.c 		struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req               540 drivers/crypto/amcc/crypto4xx_core.c 		crypto4xx_memcpy_from_le32((u32 *)req->iv,
req               548 drivers/crypto/amcc/crypto4xx_core.c 		skcipher_request_complete(req, -EINPROGRESS);
req               549 drivers/crypto/amcc/crypto4xx_core.c 	skcipher_request_complete(req, 0);
req               678 drivers/crypto/amcc/crypto4xx_core.c int crypto4xx_build_pd(struct crypto_async_request *req,
req               763 drivers/crypto/amcc/crypto4xx_core.c 	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
req               815 drivers/crypto/amcc/crypto4xx_core.c 	pd_uinfo->async_req = req;
req               919 drivers/crypto/amcc/crypto4xx_core.c 		((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) |
req               920 drivers/crypto/amcc/crypto4xx_core.c 		 (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
req               152 drivers/crypto/amcc/crypto4xx_core.h int crypto4xx_build_pd(struct crypto_async_request *req,
req               174 drivers/crypto/amcc/crypto4xx_core.h int crypto4xx_encrypt_ctr(struct skcipher_request *req);
req               175 drivers/crypto/amcc/crypto4xx_core.h int crypto4xx_decrypt_ctr(struct skcipher_request *req);
req               176 drivers/crypto/amcc/crypto4xx_core.h int crypto4xx_encrypt_iv_stream(struct skcipher_request *req);
req               177 drivers/crypto/amcc/crypto4xx_core.h int crypto4xx_decrypt_iv_stream(struct skcipher_request *req);
req               178 drivers/crypto/amcc/crypto4xx_core.h int crypto4xx_encrypt_iv_block(struct skcipher_request *req);
req               179 drivers/crypto/amcc/crypto4xx_core.h int crypto4xx_decrypt_iv_block(struct skcipher_request *req);
req               180 drivers/crypto/amcc/crypto4xx_core.h int crypto4xx_encrypt_noiv_block(struct skcipher_request *req);
req               181 drivers/crypto/amcc/crypto4xx_core.h int crypto4xx_decrypt_noiv_block(struct skcipher_request *req);
req               182 drivers/crypto/amcc/crypto4xx_core.h int crypto4xx_rfc3686_encrypt(struct skcipher_request *req);
req               183 drivers/crypto/amcc/crypto4xx_core.h int crypto4xx_rfc3686_decrypt(struct skcipher_request *req);
req               185 drivers/crypto/amcc/crypto4xx_core.h int crypto4xx_hash_digest(struct ahash_request *req);
req               186 drivers/crypto/amcc/crypto4xx_core.h int crypto4xx_hash_final(struct ahash_request *req);
req               187 drivers/crypto/amcc/crypto4xx_core.h int crypto4xx_hash_update(struct ahash_request *req);
req               188 drivers/crypto/amcc/crypto4xx_core.h int crypto4xx_hash_init(struct ahash_request *req);
req               237 drivers/crypto/amcc/crypto4xx_core.h int crypto4xx_encrypt_aes_ccm(struct aead_request *req);
req               238 drivers/crypto/amcc/crypto4xx_core.h int crypto4xx_decrypt_aes_ccm(struct aead_request *req);
req               241 drivers/crypto/amcc/crypto4xx_core.h int crypto4xx_encrypt_aes_gcm(struct aead_request *req);
req               242 drivers/crypto/amcc/crypto4xx_core.h int crypto4xx_decrypt_aes_gcm(struct aead_request *req);
req               494 drivers/crypto/atmel-aes.c 	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
req               495 drivers/crypto/atmel-aes.c 	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
req               496 drivers/crypto/atmel-aes.c 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
req               499 drivers/crypto/atmel-aes.c 	if (req->nbytes < ivsize)
req               503 drivers/crypto/atmel-aes.c 		scatterwalk_map_and_copy(req->info, req->dst,
req               504 drivers/crypto/atmel-aes.c 					 req->nbytes - ivsize, ivsize, 0);
req               506 drivers/crypto/atmel-aes.c 		if (req->src == req->dst)
req               507 drivers/crypto/atmel-aes.c 			memcpy(req->info, rctx->lastc, ivsize);
req               509 drivers/crypto/atmel-aes.c 			scatterwalk_map_and_copy(req->info, req->src,
req               510 drivers/crypto/atmel-aes.c 						 req->nbytes - ivsize,
req               983 drivers/crypto/atmel-aes.c 	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
req               984 drivers/crypto/atmel-aes.c 	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
req               985 drivers/crypto/atmel-aes.c 	bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD ||
req               995 drivers/crypto/atmel-aes.c 	atmel_aes_write_ctrl(dd, use_dma, req->info);
req               997 drivers/crypto/atmel-aes.c 		return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
req              1000 drivers/crypto/atmel-aes.c 	return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
req              1013 drivers/crypto/atmel-aes.c 	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
req              1022 drivers/crypto/atmel-aes.c 	if (ctx->offset >= req->nbytes)
req              1026 drivers/crypto/atmel-aes.c 	datalen = req->nbytes - ctx->offset;
req              1043 drivers/crypto/atmel-aes.c 	src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset);
req              1044 drivers/crypto/atmel-aes.c 	dst = ((req->src == req->dst) ? src :
req              1045 drivers/crypto/atmel-aes.c 	       scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset));
req              1069 drivers/crypto/atmel-aes.c 	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
req              1070 drivers/crypto/atmel-aes.c 	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
req              1079 drivers/crypto/atmel-aes.c 	memcpy(ctx->iv, req->info, AES_BLOCK_SIZE);
req              1085 drivers/crypto/atmel-aes.c static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
req              1087 drivers/crypto/atmel-aes.c 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
req              1119 drivers/crypto/atmel-aes.c 	rctx = ablkcipher_request_ctx(req);
req              1122 drivers/crypto/atmel-aes.c 	if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
req              1125 drivers/crypto/atmel-aes.c 		if (req->nbytes >= ivsize)
req              1126 drivers/crypto/atmel-aes.c 			scatterwalk_map_and_copy(rctx->lastc, req->src,
req              1127 drivers/crypto/atmel-aes.c 						 req->nbytes - ivsize,
req              1131 drivers/crypto/atmel-aes.c 	return atmel_aes_handle_queue(dd, &req->base);
req              1152 drivers/crypto/atmel-aes.c static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
req              1154 drivers/crypto/atmel-aes.c 	return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
req              1157 drivers/crypto/atmel-aes.c static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
req              1159 drivers/crypto/atmel-aes.c 	return atmel_aes_crypt(req, AES_FLAGS_ECB);
req              1162 drivers/crypto/atmel-aes.c static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
req              1164 drivers/crypto/atmel-aes.c 	return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
req              1167 drivers/crypto/atmel-aes.c static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
req              1169 drivers/crypto/atmel-aes.c 	return atmel_aes_crypt(req, AES_FLAGS_CBC);
req              1172 drivers/crypto/atmel-aes.c static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
req              1174 drivers/crypto/atmel-aes.c 	return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT);
req              1177 drivers/crypto/atmel-aes.c static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
req              1179 drivers/crypto/atmel-aes.c 	return atmel_aes_crypt(req, AES_FLAGS_OFB);
req              1182 drivers/crypto/atmel-aes.c static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
req              1184 drivers/crypto/atmel-aes.c 	return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
req              1187 drivers/crypto/atmel-aes.c static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
req              1189 drivers/crypto/atmel-aes.c 	return atmel_aes_crypt(req, AES_FLAGS_CFB128);
req              1192 drivers/crypto/atmel-aes.c static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
req              1194 drivers/crypto/atmel-aes.c 	return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
req              1197 drivers/crypto/atmel-aes.c static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
req              1199 drivers/crypto/atmel-aes.c 	return atmel_aes_crypt(req, AES_FLAGS_CFB64);
req              1202 drivers/crypto/atmel-aes.c static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
req              1204 drivers/crypto/atmel-aes.c 	return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
req              1207 drivers/crypto/atmel-aes.c static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
req              1209 drivers/crypto/atmel-aes.c 	return atmel_aes_crypt(req, AES_FLAGS_CFB32);
req              1212 drivers/crypto/atmel-aes.c static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
req              1214 drivers/crypto/atmel-aes.c 	return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
req              1217 drivers/crypto/atmel-aes.c static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
req              1219 drivers/crypto/atmel-aes.c 	return atmel_aes_crypt(req, AES_FLAGS_CFB16);
req              1222 drivers/crypto/atmel-aes.c static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
req              1224 drivers/crypto/atmel-aes.c 	return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
req              1227 drivers/crypto/atmel-aes.c static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
req              1229 drivers/crypto/atmel-aes.c 	return atmel_aes_crypt(req, AES_FLAGS_CFB8);
req              1232 drivers/crypto/atmel-aes.c static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
req              1234 drivers/crypto/atmel-aes.c 	return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
req              1237 drivers/crypto/atmel-aes.c static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
req              1239 drivers/crypto/atmel-aes.c 	return atmel_aes_crypt(req, AES_FLAGS_CTR);
req              1530 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
req              1531 drivers/crypto/atmel-aes.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1532 drivers/crypto/atmel-aes.c 	struct atmel_aes_reqctx *rctx = aead_request_ctx(req);
req              1535 drivers/crypto/atmel-aes.c 	const void *iv = req->iv;
req              1567 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
req              1568 drivers/crypto/atmel-aes.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1574 drivers/crypto/atmel-aes.c 	ctx->textlen = req->cryptlen - (enc ? 0 : authsize);
req              1580 drivers/crypto/atmel-aes.c 	if (likely(req->assoclen != 0 || ctx->textlen != 0))
req              1590 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
req              1601 drivers/crypto/atmel-aes.c 	atmel_aes_write(dd, AES_AADLENR, req->assoclen);
req              1605 drivers/crypto/atmel-aes.c 	if (unlikely(req->assoclen == 0)) {
req              1611 drivers/crypto/atmel-aes.c 	padlen = atmel_aes_padlen(req->assoclen, AES_BLOCK_SIZE);
req              1612 drivers/crypto/atmel-aes.c 	if (unlikely(req->assoclen + padlen > dd->buflen))
req              1614 drivers/crypto/atmel-aes.c 	sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->assoclen);
req              1618 drivers/crypto/atmel-aes.c 	dd->datalen = req->assoclen + padlen;
req              1625 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
req              1649 drivers/crypto/atmel-aes.c 	src = scatterwalk_ffwd(ctx->src, req->src, req->assoclen);
req              1650 drivers/crypto/atmel-aes.c 	dst = ((req->src == req->dst) ? src :
req              1651 drivers/crypto/atmel-aes.c 	       scatterwalk_ffwd(ctx->dst, req->dst, req->assoclen));
req              1673 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
req              1689 drivers/crypto/atmel-aes.c 	data[0] = cpu_to_be64(req->assoclen * 8);
req              1718 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
req              1719 drivers/crypto/atmel-aes.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1730 drivers/crypto/atmel-aes.c 	offset = req->assoclen + ctx->textlen;
req              1733 drivers/crypto/atmel-aes.c 		scatterwalk_map_and_copy(otag, req->dst, offset, authsize, 1);
req              1736 drivers/crypto/atmel-aes.c 		scatterwalk_map_and_copy(itag, req->src, offset, authsize, 0);
req              1743 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_crypt(struct aead_request *req,
req              1750 drivers/crypto/atmel-aes.c 	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
req              1758 drivers/crypto/atmel-aes.c 	rctx = aead_request_ctx(req);
req              1761 drivers/crypto/atmel-aes.c 	return atmel_aes_handle_queue(dd, &req->base);
req              1802 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_encrypt(struct aead_request *req)
req              1804 drivers/crypto/atmel-aes.c 	return atmel_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
req              1807 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_decrypt(struct aead_request *req)
req              1809 drivers/crypto/atmel-aes.c 	return atmel_aes_gcm_crypt(req, 0);
req              1857 drivers/crypto/atmel-aes.c 	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
req              1858 drivers/crypto/atmel-aes.c 	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
req              1876 drivers/crypto/atmel-aes.c 	atmel_aes_write_block(dd, AES_IDATAR(0), req->info);
req              1882 drivers/crypto/atmel-aes.c 	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
req              1883 drivers/crypto/atmel-aes.c 	bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD);
req              1908 drivers/crypto/atmel-aes.c 		return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
req              1911 drivers/crypto/atmel-aes.c 	return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
req              1932 drivers/crypto/atmel-aes.c static int atmel_aes_xts_encrypt(struct ablkcipher_request *req)
req              1934 drivers/crypto/atmel-aes.c 	return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT);
req              1937 drivers/crypto/atmel-aes.c static int atmel_aes_xts_decrypt(struct ablkcipher_request *req)
req              1939 drivers/crypto/atmel-aes.c 	return atmel_aes_crypt(req, AES_FLAGS_XTS);
req              1987 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
req              1988 drivers/crypto/atmel-aes.c 	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
req              1997 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
req              1998 drivers/crypto/atmel-aes.c 	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
req              1999 drivers/crypto/atmel-aes.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2016 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
req              2017 drivers/crypto/atmel-aes.c 	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
req              2029 drivers/crypto/atmel-aes.c 				      req->src, req->assoclen,
req              2037 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
req              2038 drivers/crypto/atmel-aes.c 	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
req              2050 drivers/crypto/atmel-aes.c 	src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
req              2053 drivers/crypto/atmel-aes.c 	if (req->src != req->dst)
req              2054 drivers/crypto/atmel-aes.c 		dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
req              2057 drivers/crypto/atmel-aes.c 	memcpy(iv, req->iv, sizeof(iv));
req              2079 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
req              2080 drivers/crypto/atmel-aes.c 	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
req              2092 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
req              2093 drivers/crypto/atmel-aes.c 	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
req              2094 drivers/crypto/atmel-aes.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2104 drivers/crypto/atmel-aes.c 	offs = req->assoclen + rctx->textlen;
req              2107 drivers/crypto/atmel-aes.c 		scatterwalk_map_and_copy(odigest, req->dst, offs, authsize, 1);
req              2109 drivers/crypto/atmel-aes.c 		scatterwalk_map_and_copy(idigest, req->src, offs, authsize, 0);
req              2205 drivers/crypto/atmel-aes.c static int atmel_aes_authenc_crypt(struct aead_request *req,
req              2208 drivers/crypto/atmel-aes.c 	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
req              2209 drivers/crypto/atmel-aes.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2216 drivers/crypto/atmel-aes.c 	if (!enc && req->cryptlen < authsize)
req              2218 drivers/crypto/atmel-aes.c 	rctx->textlen = req->cryptlen - (enc ? 0 : authsize);
req              2225 drivers/crypto/atmel-aes.c 	if (!rctx->textlen && !req->assoclen)
req              2236 drivers/crypto/atmel-aes.c 	return atmel_aes_handle_queue(dd, &req->base);
req              2239 drivers/crypto/atmel-aes.c static int atmel_aes_authenc_cbc_aes_encrypt(struct aead_request *req)
req              2241 drivers/crypto/atmel-aes.c 	return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
req              2244 drivers/crypto/atmel-aes.c static int atmel_aes_authenc_cbc_aes_decrypt(struct aead_request *req)
req              2246 drivers/crypto/atmel-aes.c 	return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC);
req                36 drivers/crypto/atmel-authenc.h int atmel_sha_authenc_schedule(struct ahash_request *req,
req                40 drivers/crypto/atmel-authenc.h int atmel_sha_authenc_init(struct ahash_request *req,
req                45 drivers/crypto/atmel-authenc.h int atmel_sha_authenc_final(struct ahash_request *req,
req                49 drivers/crypto/atmel-authenc.h void  atmel_sha_authenc_abort(struct ahash_request *req);
req                53 drivers/crypto/atmel-ecc.c 	struct kpp_request *req = areq;
req                62 drivers/crypto/atmel-ecc.c 	n_sz = min_t(size_t, ctx->n_sz, req->dst_len);
req                65 drivers/crypto/atmel-ecc.c 	copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, n_sz),
req                73 drivers/crypto/atmel-ecc.c 	kpp_request_complete(req, status);
req               150 drivers/crypto/atmel-ecc.c static int atmel_ecdh_generate_public_key(struct kpp_request *req)
req               152 drivers/crypto/atmel-ecc.c 	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
req               158 drivers/crypto/atmel-ecc.c 		kpp_request_set_tfm(req, ctx->fallback);
req               159 drivers/crypto/atmel-ecc.c 		return crypto_kpp_generate_public_key(req);
req               166 drivers/crypto/atmel-ecc.c 	nbytes = min_t(size_t, ATMEL_ECC_PUBKEY_SIZE, req->dst_len);
req               169 drivers/crypto/atmel-ecc.c 	copied = sg_copy_from_buffer(req->dst,
req               170 drivers/crypto/atmel-ecc.c 				     sg_nents_for_len(req->dst, nbytes),
req               178 drivers/crypto/atmel-ecc.c static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)
req               180 drivers/crypto/atmel-ecc.c 	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
req               187 drivers/crypto/atmel-ecc.c 		kpp_request_set_tfm(req, ctx->fallback);
req               188 drivers/crypto/atmel-ecc.c 		return crypto_kpp_compute_shared_secret(req);
req               192 drivers/crypto/atmel-ecc.c 	if (req->src_len != ATMEL_ECC_PUBKEY_SIZE)
req               195 drivers/crypto/atmel-ecc.c 	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
req               205 drivers/crypto/atmel-ecc.c 	ret = atmel_i2c_init_ecdh_cmd(&work_data->cmd, req->src);
req               209 drivers/crypto/atmel-ecc.c 	atmel_i2c_enqueue(work_data, atmel_ecdh_done, req);
req               143 drivers/crypto/atmel-sha.c 	struct ahash_request	*req;
req               285 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
req               293 drivers/crypto/atmel-sha.c 	if ((dd->is_async || dd->force_complete) && req->base.complete)
req               294 drivers/crypto/atmel-sha.c 		req->base.complete(&req->base, err);
req               425 drivers/crypto/atmel-sha.c static int atmel_sha_init(struct ahash_request *req)
req               427 drivers/crypto/atmel-sha.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               429 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req               475 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
req               564 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
req               594 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
req               640 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
req               705 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
req               736 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
req               761 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
req               856 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
req               879 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
req               880 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req               900 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
req               901 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req               924 drivers/crypto/atmel-sha.c static void atmel_sha_copy_hash(struct ahash_request *req)
req               926 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req               955 drivers/crypto/atmel-sha.c static void atmel_sha_copy_ready_hash(struct ahash_request *req)
req               957 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req               959 drivers/crypto/atmel-sha.c 	if (!req->result)
req               965 drivers/crypto/atmel-sha.c 		memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
req               969 drivers/crypto/atmel-sha.c 		memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
req               973 drivers/crypto/atmel-sha.c 		memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
req               977 drivers/crypto/atmel-sha.c 		memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
req               981 drivers/crypto/atmel-sha.c 		memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
req               986 drivers/crypto/atmel-sha.c static int atmel_sha_finish(struct ahash_request *req)
req               988 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req               992 drivers/crypto/atmel-sha.c 		atmel_sha_copy_ready_hash(req);
req              1000 drivers/crypto/atmel-sha.c static void atmel_sha_finish_req(struct ahash_request *req, int err)
req              1002 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req              1006 drivers/crypto/atmel-sha.c 		atmel_sha_copy_hash(req);
req              1008 drivers/crypto/atmel-sha.c 			err = atmel_sha_finish(req);
req              1052 drivers/crypto/atmel-sha.c 				  struct ahash_request *req)
req              1061 drivers/crypto/atmel-sha.c 	if (req)
req              1062 drivers/crypto/atmel-sha.c 		ret = ahash_enqueue_request(&dd->queue, req);
req              1084 drivers/crypto/atmel-sha.c 	dd->req = ahash_request_cast(async_req);
req              1085 drivers/crypto/atmel-sha.c 	start_async = (dd->req != req);
req              1098 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
req              1099 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req              1103 drivers/crypto/atmel-sha.c 						ctx->op, req->nbytes);
req              1140 drivers/crypto/atmel-sha.c 		atmel_sha_finish_req(req, err);
req              1147 drivers/crypto/atmel-sha.c static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
req              1149 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req              1150 drivers/crypto/atmel-sha.c 	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
req              1155 drivers/crypto/atmel-sha.c 	return atmel_sha_handle_queue(dd, req);
req              1158 drivers/crypto/atmel-sha.c static int atmel_sha_update(struct ahash_request *req)
req              1160 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req              1162 drivers/crypto/atmel-sha.c 	if (!req->nbytes)
req              1165 drivers/crypto/atmel-sha.c 	ctx->total = req->nbytes;
req              1166 drivers/crypto/atmel-sha.c 	ctx->sg = req->src;
req              1177 drivers/crypto/atmel-sha.c 	return atmel_sha_enqueue(req, SHA_OP_UPDATE);
req              1180 drivers/crypto/atmel-sha.c static int atmel_sha_final(struct ahash_request *req)
req              1182 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req              1191 drivers/crypto/atmel-sha.c 		return atmel_sha_finish(req);
req              1193 drivers/crypto/atmel-sha.c 	return atmel_sha_enqueue(req, SHA_OP_FINAL);
req              1196 drivers/crypto/atmel-sha.c static int atmel_sha_finup(struct ahash_request *req)
req              1198 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req              1203 drivers/crypto/atmel-sha.c 	err1 = atmel_sha_update(req);
req              1205 drivers/crypto/atmel-sha.c 	    (err1 == -EBUSY && (ahash_request_flags(req) &
req              1213 drivers/crypto/atmel-sha.c 	err2 = atmel_sha_final(req);
req              1218 drivers/crypto/atmel-sha.c static int atmel_sha_digest(struct ahash_request *req)
req              1220 drivers/crypto/atmel-sha.c 	return atmel_sha_init(req) ?: atmel_sha_finup(req);
req              1224 drivers/crypto/atmel-sha.c static int atmel_sha_export(struct ahash_request *req, void *out)
req              1226 drivers/crypto/atmel-sha.c 	const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req              1232 drivers/crypto/atmel-sha.c static int atmel_sha_import(struct ahash_request *req, const void *in)
req              1234 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req              1416 drivers/crypto/atmel-sha.c 	atmel_sha_finish_req(dd->req, err);
req              1459 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
req              1460 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req              1569 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
req              1570 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req              1623 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
req              1624 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req              1655 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
req              1656 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req              1753 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
req              1754 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req              1755 drivers/crypto/atmel-sha.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1815 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
req              1816 drivers/crypto/atmel-sha.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1818 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req              1832 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
req              1833 drivers/crypto/atmel-sha.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1835 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req              1851 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
req              1852 drivers/crypto/atmel-sha.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1854 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req              1867 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
req              1868 drivers/crypto/atmel-sha.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1870 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req              1882 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
req              1883 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req              1924 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_init(struct ahash_request *req)
req              1928 drivers/crypto/atmel-sha.c 	err = atmel_sha_init(req);
req              1932 drivers/crypto/atmel-sha.c 	return atmel_sha_enqueue(req, SHA_OP_INIT);
req              1937 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
req              1938 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req              1939 drivers/crypto/atmel-sha.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1954 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
req              1955 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req              1956 drivers/crypto/atmel-sha.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1995 drivers/crypto/atmel-sha.c 	atmel_sha_copy_hash(dd->req);
req              1996 drivers/crypto/atmel-sha.c 	atmel_sha_copy_ready_hash(dd->req);
req              2000 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_digest(struct ahash_request *req)
req              2004 drivers/crypto/atmel-sha.c 	err = atmel_sha_init(req);
req              2008 drivers/crypto/atmel-sha.c 	return atmel_sha_enqueue(req, SHA_OP_DIGEST);
req              2013 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
req              2014 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
req              2015 drivers/crypto/atmel-sha.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              2023 drivers/crypto/atmel-sha.c 	if (!req->nbytes)
req              2027 drivers/crypto/atmel-sha.c 	if (req->nbytes > ATMEL_SHA_DMA_THRESHOLD &&
req              2028 drivers/crypto/atmel-sha.c 	    atmel_sha_dma_check_aligned(dd, req->src, req->nbytes))
req              2049 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_MSR, req->nbytes);
req              2050 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_BCR, req->nbytes);
req              2056 drivers/crypto/atmel-sha.c 		return atmel_sha_dma_start(dd, req->src, req->nbytes,
req              2059 drivers/crypto/atmel-sha.c 	return atmel_sha_cpu_start(dd, req->src, req->nbytes, false, true,
req              2241 drivers/crypto/atmel-sha.c 	struct ahash_request *req = areq->data;
req              2242 drivers/crypto/atmel-sha.c 	struct atmel_sha_authenc_reqctx *authctx  = ahash_request_ctx(req);
req              2249 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
req              2250 drivers/crypto/atmel-sha.c 	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
req              2360 drivers/crypto/atmel-sha.c int atmel_sha_authenc_schedule(struct ahash_request *req,
req              2365 drivers/crypto/atmel-sha.c 	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
req              2384 drivers/crypto/atmel-sha.c 	ahash_request_set_tfm(req, tfm);
req              2385 drivers/crypto/atmel-sha.c 	ahash_request_set_callback(req, 0, atmel_sha_authenc_complete, req);
req              2387 drivers/crypto/atmel-sha.c 	return atmel_sha_handle_queue(dd, req);
req              2391 drivers/crypto/atmel-sha.c int atmel_sha_authenc_init(struct ahash_request *req,
req              2397 drivers/crypto/atmel-sha.c 	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
req              2399 drivers/crypto/atmel-sha.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              2419 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
req              2420 drivers/crypto/atmel-sha.c 	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
req              2422 drivers/crypto/atmel-sha.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              2456 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
req              2457 drivers/crypto/atmel-sha.c 	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
req              2462 drivers/crypto/atmel-sha.c int atmel_sha_authenc_final(struct ahash_request *req,
req              2467 drivers/crypto/atmel-sha.c 	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
req              2508 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
req              2509 drivers/crypto/atmel-sha.c 	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
req              2518 drivers/crypto/atmel-sha.c void atmel_sha_authenc_abort(struct ahash_request *req)
req              2520 drivers/crypto/atmel-sha.c 	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
req               109 drivers/crypto/atmel-tdes.c 	struct ablkcipher_request	*req;
req               310 drivers/crypto/atmel-tdes.c 		(dd->flags & TDES_FLAGS_OFB)) && dd->req->info) {
req               311 drivers/crypto/atmel-tdes.c 		atmel_tdes_write_n(dd, TDES_IV1R, dd->req->info, 2);
req               506 drivers/crypto/atmel-tdes.c 					crypto_ablkcipher_reqtfm(dd->req));
req               576 drivers/crypto/atmel-tdes.c 	struct ablkcipher_request *req = dd->req;
req               582 drivers/crypto/atmel-tdes.c 	req->base.complete(&req->base, err);
req               586 drivers/crypto/atmel-tdes.c 			       struct ablkcipher_request *req)
req               595 drivers/crypto/atmel-tdes.c 	if (req)
req               596 drivers/crypto/atmel-tdes.c 		ret = ablkcipher_enqueue_request(&dd->queue, req);
req               613 drivers/crypto/atmel-tdes.c 	req = ablkcipher_request_cast(async_req);
req               616 drivers/crypto/atmel-tdes.c 	dd->req = req;
req               617 drivers/crypto/atmel-tdes.c 	dd->total = req->nbytes;
req               619 drivers/crypto/atmel-tdes.c 	dd->in_sg = req->src;
req               621 drivers/crypto/atmel-tdes.c 	dd->out_sg = req->dst;
req               623 drivers/crypto/atmel-tdes.c 	rctx = ablkcipher_request_ctx(req);
req               624 drivers/crypto/atmel-tdes.c 	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
req               668 drivers/crypto/atmel-tdes.c static int atmel_tdes_crypt(struct ablkcipher_request *req, unsigned long mode)
req               671 drivers/crypto/atmel-tdes.c 			crypto_ablkcipher_reqtfm(req));
req               672 drivers/crypto/atmel-tdes.c 	struct atmel_tdes_reqctx *rctx = ablkcipher_request_ctx(req);
req               675 drivers/crypto/atmel-tdes.c 		if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
req               681 drivers/crypto/atmel-tdes.c 		if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
req               687 drivers/crypto/atmel-tdes.c 		if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
req               693 drivers/crypto/atmel-tdes.c 		if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) {
req               702 drivers/crypto/atmel-tdes.c 	return atmel_tdes_handle_queue(ctx->dd, req);
req               805 drivers/crypto/atmel-tdes.c static int atmel_tdes_ecb_encrypt(struct ablkcipher_request *req)
req               807 drivers/crypto/atmel-tdes.c 	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT);
req               810 drivers/crypto/atmel-tdes.c static int atmel_tdes_ecb_decrypt(struct ablkcipher_request *req)
req               812 drivers/crypto/atmel-tdes.c 	return atmel_tdes_crypt(req, 0);
req               815 drivers/crypto/atmel-tdes.c static int atmel_tdes_cbc_encrypt(struct ablkcipher_request *req)
req               817 drivers/crypto/atmel-tdes.c 	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CBC);
req               820 drivers/crypto/atmel-tdes.c static int atmel_tdes_cbc_decrypt(struct ablkcipher_request *req)
req               822 drivers/crypto/atmel-tdes.c 	return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
req               824 drivers/crypto/atmel-tdes.c static int atmel_tdes_cfb_encrypt(struct ablkcipher_request *req)
req               826 drivers/crypto/atmel-tdes.c 	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB);
req               829 drivers/crypto/atmel-tdes.c static int atmel_tdes_cfb_decrypt(struct ablkcipher_request *req)
req               831 drivers/crypto/atmel-tdes.c 	return atmel_tdes_crypt(req, TDES_FLAGS_CFB);
req               834 drivers/crypto/atmel-tdes.c static int atmel_tdes_cfb8_encrypt(struct ablkcipher_request *req)
req               836 drivers/crypto/atmel-tdes.c 	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
req               840 drivers/crypto/atmel-tdes.c static int atmel_tdes_cfb8_decrypt(struct ablkcipher_request *req)
req               842 drivers/crypto/atmel-tdes.c 	return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB8);
req               845 drivers/crypto/atmel-tdes.c static int atmel_tdes_cfb16_encrypt(struct ablkcipher_request *req)
req               847 drivers/crypto/atmel-tdes.c 	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
req               851 drivers/crypto/atmel-tdes.c static int atmel_tdes_cfb16_decrypt(struct ablkcipher_request *req)
req               853 drivers/crypto/atmel-tdes.c 	return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB16);
req               856 drivers/crypto/atmel-tdes.c static int atmel_tdes_cfb32_encrypt(struct ablkcipher_request *req)
req               858 drivers/crypto/atmel-tdes.c 	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
req               862 drivers/crypto/atmel-tdes.c static int atmel_tdes_cfb32_decrypt(struct ablkcipher_request *req)
req               864 drivers/crypto/atmel-tdes.c 	return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB32);
req               867 drivers/crypto/atmel-tdes.c static int atmel_tdes_ofb_encrypt(struct ablkcipher_request *req)
req               869 drivers/crypto/atmel-tdes.c 	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_OFB);
req               872 drivers/crypto/atmel-tdes.c static int atmel_tdes_ofb_decrypt(struct ablkcipher_request *req)
req               874 drivers/crypto/atmel-tdes.c 	return atmel_tdes_crypt(req, TDES_FLAGS_OFB);
req               284 drivers/crypto/axis/artpec6_crypto.c 	struct crypto_async_request *req;
req               285 drivers/crypto/axis/artpec6_crypto.c 	void (*complete)(struct crypto_async_request *req);
req               364 drivers/crypto/axis/artpec6_crypto.c artpec6_crypto_complete_crypto(struct crypto_async_request *req);
req               366 drivers/crypto/axis/artpec6_crypto.c artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
req               368 drivers/crypto/axis/artpec6_crypto.c artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
req               370 drivers/crypto/axis/artpec6_crypto.c artpec6_crypto_complete_aead(struct crypto_async_request *req);
req               372 drivers/crypto/axis/artpec6_crypto.c artpec6_crypto_complete_hash(struct crypto_async_request *req);
req               456 drivers/crypto/axis/artpec6_crypto.c static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
req               464 drivers/crypto/axis/artpec6_crypto.c 		list_add_tail(&req->list, &ac->pending);
req               465 drivers/crypto/axis/artpec6_crypto.c 		artpec6_crypto_start_dma(req);
req               467 drivers/crypto/axis/artpec6_crypto.c 	} else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
req               468 drivers/crypto/axis/artpec6_crypto.c 		list_add_tail(&req->list, &ac->queue);
req               470 drivers/crypto/axis/artpec6_crypto.c 		artpec6_crypto_common_destroy(req);
req              1040 drivers/crypto/axis/artpec6_crypto.c 		void (*complete)(struct crypto_async_request *req),
req              1054 drivers/crypto/axis/artpec6_crypto.c 	common->req = parent;
req              1085 drivers/crypto/axis/artpec6_crypto.c static int artpec6_crypto_encrypt(struct skcipher_request *req)
req              1087 drivers/crypto/axis/artpec6_crypto.c 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
req              1090 drivers/crypto/axis/artpec6_crypto.c 	void (*complete)(struct crypto_async_request *req);
req              1093 drivers/crypto/axis/artpec6_crypto.c 	req_ctx = skcipher_request_ctx(req);
req              1115 drivers/crypto/axis/artpec6_crypto.c 				  &req->base,
req              1117 drivers/crypto/axis/artpec6_crypto.c 				  req->dst, req->cryptlen);
req              1121 drivers/crypto/axis/artpec6_crypto.c 	ret = artpec6_crypto_prepare_crypto(req);
req              1130 drivers/crypto/axis/artpec6_crypto.c static int artpec6_crypto_decrypt(struct skcipher_request *req)
req              1133 drivers/crypto/axis/artpec6_crypto.c 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
req              1136 drivers/crypto/axis/artpec6_crypto.c 	void (*complete)(struct crypto_async_request *req);
req              1138 drivers/crypto/axis/artpec6_crypto.c 	req_ctx = skcipher_request_ctx(req);
req              1160 drivers/crypto/axis/artpec6_crypto.c 	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
req              1162 drivers/crypto/axis/artpec6_crypto.c 				  req->dst, req->cryptlen);
req              1166 drivers/crypto/axis/artpec6_crypto.c 	ret = artpec6_crypto_prepare_crypto(req);
req              1176 drivers/crypto/axis/artpec6_crypto.c artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
req              1178 drivers/crypto/axis/artpec6_crypto.c 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
req              1182 drivers/crypto/axis/artpec6_crypto.c 					    (req->iv + iv_len - 4));
req              1183 drivers/crypto/axis/artpec6_crypto.c 	unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
req              1207 drivers/crypto/axis/artpec6_crypto.c 			skcipher_request_set_callback(subreq, req->base.flags,
req              1209 drivers/crypto/axis/artpec6_crypto.c 			skcipher_request_set_crypt(subreq, req->src, req->dst,
req              1210 drivers/crypto/axis/artpec6_crypto.c 						   req->cryptlen, req->iv);
req              1218 drivers/crypto/axis/artpec6_crypto.c 	return encrypt ? artpec6_crypto_encrypt(req)
req              1219 drivers/crypto/axis/artpec6_crypto.c 		       : artpec6_crypto_decrypt(req);
req              1222 drivers/crypto/axis/artpec6_crypto.c static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req)
req              1224 drivers/crypto/axis/artpec6_crypto.c 	return artpec6_crypto_ctr_crypt(req, true);
req              1227 drivers/crypto/axis/artpec6_crypto.c static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req)
req              1229 drivers/crypto/axis/artpec6_crypto.c 	return artpec6_crypto_ctr_crypt(req, false);
req              1263 drivers/crypto/axis/artpec6_crypto.c static int artpec6_crypto_aead_encrypt(struct aead_request *req)
req              1266 drivers/crypto/axis/artpec6_crypto.c 	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);
req              1269 drivers/crypto/axis/artpec6_crypto.c 	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
req              1275 drivers/crypto/axis/artpec6_crypto.c 	ret = artpec6_crypto_prepare_aead(req);
req              1284 drivers/crypto/axis/artpec6_crypto.c static int artpec6_crypto_aead_decrypt(struct aead_request *req)
req              1287 drivers/crypto/axis/artpec6_crypto.c 	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);
req              1290 drivers/crypto/axis/artpec6_crypto.c 	if (req->cryptlen < AES_BLOCK_SIZE)
req              1294 drivers/crypto/axis/artpec6_crypto.c 				  &req->base,
req              1300 drivers/crypto/axis/artpec6_crypto.c 	ret = artpec6_crypto_prepare_aead(req);
req              2049 drivers/crypto/axis/artpec6_crypto.c 	struct artpec6_crypto_req_common *req;
req              2052 drivers/crypto/axis/artpec6_crypto.c 		req = list_first_entry(&ac->queue,
req              2055 drivers/crypto/axis/artpec6_crypto.c 		list_move_tail(&req->list, &ac->pending);
req              2056 drivers/crypto/axis/artpec6_crypto.c 		artpec6_crypto_start_dma(req);
req              2058 drivers/crypto/axis/artpec6_crypto.c 		list_add_tail(&req->complete_in_progress, completions);
req              2086 drivers/crypto/axis/artpec6_crypto.c 	struct artpec6_crypto_req_common *req;
req              2101 drivers/crypto/axis/artpec6_crypto.c 	list_for_each_entry_safe(req, n, &ac->pending, list) {
req              2102 drivers/crypto/axis/artpec6_crypto.c 		struct artpec6_crypto_dma_descriptors *dma = req->dma;
req              2106 drivers/crypto/axis/artpec6_crypto.c 		stataddr = dma->stat_dma_addr + 4 * (req->dma->in_cnt - 1);
req              2112 drivers/crypto/axis/artpec6_crypto.c 		stat = req->dma->stat[req->dma->in_cnt-1];
req              2117 drivers/crypto/axis/artpec6_crypto.c 		pr_debug("Request %p status is %X\n", req, stat);
req              2127 drivers/crypto/axis/artpec6_crypto.c 		pr_debug("Completing request %p\n", req);
req              2129 drivers/crypto/axis/artpec6_crypto.c 		list_move_tail(&req->list, &complete_done);
req              2141 drivers/crypto/axis/artpec6_crypto.c 	list_for_each_entry_safe(req, n, &complete_done, list) {
req              2142 drivers/crypto/axis/artpec6_crypto.c 		artpec6_crypto_dma_unmap_all(req);
req              2143 drivers/crypto/axis/artpec6_crypto.c 		artpec6_crypto_copy_bounce_buffers(req);
req              2144 drivers/crypto/axis/artpec6_crypto.c 		artpec6_crypto_common_destroy(req);
req              2146 drivers/crypto/axis/artpec6_crypto.c 		req->complete(req->req);
req              2149 drivers/crypto/axis/artpec6_crypto.c 	list_for_each_entry_safe(req, n, &complete_in_progress,
req              2151 drivers/crypto/axis/artpec6_crypto.c 		req->req->complete(req->req, -EINPROGRESS);
req              2155 drivers/crypto/axis/artpec6_crypto.c static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
req              2157 drivers/crypto/axis/artpec6_crypto.c 	req->complete(req, 0);
req              2161 drivers/crypto/axis/artpec6_crypto.c artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
req              2163 drivers/crypto/axis/artpec6_crypto.c 	struct skcipher_request *cipher_req = container_of(req,
req              2169 drivers/crypto/axis/artpec6_crypto.c 	req->complete(req, 0);
req              2173 drivers/crypto/axis/artpec6_crypto.c artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
req              2175 drivers/crypto/axis/artpec6_crypto.c 	struct skcipher_request *cipher_req = container_of(req,
req              2181 drivers/crypto/axis/artpec6_crypto.c 	req->complete(req, 0);
req              2184 drivers/crypto/axis/artpec6_crypto.c static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
req              2189 drivers/crypto/axis/artpec6_crypto.c 	struct aead_request *areq = container_of(req,
req              2219 drivers/crypto/axis/artpec6_crypto.c 	req->complete(req, result);
req              2222 drivers/crypto/axis/artpec6_crypto.c static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
req              2224 drivers/crypto/axis/artpec6_crypto.c 	req->complete(req, 0);
req              2267 drivers/crypto/axis/artpec6_crypto.c artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
req              2271 drivers/crypto/axis/artpec6_crypto.c 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
req              2300 drivers/crypto/axis/artpec6_crypto.c static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
req              2302 drivers/crypto/axis/artpec6_crypto.c 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
req              2307 drivers/crypto/axis/artpec6_crypto.c 					  &req->base,
req              2315 drivers/crypto/axis/artpec6_crypto.c 	ret = artpec6_crypto_prepare_hash(req);
req              2333 drivers/crypto/axis/artpec6_crypto.c static int artpec6_crypto_hash_final(struct ahash_request *req)
req              2335 drivers/crypto/axis/artpec6_crypto.c 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
req              2339 drivers/crypto/axis/artpec6_crypto.c 	return artpec6_crypto_prepare_submit_hash(req);
req              2342 drivers/crypto/axis/artpec6_crypto.c static int artpec6_crypto_hash_update(struct ahash_request *req)
req              2344 drivers/crypto/axis/artpec6_crypto.c 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
req              2348 drivers/crypto/axis/artpec6_crypto.c 	return artpec6_crypto_prepare_submit_hash(req);
req              2351 drivers/crypto/axis/artpec6_crypto.c static int artpec6_crypto_sha1_init(struct ahash_request *req)
req              2353 drivers/crypto/axis/artpec6_crypto.c 	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
req              2356 drivers/crypto/axis/artpec6_crypto.c static int artpec6_crypto_sha1_digest(struct ahash_request *req)
req              2358 drivers/crypto/axis/artpec6_crypto.c 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
req              2360 drivers/crypto/axis/artpec6_crypto.c 	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
req              2364 drivers/crypto/axis/artpec6_crypto.c 	return artpec6_crypto_prepare_submit_hash(req);
req              2367 drivers/crypto/axis/artpec6_crypto.c static int artpec6_crypto_sha256_init(struct ahash_request *req)
req              2369 drivers/crypto/axis/artpec6_crypto.c 	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
req              2372 drivers/crypto/axis/artpec6_crypto.c static int artpec6_crypto_sha256_digest(struct ahash_request *req)
req              2374 drivers/crypto/axis/artpec6_crypto.c 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
req              2376 drivers/crypto/axis/artpec6_crypto.c 	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
req              2379 drivers/crypto/axis/artpec6_crypto.c 	return artpec6_crypto_prepare_submit_hash(req);
req              2382 drivers/crypto/axis/artpec6_crypto.c static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
req              2384 drivers/crypto/axis/artpec6_crypto.c 	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
req              2387 drivers/crypto/axis/artpec6_crypto.c static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
req              2389 drivers/crypto/axis/artpec6_crypto.c 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
req              2391 drivers/crypto/axis/artpec6_crypto.c 	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
req              2394 drivers/crypto/axis/artpec6_crypto.c 	return artpec6_crypto_prepare_submit_hash(req);
req              2442 drivers/crypto/axis/artpec6_crypto.c static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
req              2444 drivers/crypto/axis/artpec6_crypto.c 	const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
req              2470 drivers/crypto/axis/artpec6_crypto.c static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
req              2472 drivers/crypto/axis/artpec6_crypto.c 	struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
req               307 drivers/crypto/bcm/cipher.c 	struct ablkcipher_request *req =
req               361 drivers/crypto/bcm/cipher.c 		sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
req               377 drivers/crypto/bcm/cipher.c 			sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
req               490 drivers/crypto/bcm/cipher.c 	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
req               507 drivers/crypto/bcm/cipher.c 	struct ablkcipher_request *req = ablkcipher_request_cast(areq);
req               529 drivers/crypto/bcm/cipher.c 	dump_sg(req->dst, rctx->total_received, payload_len);
req               693 drivers/crypto/bcm/cipher.c 	struct ahash_request *req = ahash_request_cast(areq);
req               694 drivers/crypto/bcm/cipher.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req               784 drivers/crypto/bcm/cipher.c 				sg_copy_part_to_buf(req->src, dest, new_len,
req               916 drivers/crypto/bcm/cipher.c 	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
req               933 drivers/crypto/bcm/cipher.c static int spu_hmac_outer_hash(struct ahash_request *req,
req               936 drivers/crypto/bcm/cipher.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req               943 drivers/crypto/bcm/cipher.c 		rc = do_shash("md5", req->result, ctx->opad, blocksize,
req               944 drivers/crypto/bcm/cipher.c 			      req->result, ctx->digestsize, NULL, 0);
req               947 drivers/crypto/bcm/cipher.c 		rc = do_shash("sha1", req->result, ctx->opad, blocksize,
req               948 drivers/crypto/bcm/cipher.c 			      req->result, ctx->digestsize, NULL, 0);
req               951 drivers/crypto/bcm/cipher.c 		rc = do_shash("sha224", req->result, ctx->opad, blocksize,
req               952 drivers/crypto/bcm/cipher.c 			      req->result, ctx->digestsize, NULL, 0);
req               955 drivers/crypto/bcm/cipher.c 		rc = do_shash("sha256", req->result, ctx->opad, blocksize,
req               956 drivers/crypto/bcm/cipher.c 			      req->result, ctx->digestsize, NULL, 0);
req               959 drivers/crypto/bcm/cipher.c 		rc = do_shash("sha384", req->result, ctx->opad, blocksize,
req               960 drivers/crypto/bcm/cipher.c 			      req->result, ctx->digestsize, NULL, 0);
req               963 drivers/crypto/bcm/cipher.c 		rc = do_shash("sha512", req->result, ctx->opad, blocksize,
req               964 drivers/crypto/bcm/cipher.c 			      req->result, ctx->digestsize, NULL, 0);
req               984 drivers/crypto/bcm/cipher.c 	struct ahash_request *req = ahash_request_cast(areq);
req               988 drivers/crypto/bcm/cipher.c 	memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);
req               995 drivers/crypto/bcm/cipher.c 			__swab32s((u32 *)req->result);
req               996 drivers/crypto/bcm/cipher.c 			__swab32s(((u32 *)req->result) + 1);
req               997 drivers/crypto/bcm/cipher.c 			__swab32s(((u32 *)req->result) + 2);
req               998 drivers/crypto/bcm/cipher.c 			__swab32s(((u32 *)req->result) + 3);
req               999 drivers/crypto/bcm/cipher.c 			__swab32s(((u32 *)req->result) + 4);
req              1003 drivers/crypto/bcm/cipher.c 	flow_dump("  digest ", req->result, ctx->digestsize);
req              1007 drivers/crypto/bcm/cipher.c 		err = spu_hmac_outer_hash(req, ctx);
req              1010 drivers/crypto/bcm/cipher.c 		flow_dump("  hmac: ", req->result, ctx->digestsize);
req              1035 drivers/crypto/bcm/cipher.c 	struct ahash_request *req = ahash_request_cast(areq);
req              1036 drivers/crypto/bcm/cipher.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req              1080 drivers/crypto/bcm/cipher.c 				 struct aead_request *req,
req              1300 drivers/crypto/bcm/cipher.c 	struct aead_request *req = container_of(areq,
req              1357 drivers/crypto/bcm/cipher.c 	aead_parms.assoc_size = req->assoclen;
req              1430 drivers/crypto/bcm/cipher.c 		spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen,
req              1458 drivers/crypto/bcm/cipher.c 		sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
req              1459 drivers/crypto/bcm/cipher.c 				    req->assoclen + rctx->total_sent -
req              1532 drivers/crypto/bcm/cipher.c 	err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
req              1555 drivers/crypto/bcm/cipher.c 	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
req              1570 drivers/crypto/bcm/cipher.c 	struct aead_request *req = container_of(areq,
req              1584 drivers/crypto/bcm/cipher.c 	if (req->assoclen)
req              1586 drivers/crypto/bcm/cipher.c 			    req->assoclen);
req              1593 drivers/crypto/bcm/cipher.c 	result_len = req->cryptlen;
req              1595 drivers/crypto/bcm/cipher.c 		icv_offset = req->assoclen + rctx->total_sent;
req              1598 drivers/crypto/bcm/cipher.c 		sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
req              1604 drivers/crypto/bcm/cipher.c 	dump_sg(req->dst, req->assoclen, result_len);
req              1750 drivers/crypto/bcm/cipher.c static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
req              1752 drivers/crypto/bcm/cipher.c 	struct iproc_reqctx_s *rctx = ablkcipher_request_ctx(req);
req              1754 drivers/crypto/bcm/cipher.c 	    crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
req              1759 drivers/crypto/bcm/cipher.c 	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
req              1761 drivers/crypto/bcm/cipher.c 	rctx->parent = &req->base;
req              1764 drivers/crypto/bcm/cipher.c 	rctx->total_todo = req->nbytes;
req              1771 drivers/crypto/bcm/cipher.c 	rctx->src_sg = req->src;
req              1774 drivers/crypto/bcm/cipher.c 	rctx->dst_sg = req->dst;
req              1785 drivers/crypto/bcm/cipher.c 		    crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
req              1786 drivers/crypto/bcm/cipher.c 		memcpy(rctx->msg_buf.iv_ctr, req->info, rctx->iv_ctr_len);
req              1953 drivers/crypto/bcm/cipher.c static int ablkcipher_encrypt(struct ablkcipher_request *req)
req              1955 drivers/crypto/bcm/cipher.c 	flow_log("ablkcipher_encrypt() nbytes:%u\n", req->nbytes);
req              1957 drivers/crypto/bcm/cipher.c 	return ablkcipher_enqueue(req, true);
req              1960 drivers/crypto/bcm/cipher.c static int ablkcipher_decrypt(struct ablkcipher_request *req)
req              1962 drivers/crypto/bcm/cipher.c 	flow_log("ablkcipher_decrypt() nbytes:%u\n", req->nbytes);
req              1963 drivers/crypto/bcm/cipher.c 	return ablkcipher_enqueue(req, false);
req              1966 drivers/crypto/bcm/cipher.c static int ahash_enqueue(struct ahash_request *req)
req              1968 drivers/crypto/bcm/cipher.c 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
req              1969 drivers/crypto/bcm/cipher.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1974 drivers/crypto/bcm/cipher.c 	flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);
req              1976 drivers/crypto/bcm/cipher.c 	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
req              1978 drivers/crypto/bcm/cipher.c 	rctx->parent = &req->base;
req              1984 drivers/crypto/bcm/cipher.c 	rctx->src_sg = req->src;
req              1997 drivers/crypto/bcm/cipher.c 		err = do_shash((unsigned char *)alg_name, req->result,
req              2022 drivers/crypto/bcm/cipher.c static int __ahash_init(struct ahash_request *req)
req              2025 drivers/crypto/bcm/cipher.c 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
req              2026 drivers/crypto/bcm/cipher.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              2080 drivers/crypto/bcm/cipher.c static int ahash_init(struct ahash_request *req)
req              2082 drivers/crypto/bcm/cipher.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              2102 drivers/crypto/bcm/cipher.c 		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
req              2126 drivers/crypto/bcm/cipher.c 		ret = __ahash_init(req);
req              2139 drivers/crypto/bcm/cipher.c static int __ahash_update(struct ahash_request *req)
req              2141 drivers/crypto/bcm/cipher.c 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
req              2143 drivers/crypto/bcm/cipher.c 	flow_log("ahash_update() nbytes:%u\n", req->nbytes);
req              2145 drivers/crypto/bcm/cipher.c 	if (!req->nbytes)
req              2147 drivers/crypto/bcm/cipher.c 	rctx->total_todo += req->nbytes;
req              2150 drivers/crypto/bcm/cipher.c 	return ahash_enqueue(req);
req              2153 drivers/crypto/bcm/cipher.c static int ahash_update(struct ahash_request *req)
req              2155 drivers/crypto/bcm/cipher.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              2168 drivers/crypto/bcm/cipher.c 		if (req->src)
req              2169 drivers/crypto/bcm/cipher.c 			nents = sg_nents(req->src);
req              2174 drivers/crypto/bcm/cipher.c 		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
req              2176 drivers/crypto/bcm/cipher.c 		tmpbuf = kmalloc(req->nbytes, gfp);
req              2180 drivers/crypto/bcm/cipher.c 		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
req              2181 drivers/crypto/bcm/cipher.c 				req->nbytes) {
req              2187 drivers/crypto/bcm/cipher.c 		ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes);
req              2191 drivers/crypto/bcm/cipher.c 		ret = __ahash_update(req);
req              2197 drivers/crypto/bcm/cipher.c static int __ahash_final(struct ahash_request *req)
req              2199 drivers/crypto/bcm/cipher.c 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
req              2201 drivers/crypto/bcm/cipher.c 	flow_log("ahash_final() nbytes:%u\n", req->nbytes);
req              2205 drivers/crypto/bcm/cipher.c 	return ahash_enqueue(req);
req              2208 drivers/crypto/bcm/cipher.c static int ahash_final(struct ahash_request *req)
req              2210 drivers/crypto/bcm/cipher.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              2220 drivers/crypto/bcm/cipher.c 		ret = crypto_shash_final(ctx->shash, req->result);
req              2228 drivers/crypto/bcm/cipher.c 		ret = __ahash_final(req);
req              2234 drivers/crypto/bcm/cipher.c static int __ahash_finup(struct ahash_request *req)
req              2236 drivers/crypto/bcm/cipher.c 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
req              2238 drivers/crypto/bcm/cipher.c 	flow_log("ahash_finup() nbytes:%u\n", req->nbytes);
req              2240 drivers/crypto/bcm/cipher.c 	rctx->total_todo += req->nbytes;
req              2244 drivers/crypto/bcm/cipher.c 	return ahash_enqueue(req);
req              2247 drivers/crypto/bcm/cipher.c static int ahash_finup(struct ahash_request *req)
req              2249 drivers/crypto/bcm/cipher.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              2262 drivers/crypto/bcm/cipher.c 		if (req->src) {
req              2263 drivers/crypto/bcm/cipher.c 			nents = sg_nents(req->src);
req              2270 drivers/crypto/bcm/cipher.c 		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
req              2272 drivers/crypto/bcm/cipher.c 		tmpbuf = kmalloc(req->nbytes, gfp);
req              2278 drivers/crypto/bcm/cipher.c 		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
req              2279 drivers/crypto/bcm/cipher.c 				req->nbytes) {
req              2285 drivers/crypto/bcm/cipher.c 		ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes,
req              2286 drivers/crypto/bcm/cipher.c 					 req->result);
req              2289 drivers/crypto/bcm/cipher.c 		return __ahash_finup(req);
req              2301 drivers/crypto/bcm/cipher.c static int ahash_digest(struct ahash_request *req)
req              2305 drivers/crypto/bcm/cipher.c 	flow_log("ahash_digest() nbytes:%u\n", req->nbytes);
req              2308 drivers/crypto/bcm/cipher.c 	err = __ahash_init(req);
req              2310 drivers/crypto/bcm/cipher.c 		err = __ahash_finup(req);
req              2349 drivers/crypto/bcm/cipher.c static int ahash_export(struct ahash_request *req, void *out)
req              2351 drivers/crypto/bcm/cipher.c 	const struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
req              2364 drivers/crypto/bcm/cipher.c static int ahash_import(struct ahash_request *req, const void *in)
req              2366 drivers/crypto/bcm/cipher.c 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
req              2479 drivers/crypto/bcm/cipher.c static int ahash_hmac_init(struct ahash_request *req)
req              2481 drivers/crypto/bcm/cipher.c 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
req              2482 drivers/crypto/bcm/cipher.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              2490 drivers/crypto/bcm/cipher.c 	ahash_init(req);
req              2505 drivers/crypto/bcm/cipher.c static int ahash_hmac_update(struct ahash_request *req)
req              2507 drivers/crypto/bcm/cipher.c 	flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes);
req              2509 drivers/crypto/bcm/cipher.c 	if (!req->nbytes)
req              2512 drivers/crypto/bcm/cipher.c 	return ahash_update(req);
req              2515 drivers/crypto/bcm/cipher.c static int ahash_hmac_final(struct ahash_request *req)
req              2517 drivers/crypto/bcm/cipher.c 	flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes);
req              2519 drivers/crypto/bcm/cipher.c 	return ahash_final(req);
req              2522 drivers/crypto/bcm/cipher.c static int ahash_hmac_finup(struct ahash_request *req)
req              2524 drivers/crypto/bcm/cipher.c 	flow_log("ahash_hmac_finupl() nbytes:%u\n", req->nbytes);
req              2526 drivers/crypto/bcm/cipher.c 	return ahash_finup(req);
req              2529 drivers/crypto/bcm/cipher.c static int ahash_hmac_digest(struct ahash_request *req)
req              2531 drivers/crypto/bcm/cipher.c 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
req              2532 drivers/crypto/bcm/cipher.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              2537 drivers/crypto/bcm/cipher.c 	flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes);
req              2540 drivers/crypto/bcm/cipher.c 	__ahash_init(req);
req              2562 drivers/crypto/bcm/cipher.c 	return __ahash_finup(req);
req              2567 drivers/crypto/bcm/cipher.c static int aead_need_fallback(struct aead_request *req)
req              2569 drivers/crypto/bcm/cipher.c 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
req              2571 drivers/crypto/bcm/cipher.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              2581 drivers/crypto/bcm/cipher.c 	    (req->assoclen == 0)) {
req              2582 drivers/crypto/bcm/cipher.c 		if ((rctx->is_encrypt && (req->cryptlen == 0)) ||
req              2583 drivers/crypto/bcm/cipher.c 		    (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) {
req              2605 drivers/crypto/bcm/cipher.c 	    (req->assoclen == 0)) {
req              2618 drivers/crypto/bcm/cipher.c 	    req->assoclen != 16 && req->assoclen != 20) {
req              2624 drivers/crypto/bcm/cipher.c 	payload_len = req->cryptlen;
req              2626 drivers/crypto/bcm/cipher.c 		payload_len += req->assoclen;
req              2638 drivers/crypto/bcm/cipher.c 	struct aead_request *req =
req              2640 drivers/crypto/bcm/cipher.c 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
req              2641 drivers/crypto/bcm/cipher.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              2653 drivers/crypto/bcm/cipher.c static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
req              2655 drivers/crypto/bcm/cipher.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              2657 drivers/crypto/bcm/cipher.c 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
req              2667 drivers/crypto/bcm/cipher.c 		aead_request_set_tfm(req, ctx->fallback_cipher);
req              2672 drivers/crypto/bcm/cipher.c 		rctx->old_complete = req->base.complete;
req              2673 drivers/crypto/bcm/cipher.c 		rctx->old_data = req->base.data;
req              2674 drivers/crypto/bcm/cipher.c 		req_flags = aead_request_flags(req);
req              2675 drivers/crypto/bcm/cipher.c 		aead_request_set_callback(req, req_flags, aead_complete, req);
req              2676 drivers/crypto/bcm/cipher.c 		err = is_encrypt ? crypto_aead_encrypt(req) :
req              2677 drivers/crypto/bcm/cipher.c 		    crypto_aead_decrypt(req);
req              2684 drivers/crypto/bcm/cipher.c 			aead_request_set_callback(req, req_flags,
req              2685 drivers/crypto/bcm/cipher.c 						  rctx->old_complete, req);
req              2686 drivers/crypto/bcm/cipher.c 			req->base.data = rctx->old_data;
req              2687 drivers/crypto/bcm/cipher.c 			aead_request_set_tfm(req, aead);
req              2698 drivers/crypto/bcm/cipher.c static int aead_enqueue(struct aead_request *req, bool is_encrypt)
req              2700 drivers/crypto/bcm/cipher.c 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
req              2701 drivers/crypto/bcm/cipher.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              2707 drivers/crypto/bcm/cipher.c 	if (req->assoclen > MAX_ASSOC_SIZE) {
req              2710 drivers/crypto/bcm/cipher.c 		     __func__, req->assoclen, MAX_ASSOC_SIZE);
req              2714 drivers/crypto/bcm/cipher.c 	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
req              2716 drivers/crypto/bcm/cipher.c 	rctx->parent = &req->base;
req              2719 drivers/crypto/bcm/cipher.c 	rctx->total_todo = req->cryptlen;
req              2728 drivers/crypto/bcm/cipher.c 	rctx->assoc = req->src;
req              2735 drivers/crypto/bcm/cipher.c 	if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg,
req              2744 drivers/crypto/bcm/cipher.c 	if (req->dst == req->src) {
req              2753 drivers/crypto/bcm/cipher.c 		if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg,
req              2768 drivers/crypto/bcm/cipher.c 			crypto_aead_ivsize(crypto_aead_reqtfm(req));
req              2777 drivers/crypto/bcm/cipher.c 	flow_log("  src sg: %p\n", req->src);
req              2780 drivers/crypto/bcm/cipher.c 	flow_log("  assoc:  %p, assoclen %u\n", rctx->assoc, req->assoclen);
req              2781 drivers/crypto/bcm/cipher.c 	flow_log("  dst sg: %p\n", req->dst);
req              2785 drivers/crypto/bcm/cipher.c 	flow_dump("  iv: ", req->iv, rctx->iv_ctr_len);
req              2794 drivers/crypto/bcm/cipher.c 	if (unlikely(aead_need_fallback(req)))
req              2795 drivers/crypto/bcm/cipher.c 		return aead_do_fallback(req, is_encrypt);
req              2806 drivers/crypto/bcm/cipher.c 		       req->iv,
req              3105 drivers/crypto/bcm/cipher.c static int aead_encrypt(struct aead_request *req)
req              3107 drivers/crypto/bcm/cipher.c 	flow_log("%s() cryptlen:%u %08x\n", __func__, req->cryptlen,
req              3108 drivers/crypto/bcm/cipher.c 		 req->cryptlen);
req              3109 drivers/crypto/bcm/cipher.c 	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
req              3110 drivers/crypto/bcm/cipher.c 	flow_log("  assoc_len:%u\n", req->assoclen);
req              3112 drivers/crypto/bcm/cipher.c 	return aead_enqueue(req, true);
req              3115 drivers/crypto/bcm/cipher.c static int aead_decrypt(struct aead_request *req)
req              3117 drivers/crypto/bcm/cipher.c 	flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen);
req              3118 drivers/crypto/bcm/cipher.c 	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
req              3119 drivers/crypto/bcm/cipher.c 	flow_log("  assoc_len:%u\n", req->assoclen);
req              3121 drivers/crypto/bcm/cipher.c 	return aead_enqueue(req, false);
req               944 drivers/crypto/caam/caamalg.c 		       struct aead_request *req)
req               946 drivers/crypto/caam/caamalg.c 	caam_unmap(dev, req->src, req->dst,
req               952 drivers/crypto/caam/caamalg.c 			   struct skcipher_request *req)
req               954 drivers/crypto/caam/caamalg.c 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req               957 drivers/crypto/caam/caamalg.c 	caam_unmap(dev, req->src, req->dst,
req               966 drivers/crypto/caam/caamalg.c 	struct aead_request *req = context;
req               977 drivers/crypto/caam/caamalg.c 	aead_unmap(jrdev, edesc, req);
req               981 drivers/crypto/caam/caamalg.c 	aead_request_complete(req, ecode);
req               987 drivers/crypto/caam/caamalg.c 	struct aead_request *req = context;
req               998 drivers/crypto/caam/caamalg.c 	aead_unmap(jrdev, edesc, req);
req              1002 drivers/crypto/caam/caamalg.c 	aead_request_complete(req, ecode);
req              1008 drivers/crypto/caam/caamalg.c 	struct skcipher_request *req = context;
req              1010 drivers/crypto/caam/caamalg.c 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req              1021 drivers/crypto/caam/caamalg.c 	skcipher_unmap(jrdev, edesc, req);
req              1029 drivers/crypto/caam/caamalg.c 		memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
req              1032 drivers/crypto/caam/caamalg.c 				     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
req              1037 drivers/crypto/caam/caamalg.c 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
req              1038 drivers/crypto/caam/caamalg.c 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
req              1042 drivers/crypto/caam/caamalg.c 	skcipher_request_complete(req, ecode);
req              1048 drivers/crypto/caam/caamalg.c 	struct skcipher_request *req = context;
req              1050 drivers/crypto/caam/caamalg.c 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req              1060 drivers/crypto/caam/caamalg.c 	skcipher_unmap(jrdev, edesc, req);
req              1068 drivers/crypto/caam/caamalg.c 		memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
req              1072 drivers/crypto/caam/caamalg.c 				     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
req              1077 drivers/crypto/caam/caamalg.c 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
req              1078 drivers/crypto/caam/caamalg.c 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
req              1082 drivers/crypto/caam/caamalg.c 	skcipher_request_complete(req, ecode);
req              1088 drivers/crypto/caam/caamalg.c static void init_aead_job(struct aead_request *req,
req              1092 drivers/crypto/caam/caamalg.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              1109 drivers/crypto/caam/caamalg.c 		src_dma = edesc->mapped_src_nents ? sg_dma_address(req->src) :
req              1118 drivers/crypto/caam/caamalg.c 	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
req              1124 drivers/crypto/caam/caamalg.c 	if (unlikely(req->src != req->dst)) {
req              1129 drivers/crypto/caam/caamalg.c 			dst_dma = sg_dma_address(req->dst);
req              1141 drivers/crypto/caam/caamalg.c 				   req->assoclen + req->cryptlen + authsize,
req              1145 drivers/crypto/caam/caamalg.c 				   req->assoclen + req->cryptlen - authsize,
req              1149 drivers/crypto/caam/caamalg.c static void init_gcm_job(struct aead_request *req,
req              1153 drivers/crypto/caam/caamalg.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              1160 drivers/crypto/caam/caamalg.c 	init_aead_job(req, edesc, all_contig, encrypt);
req              1161 drivers/crypto/caam/caamalg.c 	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
req              1165 drivers/crypto/caam/caamalg.c 	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
req              1175 drivers/crypto/caam/caamalg.c 	append_data(desc, req->iv, ivsize);
req              1179 drivers/crypto/caam/caamalg.c static void init_chachapoly_job(struct aead_request *req,
req              1183 drivers/crypto/caam/caamalg.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              1185 drivers/crypto/caam/caamalg.c 	unsigned int assoclen = req->assoclen;
req              1189 drivers/crypto/caam/caamalg.c 	init_aead_job(req, edesc, all_contig, encrypt);
req              1208 drivers/crypto/caam/caamalg.c 	append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB |
req              1213 drivers/crypto/caam/caamalg.c static void init_authenc_job(struct aead_request *req,
req              1217 drivers/crypto/caam/caamalg.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              1244 drivers/crypto/caam/caamalg.c 	init_aead_job(req, edesc, all_contig, encrypt);
req              1251 drivers/crypto/caam/caamalg.c 		append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
req              1253 drivers/crypto/caam/caamalg.c 		append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
req              1256 drivers/crypto/caam/caamalg.c 		append_load_as_imm(desc, req->iv, ivsize,
req              1265 drivers/crypto/caam/caamalg.c static void init_skcipher_job(struct skcipher_request *req,
req              1269 drivers/crypto/caam/caamalg.c 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req              1280 drivers/crypto/caam/caamalg.c 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
req              1282 drivers/crypto/caam/caamalg.c 	       (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);
req              1285 drivers/crypto/caam/caamalg.c 		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
req              1286 drivers/crypto/caam/caamalg.c 		     edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
req              1299 drivers/crypto/caam/caamalg.c 		src_dma = sg_dma_address(req->src);
req              1302 drivers/crypto/caam/caamalg.c 	append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);
req              1304 drivers/crypto/caam/caamalg.c 	if (likely(req->src == req->dst)) {
req              1308 drivers/crypto/caam/caamalg.c 		dst_dma = sg_dma_address(req->dst);
req              1315 drivers/crypto/caam/caamalg.c 	append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
req              1321 drivers/crypto/caam/caamalg.c static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
req              1325 drivers/crypto/caam/caamalg.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              1328 drivers/crypto/caam/caamalg.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req              1336 drivers/crypto/caam/caamalg.c 	if (unlikely(req->dst != req->src)) {
req              1337 drivers/crypto/caam/caamalg.c 		src_len = req->assoclen + req->cryptlen;
req              1340 drivers/crypto/caam/caamalg.c 		src_nents = sg_nents_for_len(req->src, src_len);
req              1347 drivers/crypto/caam/caamalg.c 		dst_nents = sg_nents_for_len(req->dst, dst_len);
req              1354 drivers/crypto/caam/caamalg.c 		src_len = req->assoclen + req->cryptlen +
req              1357 drivers/crypto/caam/caamalg.c 		src_nents = sg_nents_for_len(req->src, src_len);
req              1365 drivers/crypto/caam/caamalg.c 	if (likely(req->src == req->dst)) {
req              1366 drivers/crypto/caam/caamalg.c 		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
req              1375 drivers/crypto/caam/caamalg.c 			mapped_src_nents = dma_map_sg(jrdev, req->src,
req              1387 drivers/crypto/caam/caamalg.c 			mapped_dst_nents = dma_map_sg(jrdev, req->dst,
req              1392 drivers/crypto/caam/caamalg.c 				dma_unmap_sg(jrdev, req->src, src_nents,
req              1417 drivers/crypto/caam/caamalg.c 		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
req              1432 drivers/crypto/caam/caamalg.c 		sg_to_sec4_sg_last(req->src, src_len,
req              1437 drivers/crypto/caam/caamalg.c 		sg_to_sec4_sg_last(req->dst, dst_len,
req              1448 drivers/crypto/caam/caamalg.c 		aead_unmap(jrdev, edesc, req);
req              1458 drivers/crypto/caam/caamalg.c static int gcm_encrypt(struct aead_request *req)
req              1461 drivers/crypto/caam/caamalg.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              1469 drivers/crypto/caam/caamalg.c 	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
req              1474 drivers/crypto/caam/caamalg.c 	init_gcm_job(req, edesc, all_contig, true);
req              1481 drivers/crypto/caam/caamalg.c 	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
req              1485 drivers/crypto/caam/caamalg.c 		aead_unmap(jrdev, edesc, req);
req              1492 drivers/crypto/caam/caamalg.c static int chachapoly_encrypt(struct aead_request *req)
req              1495 drivers/crypto/caam/caamalg.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              1502 drivers/crypto/caam/caamalg.c 	edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
req              1509 drivers/crypto/caam/caamalg.c 	init_chachapoly_job(req, edesc, all_contig, true);
req              1514 drivers/crypto/caam/caamalg.c 	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
req              1518 drivers/crypto/caam/caamalg.c 		aead_unmap(jrdev, edesc, req);
req              1525 drivers/crypto/caam/caamalg.c static int chachapoly_decrypt(struct aead_request *req)
req              1528 drivers/crypto/caam/caamalg.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              1535 drivers/crypto/caam/caamalg.c 	edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
req              1542 drivers/crypto/caam/caamalg.c 	init_chachapoly_job(req, edesc, all_contig, false);
req              1547 drivers/crypto/caam/caamalg.c 	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
req              1551 drivers/crypto/caam/caamalg.c 		aead_unmap(jrdev, edesc, req);
req              1558 drivers/crypto/caam/caamalg.c static int ipsec_gcm_encrypt(struct aead_request *req)
req              1560 drivers/crypto/caam/caamalg.c 	return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_encrypt(req);
req              1563 drivers/crypto/caam/caamalg.c static int aead_encrypt(struct aead_request *req)
req              1566 drivers/crypto/caam/caamalg.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              1574 drivers/crypto/caam/caamalg.c 	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
req              1580 drivers/crypto/caam/caamalg.c 	init_authenc_job(req, edesc, all_contig, true);
req              1587 drivers/crypto/caam/caamalg.c 	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
req              1591 drivers/crypto/caam/caamalg.c 		aead_unmap(jrdev, edesc, req);
req              1598 drivers/crypto/caam/caamalg.c static int gcm_decrypt(struct aead_request *req)
req              1601 drivers/crypto/caam/caamalg.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              1609 drivers/crypto/caam/caamalg.c 	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
req              1614 drivers/crypto/caam/caamalg.c 	init_gcm_job(req, edesc, all_contig, false);
req              1621 drivers/crypto/caam/caamalg.c 	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
req              1625 drivers/crypto/caam/caamalg.c 		aead_unmap(jrdev, edesc, req);
req              1632 drivers/crypto/caam/caamalg.c static int ipsec_gcm_decrypt(struct aead_request *req)
req              1634 drivers/crypto/caam/caamalg.c 	return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req);
req              1637 drivers/crypto/caam/caamalg.c static int aead_decrypt(struct aead_request *req)
req              1640 drivers/crypto/caam/caamalg.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              1648 drivers/crypto/caam/caamalg.c 		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
req              1649 drivers/crypto/caam/caamalg.c 		     req->assoclen + req->cryptlen, 1);
req              1652 drivers/crypto/caam/caamalg.c 	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
req              1658 drivers/crypto/caam/caamalg.c 	init_authenc_job(req, edesc, all_contig, false);
req              1665 drivers/crypto/caam/caamalg.c 	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
req              1669 drivers/crypto/caam/caamalg.c 		aead_unmap(jrdev, edesc, req);
req              1679 drivers/crypto/caam/caamalg.c static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
req              1682 drivers/crypto/caam/caamalg.c 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req              1685 drivers/crypto/caam/caamalg.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req              1694 drivers/crypto/caam/caamalg.c 	src_nents = sg_nents_for_len(req->src, req->cryptlen);
req              1697 drivers/crypto/caam/caamalg.c 			req->cryptlen);
req              1701 drivers/crypto/caam/caamalg.c 	if (req->dst != req->src) {
req              1702 drivers/crypto/caam/caamalg.c 		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
req              1705 drivers/crypto/caam/caamalg.c 				req->cryptlen);
req              1710 drivers/crypto/caam/caamalg.c 	if (likely(req->src == req->dst)) {
req              1711 drivers/crypto/caam/caamalg.c 		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
req              1718 drivers/crypto/caam/caamalg.c 		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
req              1724 drivers/crypto/caam/caamalg.c 		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
req              1728 drivers/crypto/caam/caamalg.c 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
req              1752 drivers/crypto/caam/caamalg.c 		if (req->src == req->dst)
req              1770 drivers/crypto/caam/caamalg.c 		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
req              1786 drivers/crypto/caam/caamalg.c 		memcpy(iv, req->iv, ivsize);
req              1791 drivers/crypto/caam/caamalg.c 			caam_unmap(jrdev, req->src, req->dst, src_nents,
req              1800 drivers/crypto/caam/caamalg.c 		sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg +
req              1803 drivers/crypto/caam/caamalg.c 	if (req->src != req->dst && (ivsize || mapped_dst_nents > 1))
req              1804 drivers/crypto/caam/caamalg.c 		sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg +
req              1821 drivers/crypto/caam/caamalg.c 			caam_unmap(jrdev, req->src, req->dst, src_nents,
req              1837 drivers/crypto/caam/caamalg.c static int skcipher_encrypt(struct skcipher_request *req)
req              1840 drivers/crypto/caam/caamalg.c 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req              1846 drivers/crypto/caam/caamalg.c 	if (!req->cryptlen)
req              1850 drivers/crypto/caam/caamalg.c 	edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
req              1855 drivers/crypto/caam/caamalg.c 	init_skcipher_job(req, edesc, true);
req              1862 drivers/crypto/caam/caamalg.c 	ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req);
req              1867 drivers/crypto/caam/caamalg.c 		skcipher_unmap(jrdev, edesc, req);
req              1874 drivers/crypto/caam/caamalg.c static int skcipher_decrypt(struct skcipher_request *req)
req              1877 drivers/crypto/caam/caamalg.c 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req              1883 drivers/crypto/caam/caamalg.c 	if (!req->cryptlen)
req              1887 drivers/crypto/caam/caamalg.c 	edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
req              1892 drivers/crypto/caam/caamalg.c 	init_skcipher_job(req, edesc, false);
req              1899 drivers/crypto/caam/caamalg.c 	ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req);
req              1903 drivers/crypto/caam/caamalg.c 		skcipher_unmap(jrdev, edesc, req);
req               892 drivers/crypto/caam/caamalg_qi.c 		       struct aead_request *req)
req               894 drivers/crypto/caam/caamalg_qi.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               897 drivers/crypto/caam/caamalg_qi.c 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
req               904 drivers/crypto/caam/caamalg_qi.c 			   struct skcipher_request *req)
req               906 drivers/crypto/caam/caamalg_qi.c 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req               909 drivers/crypto/caam/caamalg_qi.c 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
req               938 drivers/crypto/caam/caamalg_qi.c static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
req               941 drivers/crypto/caam/caamalg_qi.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               946 drivers/crypto/caam/caamalg_qi.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req               970 drivers/crypto/caam/caamalg_qi.c 	if (likely(req->src == req->dst)) {
req               971 drivers/crypto/caam/caamalg_qi.c 		src_len = req->assoclen + req->cryptlen +
req               974 drivers/crypto/caam/caamalg_qi.c 		src_nents = sg_nents_for_len(req->src, src_len);
req               982 drivers/crypto/caam/caamalg_qi.c 		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
req               990 drivers/crypto/caam/caamalg_qi.c 		src_len = req->assoclen + req->cryptlen;
req               993 drivers/crypto/caam/caamalg_qi.c 		src_nents = sg_nents_for_len(req->src, src_len);
req              1001 drivers/crypto/caam/caamalg_qi.c 		dst_nents = sg_nents_for_len(req->dst, dst_len);
req              1010 drivers/crypto/caam/caamalg_qi.c 			mapped_src_nents = dma_map_sg(qidev, req->src,
req              1022 drivers/crypto/caam/caamalg_qi.c 			mapped_dst_nents = dma_map_sg(qidev, req->dst,
req              1027 drivers/crypto/caam/caamalg_qi.c 				dma_unmap_sg(qidev, req->src, src_nents,
req              1055 drivers/crypto/caam/caamalg_qi.c 	else if ((req->src == req->dst) && (mapped_src_nents > 1))
req              1067 drivers/crypto/caam/caamalg_qi.c 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
req              1077 drivers/crypto/caam/caamalg_qi.c 		memcpy(iv, req->iv, ivsize);
req              1082 drivers/crypto/caam/caamalg_qi.c 			caam_unmap(qidev, req->src, req->dst, src_nents,
req              1092 drivers/crypto/caam/caamalg_qi.c 	edesc->drv_req.app_ctx = req;
req              1096 drivers/crypto/caam/caamalg_qi.c 	edesc->assoclen = cpu_to_caam32(req->assoclen);
req              1101 drivers/crypto/caam/caamalg_qi.c 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
req              1113 drivers/crypto/caam/caamalg_qi.c 	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
req              1117 drivers/crypto/caam/caamalg_qi.c 		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
req              1123 drivers/crypto/caam/caamalg_qi.c 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
req              1132 drivers/crypto/caam/caamalg_qi.c 	out_len = req->assoclen + req->cryptlen +
req              1134 drivers/crypto/caam/caamalg_qi.c 	in_len = 4 + ivsize + req->assoclen + req->cryptlen;
req              1139 drivers/crypto/caam/caamalg_qi.c 	if (req->dst == req->src) {
req              1141 drivers/crypto/caam/caamalg_qi.c 			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
req              1148 drivers/crypto/caam/caamalg_qi.c 		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
req              1158 drivers/crypto/caam/caamalg_qi.c static inline int aead_crypt(struct aead_request *req, bool encrypt)
req              1161 drivers/crypto/caam/caamalg_qi.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              1169 drivers/crypto/caam/caamalg_qi.c 	edesc = aead_edesc_alloc(req, encrypt);
req              1178 drivers/crypto/caam/caamalg_qi.c 		aead_unmap(ctx->qidev, edesc, req);
req              1185 drivers/crypto/caam/caamalg_qi.c static int aead_encrypt(struct aead_request *req)
req              1187 drivers/crypto/caam/caamalg_qi.c 	return aead_crypt(req, true);
req              1190 drivers/crypto/caam/caamalg_qi.c static int aead_decrypt(struct aead_request *req)
req              1192 drivers/crypto/caam/caamalg_qi.c 	return aead_crypt(req, false);
req              1195 drivers/crypto/caam/caamalg_qi.c static int ipsec_gcm_encrypt(struct aead_request *req)
req              1197 drivers/crypto/caam/caamalg_qi.c 	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
req              1201 drivers/crypto/caam/caamalg_qi.c static int ipsec_gcm_decrypt(struct aead_request *req)
req              1203 drivers/crypto/caam/caamalg_qi.c 	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
req              1210 drivers/crypto/caam/caamalg_qi.c 	struct skcipher_request *req = drv_req->app_ctx;
req              1211 drivers/crypto/caam/caamalg_qi.c 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req              1225 drivers/crypto/caam/caamalg_qi.c 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
req              1228 drivers/crypto/caam/caamalg_qi.c 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
req              1229 drivers/crypto/caam/caamalg_qi.c 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
req              1231 drivers/crypto/caam/caamalg_qi.c 	skcipher_unmap(qidev, edesc, req);
req              1239 drivers/crypto/caam/caamalg_qi.c 		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
req              1243 drivers/crypto/caam/caamalg_qi.c 	skcipher_request_complete(req, ecode);
req              1246 drivers/crypto/caam/caamalg_qi.c static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
req              1249 drivers/crypto/caam/caamalg_qi.c 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req              1252 drivers/crypto/caam/caamalg_qi.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req              1267 drivers/crypto/caam/caamalg_qi.c 	src_nents = sg_nents_for_len(req->src, req->cryptlen);
req              1270 drivers/crypto/caam/caamalg_qi.c 			req->cryptlen);
req              1274 drivers/crypto/caam/caamalg_qi.c 	if (unlikely(req->src != req->dst)) {
req              1275 drivers/crypto/caam/caamalg_qi.c 		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
req              1278 drivers/crypto/caam/caamalg_qi.c 				req->cryptlen);
req              1282 drivers/crypto/caam/caamalg_qi.c 		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
req              1289 drivers/crypto/caam/caamalg_qi.c 		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
req              1293 drivers/crypto/caam/caamalg_qi.c 			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
req              1297 drivers/crypto/caam/caamalg_qi.c 		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
req              1316 drivers/crypto/caam/caamalg_qi.c 	if (req->src != req->dst)
req              1326 drivers/crypto/caam/caamalg_qi.c 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
req              1335 drivers/crypto/caam/caamalg_qi.c 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
req              1343 drivers/crypto/caam/caamalg_qi.c 	memcpy(iv, req->iv, ivsize);
req              1348 drivers/crypto/caam/caamalg_qi.c 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
req              1358 drivers/crypto/caam/caamalg_qi.c 	edesc->drv_req.app_ctx = req;
req              1363 drivers/crypto/caam/caamalg_qi.c 	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
req              1365 drivers/crypto/caam/caamalg_qi.c 	if (req->src != req->dst)
req              1366 drivers/crypto/caam/caamalg_qi.c 		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
req              1375 drivers/crypto/caam/caamalg_qi.c 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
req              1384 drivers/crypto/caam/caamalg_qi.c 				  ivsize + req->cryptlen, 0);
req              1386 drivers/crypto/caam/caamalg_qi.c 	if (req->src == req->dst)
req              1388 drivers/crypto/caam/caamalg_qi.c 				     sizeof(*sg_table), req->cryptlen + ivsize,
req              1392 drivers/crypto/caam/caamalg_qi.c 				     sizeof(*sg_table), req->cryptlen + ivsize,
req              1398 drivers/crypto/caam/caamalg_qi.c static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
req              1401 drivers/crypto/caam/caamalg_qi.c 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req              1405 drivers/crypto/caam/caamalg_qi.c 	if (!req->cryptlen)
req              1412 drivers/crypto/caam/caamalg_qi.c 	edesc = skcipher_edesc_alloc(req, encrypt);
req              1420 drivers/crypto/caam/caamalg_qi.c 		skcipher_unmap(ctx->qidev, edesc, req);
req              1427 drivers/crypto/caam/caamalg_qi.c static int skcipher_encrypt(struct skcipher_request *req)
req              1429 drivers/crypto/caam/caamalg_qi.c 	return skcipher_crypt(req, true);
req              1432 drivers/crypto/caam/caamalg_qi.c static int skcipher_decrypt(struct skcipher_request *req)
req              1434 drivers/crypto/caam/caamalg_qi.c 	return skcipher_crypt(req, false);
req               347 drivers/crypto/caam/caamalg_qi2.c static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
req               350 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               351 drivers/crypto/caam/caamalg_qi2.c 	struct caam_request *req_ctx = aead_request_ctx(req);
req               358 drivers/crypto/caam/caamalg_qi2.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req               377 drivers/crypto/caam/caamalg_qi2.c 	if (unlikely(req->dst != req->src)) {
req               378 drivers/crypto/caam/caamalg_qi2.c 		src_len = req->assoclen + req->cryptlen;
req               381 drivers/crypto/caam/caamalg_qi2.c 		src_nents = sg_nents_for_len(req->src, src_len);
req               389 drivers/crypto/caam/caamalg_qi2.c 		dst_nents = sg_nents_for_len(req->dst, dst_len);
req               398 drivers/crypto/caam/caamalg_qi2.c 			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
req               410 drivers/crypto/caam/caamalg_qi2.c 			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
req               414 drivers/crypto/caam/caamalg_qi2.c 				dma_unmap_sg(dev, req->src, src_nents,
req               423 drivers/crypto/caam/caamalg_qi2.c 		src_len = req->assoclen + req->cryptlen +
req               426 drivers/crypto/caam/caamalg_qi2.c 		src_nents = sg_nents_for_len(req->src, src_len);
req               434 drivers/crypto/caam/caamalg_qi2.c 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
req               461 drivers/crypto/caam/caamalg_qi2.c 	else if ((req->src == req->dst) && (mapped_src_nents > 1))
req               474 drivers/crypto/caam/caamalg_qi2.c 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
req               484 drivers/crypto/caam/caamalg_qi2.c 		memcpy(iv, req->iv, ivsize);
req               489 drivers/crypto/caam/caamalg_qi2.c 			caam_unmap(dev, req->src, req->dst, src_nents,
req               506 drivers/crypto/caam/caamalg_qi2.c 		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
req               508 drivers/crypto/caam/caamalg_qi2.c 		edesc->assoclen = cpu_to_caam32(req->assoclen);
req               513 drivers/crypto/caam/caamalg_qi2.c 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
req               525 drivers/crypto/caam/caamalg_qi2.c 	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
req               529 drivers/crypto/caam/caamalg_qi2.c 		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
req               535 drivers/crypto/caam/caamalg_qi2.c 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
req               544 drivers/crypto/caam/caamalg_qi2.c 	out_len = req->assoclen + req->cryptlen +
req               546 drivers/crypto/caam/caamalg_qi2.c 	in_len = 4 + ivsize + req->assoclen + req->cryptlen;
req               554 drivers/crypto/caam/caamalg_qi2.c 	if (req->dst == req->src) {
req               557 drivers/crypto/caam/caamalg_qi2.c 			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
req               573 drivers/crypto/caam/caamalg_qi2.c 		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
req              1116 drivers/crypto/caam/caamalg_qi2.c static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
req              1118 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req              1119 drivers/crypto/caam/caamalg_qi2.c 	struct caam_request *req_ctx = skcipher_request_ctx(req);
req              1124 drivers/crypto/caam/caamalg_qi2.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req              1134 drivers/crypto/caam/caamalg_qi2.c 	src_nents = sg_nents_for_len(req->src, req->cryptlen);
req              1137 drivers/crypto/caam/caamalg_qi2.c 			req->cryptlen);
req              1141 drivers/crypto/caam/caamalg_qi2.c 	if (unlikely(req->dst != req->src)) {
req              1142 drivers/crypto/caam/caamalg_qi2.c 		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
req              1145 drivers/crypto/caam/caamalg_qi2.c 				req->cryptlen);
req              1149 drivers/crypto/caam/caamalg_qi2.c 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
req              1156 drivers/crypto/caam/caamalg_qi2.c 		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
req              1160 drivers/crypto/caam/caamalg_qi2.c 			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
req              1164 drivers/crypto/caam/caamalg_qi2.c 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
req              1183 drivers/crypto/caam/caamalg_qi2.c 	if (req->src != req->dst)
req              1193 drivers/crypto/caam/caamalg_qi2.c 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
req              1202 drivers/crypto/caam/caamalg_qi2.c 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
req              1210 drivers/crypto/caam/caamalg_qi2.c 	memcpy(iv, req->iv, ivsize);
req              1215 drivers/crypto/caam/caamalg_qi2.c 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
req              1227 drivers/crypto/caam/caamalg_qi2.c 	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
req              1229 drivers/crypto/caam/caamalg_qi2.c 	if (req->src != req->dst)
req              1230 drivers/crypto/caam/caamalg_qi2.c 		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
req              1239 drivers/crypto/caam/caamalg_qi2.c 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
req              1247 drivers/crypto/caam/caamalg_qi2.c 	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
req              1248 drivers/crypto/caam/caamalg_qi2.c 	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
req              1255 drivers/crypto/caam/caamalg_qi2.c 	if (req->src == req->dst)
req              1266 drivers/crypto/caam/caamalg_qi2.c 		       struct aead_request *req)
req              1268 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              1271 drivers/crypto/caam/caamalg_qi2.c 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
req              1278 drivers/crypto/caam/caamalg_qi2.c 			   struct skcipher_request *req)
req              1280 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req              1283 drivers/crypto/caam/caamalg_qi2.c 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
req              1291 drivers/crypto/caam/caamalg_qi2.c 	struct aead_request *req = container_of(areq, struct aead_request,
req              1295 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              1304 drivers/crypto/caam/caamalg_qi2.c 	aead_unmap(ctx->dev, edesc, req);
req              1306 drivers/crypto/caam/caamalg_qi2.c 	aead_request_complete(req, ecode);
req              1312 drivers/crypto/caam/caamalg_qi2.c 	struct aead_request *req = container_of(areq, struct aead_request,
req              1316 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              1325 drivers/crypto/caam/caamalg_qi2.c 	aead_unmap(ctx->dev, edesc, req);
req              1327 drivers/crypto/caam/caamalg_qi2.c 	aead_request_complete(req, ecode);
req              1330 drivers/crypto/caam/caamalg_qi2.c static int aead_encrypt(struct aead_request *req)
req              1333 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              1335 drivers/crypto/caam/caamalg_qi2.c 	struct caam_request *caam_req = aead_request_ctx(req);
req              1339 drivers/crypto/caam/caamalg_qi2.c 	edesc = aead_edesc_alloc(req, true);
req              1346 drivers/crypto/caam/caamalg_qi2.c 	caam_req->ctx = &req->base;
req              1350 drivers/crypto/caam/caamalg_qi2.c 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
req              1351 drivers/crypto/caam/caamalg_qi2.c 		aead_unmap(ctx->dev, edesc, req);
req              1358 drivers/crypto/caam/caamalg_qi2.c static int aead_decrypt(struct aead_request *req)
req              1361 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              1363 drivers/crypto/caam/caamalg_qi2.c 	struct caam_request *caam_req = aead_request_ctx(req);
req              1367 drivers/crypto/caam/caamalg_qi2.c 	edesc = aead_edesc_alloc(req, false);
req              1374 drivers/crypto/caam/caamalg_qi2.c 	caam_req->ctx = &req->base;
req              1378 drivers/crypto/caam/caamalg_qi2.c 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
req              1379 drivers/crypto/caam/caamalg_qi2.c 		aead_unmap(ctx->dev, edesc, req);
req              1386 drivers/crypto/caam/caamalg_qi2.c static int ipsec_gcm_encrypt(struct aead_request *req)
req              1388 drivers/crypto/caam/caamalg_qi2.c 	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
req              1391 drivers/crypto/caam/caamalg_qi2.c static int ipsec_gcm_decrypt(struct aead_request *req)
req              1393 drivers/crypto/caam/caamalg_qi2.c 	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
req              1399 drivers/crypto/caam/caamalg_qi2.c 	struct skcipher_request *req = skcipher_request_cast(areq);
req              1401 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req              1413 drivers/crypto/caam/caamalg_qi2.c 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
req              1416 drivers/crypto/caam/caamalg_qi2.c 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
req              1417 drivers/crypto/caam/caamalg_qi2.c 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
req              1419 drivers/crypto/caam/caamalg_qi2.c 	skcipher_unmap(ctx->dev, edesc, req);
req              1427 drivers/crypto/caam/caamalg_qi2.c 		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
req              1431 drivers/crypto/caam/caamalg_qi2.c 	skcipher_request_complete(req, ecode);
req              1437 drivers/crypto/caam/caamalg_qi2.c 	struct skcipher_request *req = skcipher_request_cast(areq);
req              1439 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req              1451 drivers/crypto/caam/caamalg_qi2.c 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
req              1454 drivers/crypto/caam/caamalg_qi2.c 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
req              1455 drivers/crypto/caam/caamalg_qi2.c 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
req              1457 drivers/crypto/caam/caamalg_qi2.c 	skcipher_unmap(ctx->dev, edesc, req);
req              1465 drivers/crypto/caam/caamalg_qi2.c 		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
req              1469 drivers/crypto/caam/caamalg_qi2.c 	skcipher_request_complete(req, ecode);
req              1472 drivers/crypto/caam/caamalg_qi2.c static int skcipher_encrypt(struct skcipher_request *req)
req              1475 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req              1477 drivers/crypto/caam/caamalg_qi2.c 	struct caam_request *caam_req = skcipher_request_ctx(req);
req              1480 drivers/crypto/caam/caamalg_qi2.c 	if (!req->cryptlen)
req              1484 drivers/crypto/caam/caamalg_qi2.c 	edesc = skcipher_edesc_alloc(req);
req              1491 drivers/crypto/caam/caamalg_qi2.c 	caam_req->ctx = &req->base;
req              1495 drivers/crypto/caam/caamalg_qi2.c 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
req              1496 drivers/crypto/caam/caamalg_qi2.c 		skcipher_unmap(ctx->dev, edesc, req);
req              1503 drivers/crypto/caam/caamalg_qi2.c static int skcipher_decrypt(struct skcipher_request *req)
req              1506 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req              1508 drivers/crypto/caam/caamalg_qi2.c 	struct caam_request *caam_req = skcipher_request_ctx(req);
req              1511 drivers/crypto/caam/caamalg_qi2.c 	if (!req->cryptlen)
req              1514 drivers/crypto/caam/caamalg_qi2.c 	edesc = skcipher_edesc_alloc(req);
req              1521 drivers/crypto/caam/caamalg_qi2.c 	caam_req->ctx = &req->base;
req              1525 drivers/crypto/caam/caamalg_qi2.c 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
req              1526 drivers/crypto/caam/caamalg_qi2.c 		skcipher_unmap(ctx->dev, edesc, req);
req              3006 drivers/crypto/caam/caamalg_qi2.c 	int (*update)(struct ahash_request *req);
req              3007 drivers/crypto/caam/caamalg_qi2.c 	int (*final)(struct ahash_request *req);
req              3008 drivers/crypto/caam/caamalg_qi2.c 	int (*finup)(struct ahash_request *req);
req              3016 drivers/crypto/caam/caamalg_qi2.c 	int (*update)(struct ahash_request *req);
req              3017 drivers/crypto/caam/caamalg_qi2.c 	int (*final)(struct ahash_request *req);
req              3018 drivers/crypto/caam/caamalg_qi2.c 	int (*finup)(struct ahash_request *req);
req              3312 drivers/crypto/caam/caamalg_qi2.c 			       struct ahash_request *req)
req              3314 drivers/crypto/caam/caamalg_qi2.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              3317 drivers/crypto/caam/caamalg_qi2.c 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
req              3332 drivers/crypto/caam/caamalg_qi2.c 				   struct ahash_request *req, u32 flag)
req              3334 drivers/crypto/caam/caamalg_qi2.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              3340 drivers/crypto/caam/caamalg_qi2.c 	ahash_unmap(dev, edesc, req);
req              3346 drivers/crypto/caam/caamalg_qi2.c 	struct ahash_request *req = ahash_request_cast(areq);
req              3347 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req              3348 drivers/crypto/caam/caamalg_qi2.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              3359 drivers/crypto/caam/caamalg_qi2.c 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
req              3360 drivers/crypto/caam/caamalg_qi2.c 	memcpy(req->result, state->caam_ctx, digestsize);
req              3367 drivers/crypto/caam/caamalg_qi2.c 	req->base.complete(&req->base, ecode);
req              3373 drivers/crypto/caam/caamalg_qi2.c 	struct ahash_request *req = ahash_request_cast(areq);
req              3374 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req              3375 drivers/crypto/caam/caamalg_qi2.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              3385 drivers/crypto/caam/caamalg_qi2.c 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
req              3392 drivers/crypto/caam/caamalg_qi2.c 	if (req->result)
req              3394 drivers/crypto/caam/caamalg_qi2.c 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
req              3397 drivers/crypto/caam/caamalg_qi2.c 	req->base.complete(&req->base, ecode);
req              3403 drivers/crypto/caam/caamalg_qi2.c 	struct ahash_request *req = ahash_request_cast(areq);
req              3404 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req              3405 drivers/crypto/caam/caamalg_qi2.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              3416 drivers/crypto/caam/caamalg_qi2.c 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
req              3417 drivers/crypto/caam/caamalg_qi2.c 	memcpy(req->result, state->caam_ctx, digestsize);
req              3424 drivers/crypto/caam/caamalg_qi2.c 	req->base.complete(&req->base, ecode);
req              3430 drivers/crypto/caam/caamalg_qi2.c 	struct ahash_request *req = ahash_request_cast(areq);
req              3431 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req              3432 drivers/crypto/caam/caamalg_qi2.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              3442 drivers/crypto/caam/caamalg_qi2.c 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
req              3449 drivers/crypto/caam/caamalg_qi2.c 	if (req->result)
req              3451 drivers/crypto/caam/caamalg_qi2.c 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
req              3454 drivers/crypto/caam/caamalg_qi2.c 	req->base.complete(&req->base, ecode);
req              3457 drivers/crypto/caam/caamalg_qi2.c static int ahash_update_ctx(struct ahash_request *req)
req              3459 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req              3461 drivers/crypto/caam/caamalg_qi2.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              3465 drivers/crypto/caam/caamalg_qi2.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req              3471 drivers/crypto/caam/caamalg_qi2.c 	int in_len = *buflen + req->nbytes, to_hash;
req              3482 drivers/crypto/caam/caamalg_qi2.c 		int src_len = req->nbytes - *next_buflen;
req              3484 drivers/crypto/caam/caamalg_qi2.c 		src_nents = sg_nents_for_len(req->src, src_len);
req              3491 drivers/crypto/caam/caamalg_qi2.c 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
req              3504 drivers/crypto/caam/caamalg_qi2.c 			dma_unmap_sg(ctx->dev, req->src, src_nents,
req              3525 drivers/crypto/caam/caamalg_qi2.c 			sg_to_qm_sg_last(req->src, src_len,
req              3528 drivers/crypto/caam/caamalg_qi2.c 				scatterwalk_map_and_copy(next_buf, req->src,
req              3557 drivers/crypto/caam/caamalg_qi2.c 		req_ctx->ctx = &req->base;
req              3563 drivers/crypto/caam/caamalg_qi2.c 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
req              3566 drivers/crypto/caam/caamalg_qi2.c 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req              3567 drivers/crypto/caam/caamalg_qi2.c 					 req->nbytes, 0);
req              3580 drivers/crypto/caam/caamalg_qi2.c 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
req              3585 drivers/crypto/caam/caamalg_qi2.c static int ahash_final_ctx(struct ahash_request *req)
req              3587 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req              3589 drivers/crypto/caam/caamalg_qi2.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              3593 drivers/crypto/caam/caamalg_qi2.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req              3642 drivers/crypto/caam/caamalg_qi2.c 	req_ctx->ctx = &req->base;
req              3647 drivers/crypto/caam/caamalg_qi2.c 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
req              3651 drivers/crypto/caam/caamalg_qi2.c 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
req              3656 drivers/crypto/caam/caamalg_qi2.c static int ahash_finup_ctx(struct ahash_request *req)
req              3658 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req              3660 drivers/crypto/caam/caamalg_qi2.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              3664 drivers/crypto/caam/caamalg_qi2.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req              3674 drivers/crypto/caam/caamalg_qi2.c 	src_nents = sg_nents_for_len(req->src, req->nbytes);
req              3681 drivers/crypto/caam/caamalg_qi2.c 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
req              3694 drivers/crypto/caam/caamalg_qi2.c 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
req              3713 drivers/crypto/caam/caamalg_qi2.c 	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
req              3728 drivers/crypto/caam/caamalg_qi2.c 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
req              3736 drivers/crypto/caam/caamalg_qi2.c 	req_ctx->ctx = &req->base;
req              3741 drivers/crypto/caam/caamalg_qi2.c 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
req              3745 drivers/crypto/caam/caamalg_qi2.c 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
req              3750 drivers/crypto/caam/caamalg_qi2.c static int ahash_digest(struct ahash_request *req)
req              3752 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req              3754 drivers/crypto/caam/caamalg_qi2.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              3758 drivers/crypto/caam/caamalg_qi2.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req              3767 drivers/crypto/caam/caamalg_qi2.c 	src_nents = sg_nents_for_len(req->src, req->nbytes);
req              3774 drivers/crypto/caam/caamalg_qi2.c 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
req              3787 drivers/crypto/caam/caamalg_qi2.c 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
req              3799 drivers/crypto/caam/caamalg_qi2.c 		sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
req              3811 drivers/crypto/caam/caamalg_qi2.c 		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
req              3824 drivers/crypto/caam/caamalg_qi2.c 	dpaa2_fl_set_len(in_fle, req->nbytes);
req              3832 drivers/crypto/caam/caamalg_qi2.c 	req_ctx->ctx = &req->base;
req              3836 drivers/crypto/caam/caamalg_qi2.c 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
req              3840 drivers/crypto/caam/caamalg_qi2.c 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
req              3845 drivers/crypto/caam/caamalg_qi2.c static int ahash_final_no_ctx(struct ahash_request *req)
req              3847 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req              3849 drivers/crypto/caam/caamalg_qi2.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              3853 drivers/crypto/caam/caamalg_qi2.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req              3904 drivers/crypto/caam/caamalg_qi2.c 	req_ctx->ctx = &req->base;
req              3909 drivers/crypto/caam/caamalg_qi2.c 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
req              3913 drivers/crypto/caam/caamalg_qi2.c 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
req              3918 drivers/crypto/caam/caamalg_qi2.c static int ahash_update_no_ctx(struct ahash_request *req)
req              3920 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req              3922 drivers/crypto/caam/caamalg_qi2.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              3926 drivers/crypto/caam/caamalg_qi2.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req              3932 drivers/crypto/caam/caamalg_qi2.c 	int in_len = *buflen + req->nbytes, to_hash;
req              3942 drivers/crypto/caam/caamalg_qi2.c 		int src_len = req->nbytes - *next_buflen;
req              3944 drivers/crypto/caam/caamalg_qi2.c 		src_nents = sg_nents_for_len(req->src, src_len);
req              3951 drivers/crypto/caam/caamalg_qi2.c 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
req              3964 drivers/crypto/caam/caamalg_qi2.c 			dma_unmap_sg(ctx->dev, req->src, src_nents,
req              3978 drivers/crypto/caam/caamalg_qi2.c 		sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
req              3981 drivers/crypto/caam/caamalg_qi2.c 			scatterwalk_map_and_copy(next_buf, req->src,
req              4016 drivers/crypto/caam/caamalg_qi2.c 		req_ctx->ctx = &req->base;
req              4022 drivers/crypto/caam/caamalg_qi2.c 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
req              4029 drivers/crypto/caam/caamalg_qi2.c 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req              4030 drivers/crypto/caam/caamalg_qi2.c 					 req->nbytes, 0);
req              4043 drivers/crypto/caam/caamalg_qi2.c 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
req              4048 drivers/crypto/caam/caamalg_qi2.c static int ahash_finup_no_ctx(struct ahash_request *req)
req              4050 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req              4052 drivers/crypto/caam/caamalg_qi2.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              4056 drivers/crypto/caam/caamalg_qi2.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req              4065 drivers/crypto/caam/caamalg_qi2.c 	src_nents = sg_nents_for_len(req->src, req->nbytes);
req              4072 drivers/crypto/caam/caamalg_qi2.c 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
req              4085 drivers/crypto/caam/caamalg_qi2.c 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
req              4097 drivers/crypto/caam/caamalg_qi2.c 	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
req              4122 drivers/crypto/caam/caamalg_qi2.c 	dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
req              4130 drivers/crypto/caam/caamalg_qi2.c 	req_ctx->ctx = &req->base;
req              4134 drivers/crypto/caam/caamalg_qi2.c 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
req              4139 drivers/crypto/caam/caamalg_qi2.c 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
req              4144 drivers/crypto/caam/caamalg_qi2.c static int ahash_update_first(struct ahash_request *req)
req              4146 drivers/crypto/caam/caamalg_qi2.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req              4148 drivers/crypto/caam/caamalg_qi2.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              4152 drivers/crypto/caam/caamalg_qi2.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req              4161 drivers/crypto/caam/caamalg_qi2.c 	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
req              4163 drivers/crypto/caam/caamalg_qi2.c 	to_hash = req->nbytes - *next_buflen;
req              4167 drivers/crypto/caam/caamalg_qi2.c 		int src_len = req->nbytes - *next_buflen;
req              4169 drivers/crypto/caam/caamalg_qi2.c 		src_nents = sg_nents_for_len(req->src, src_len);
req              4176 drivers/crypto/caam/caamalg_qi2.c 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
req              4189 drivers/crypto/caam/caamalg_qi2.c 			dma_unmap_sg(ctx->dev, req->src, src_nents,
req              4204 drivers/crypto/caam/caamalg_qi2.c 			sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
req              4220 drivers/crypto/caam/caamalg_qi2.c 			dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
req              4224 drivers/crypto/caam/caamalg_qi2.c 			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
req              4244 drivers/crypto/caam/caamalg_qi2.c 		req_ctx->ctx = &req->base;
req              4249 drivers/crypto/caam/caamalg_qi2.c 		    !(ret == -EBUSY && req->base.flags &
req              4260 drivers/crypto/caam/caamalg_qi2.c 		scatterwalk_map_and_copy(next_buf, req->src, 0,
req              4261 drivers/crypto/caam/caamalg_qi2.c 					 req->nbytes, 0);
req              4271 drivers/crypto/caam/caamalg_qi2.c 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
req              4276 drivers/crypto/caam/caamalg_qi2.c static int ahash_finup_first(struct ahash_request *req)
req              4278 drivers/crypto/caam/caamalg_qi2.c 	return ahash_digest(req);
req              4281 drivers/crypto/caam/caamalg_qi2.c static int ahash_init(struct ahash_request *req)
req              4283 drivers/crypto/caam/caamalg_qi2.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              4299 drivers/crypto/caam/caamalg_qi2.c static int ahash_update(struct ahash_request *req)
req              4301 drivers/crypto/caam/caamalg_qi2.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              4303 drivers/crypto/caam/caamalg_qi2.c 	return state->update(req);
req              4306 drivers/crypto/caam/caamalg_qi2.c static int ahash_finup(struct ahash_request *req)
req              4308 drivers/crypto/caam/caamalg_qi2.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              4310 drivers/crypto/caam/caamalg_qi2.c 	return state->finup(req);
req              4313 drivers/crypto/caam/caamalg_qi2.c static int ahash_final(struct ahash_request *req)
req              4315 drivers/crypto/caam/caamalg_qi2.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              4317 drivers/crypto/caam/caamalg_qi2.c 	return state->final(req);
req              4320 drivers/crypto/caam/caamalg_qi2.c static int ahash_export(struct ahash_request *req, void *out)
req              4322 drivers/crypto/caam/caamalg_qi2.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              4345 drivers/crypto/caam/caamalg_qi2.c static int ahash_import(struct ahash_request *req, const void *in)
req              4347 drivers/crypto/caam/caamalg_qi2.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              4776 drivers/crypto/caam/caamalg_qi2.c 	struct caam_request *req;
req              4792 drivers/crypto/caam/caamalg_qi2.c 	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
req              4793 drivers/crypto/caam/caamalg_qi2.c 	dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
req              4795 drivers/crypto/caam/caamalg_qi2.c 	req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
req              5389 drivers/crypto/caam/caamalg_qi2.c int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
req              5396 drivers/crypto/caam/caamalg_qi2.c 	if (IS_ERR(req))
req              5397 drivers/crypto/caam/caamalg_qi2.c 		return PTR_ERR(req);
req              5409 drivers/crypto/caam/caamalg_qi2.c 	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
req              5411 drivers/crypto/caam/caamalg_qi2.c 	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
req              5413 drivers/crypto/caam/caamalg_qi2.c 	if (dma_mapping_error(dev, req->fd_flt_dma)) {
req              5420 drivers/crypto/caam/caamalg_qi2.c 	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
req              5421 drivers/crypto/caam/caamalg_qi2.c 	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
req              5422 drivers/crypto/caam/caamalg_qi2.c 	dpaa2_fd_set_flc(&fd, req->flc_dma);
req              5442 drivers/crypto/caam/caamalg_qi2.c 	dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
req               196 drivers/crypto/caam/caamalg_qi2.h int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
req               115 drivers/crypto/caam/caamhash.c 	int (*update)(struct ahash_request *req);
req               116 drivers/crypto/caam/caamhash.c 	int (*final)(struct ahash_request *req);
req               117 drivers/crypto/caam/caamhash.c 	int (*finup)(struct ahash_request *req);
req               125 drivers/crypto/caam/caamhash.c 	int (*update)(struct ahash_request *req);
req               126 drivers/crypto/caam/caamhash.c 	int (*final)(struct ahash_request *req);
req               127 drivers/crypto/caam/caamhash.c 	int (*finup)(struct ahash_request *req);
req               569 drivers/crypto/caam/caamhash.c 			struct ahash_request *req, int dst_len)
req               571 drivers/crypto/caam/caamhash.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req               574 drivers/crypto/caam/caamhash.c 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
req               589 drivers/crypto/caam/caamhash.c 			struct ahash_request *req, int dst_len, u32 flag)
req               591 drivers/crypto/caam/caamhash.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req               597 drivers/crypto/caam/caamhash.c 	ahash_unmap(dev, edesc, req, dst_len);
req               603 drivers/crypto/caam/caamhash.c 	struct ahash_request *req = context;
req               605 drivers/crypto/caam/caamhash.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req               607 drivers/crypto/caam/caamhash.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req               617 drivers/crypto/caam/caamhash.c 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
req               618 drivers/crypto/caam/caamhash.c 	memcpy(req->result, state->caam_ctx, digestsize);
req               625 drivers/crypto/caam/caamhash.c 	req->base.complete(&req->base, ecode);
req               631 drivers/crypto/caam/caamhash.c 	struct ahash_request *req = context;
req               633 drivers/crypto/caam/caamhash.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req               635 drivers/crypto/caam/caamhash.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req               645 drivers/crypto/caam/caamhash.c 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
req               652 drivers/crypto/caam/caamhash.c 	if (req->result)
req               654 drivers/crypto/caam/caamhash.c 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
req               657 drivers/crypto/caam/caamhash.c 	req->base.complete(&req->base, ecode);
req               663 drivers/crypto/caam/caamhash.c 	struct ahash_request *req = context;
req               665 drivers/crypto/caam/caamhash.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req               667 drivers/crypto/caam/caamhash.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req               677 drivers/crypto/caam/caamhash.c 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
req               678 drivers/crypto/caam/caamhash.c 	memcpy(req->result, state->caam_ctx, digestsize);
req               685 drivers/crypto/caam/caamhash.c 	req->base.complete(&req->base, ecode);
req               691 drivers/crypto/caam/caamhash.c 	struct ahash_request *req = context;
req               693 drivers/crypto/caam/caamhash.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req               695 drivers/crypto/caam/caamhash.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req               705 drivers/crypto/caam/caamhash.c 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
req               712 drivers/crypto/caam/caamhash.c 	if (req->result)
req               714 drivers/crypto/caam/caamhash.c 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
req               717 drivers/crypto/caam/caamhash.c 	req->base.complete(&req->base, ecode);
req               746 drivers/crypto/caam/caamhash.c 			       struct ahash_request *req, int nents,
req               758 drivers/crypto/caam/caamhash.c 		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);
req               770 drivers/crypto/caam/caamhash.c 		src_dma = sg_dma_address(req->src);
req               781 drivers/crypto/caam/caamhash.c static int ahash_update_ctx(struct ahash_request *req)
req               783 drivers/crypto/caam/caamhash.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req               785 drivers/crypto/caam/caamhash.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req               787 drivers/crypto/caam/caamhash.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req               794 drivers/crypto/caam/caamhash.c 	int in_len = *buflen + req->nbytes, to_hash;
req               817 drivers/crypto/caam/caamhash.c 		int src_len = req->nbytes - *next_buflen;
req               819 drivers/crypto/caam/caamhash.c 		src_nents = sg_nents_for_len(req->src, src_len);
req               826 drivers/crypto/caam/caamhash.c 			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
req               847 drivers/crypto/caam/caamhash.c 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
req               864 drivers/crypto/caam/caamhash.c 			sg_to_sec4_sg_last(req->src, src_len,
req               872 drivers/crypto/caam/caamhash.c 			scatterwalk_map_and_copy(next_buf, req->src,
req               895 drivers/crypto/caam/caamhash.c 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
req               901 drivers/crypto/caam/caamhash.c 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req               902 drivers/crypto/caam/caamhash.c 					 req->nbytes, 0);
req               915 drivers/crypto/caam/caamhash.c 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
req               920 drivers/crypto/caam/caamhash.c static int ahash_final_ctx(struct ahash_request *req)
req               922 drivers/crypto/caam/caamhash.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req               924 drivers/crypto/caam/caamhash.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req               926 drivers/crypto/caam/caamhash.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req               975 drivers/crypto/caam/caamhash.c 	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
req               981 drivers/crypto/caam/caamhash.c 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
req               986 drivers/crypto/caam/caamhash.c static int ahash_finup_ctx(struct ahash_request *req)
req               988 drivers/crypto/caam/caamhash.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req               990 drivers/crypto/caam/caamhash.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req               992 drivers/crypto/caam/caamhash.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req              1002 drivers/crypto/caam/caamhash.c 	src_nents = sg_nents_for_len(req->src, req->nbytes);
req              1009 drivers/crypto/caam/caamhash.c 		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
req              1026 drivers/crypto/caam/caamhash.c 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
req              1043 drivers/crypto/caam/caamhash.c 	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
req              1045 drivers/crypto/caam/caamhash.c 				  req->nbytes);
req              1055 drivers/crypto/caam/caamhash.c 	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
req              1061 drivers/crypto/caam/caamhash.c 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
req              1066 drivers/crypto/caam/caamhash.c static int ahash_digest(struct ahash_request *req)
req              1068 drivers/crypto/caam/caamhash.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req              1070 drivers/crypto/caam/caamhash.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              1072 drivers/crypto/caam/caamhash.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req              1082 drivers/crypto/caam/caamhash.c 	src_nents = sg_nents_for_len(req->src, req->nbytes);
req              1089 drivers/crypto/caam/caamhash.c 		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
req              1104 drivers/crypto/caam/caamhash.c 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
req              1110 drivers/crypto/caam/caamhash.c 	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
req              1111 drivers/crypto/caam/caamhash.c 				  req->nbytes);
req              1113 drivers/crypto/caam/caamhash.c 		ahash_unmap(jrdev, edesc, req, digestsize);
req              1122 drivers/crypto/caam/caamhash.c 		ahash_unmap(jrdev, edesc, req, digestsize);
req              1131 drivers/crypto/caam/caamhash.c 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
req              1135 drivers/crypto/caam/caamhash.c 		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
req              1143 drivers/crypto/caam/caamhash.c static int ahash_final_no_ctx(struct ahash_request *req)
req              1145 drivers/crypto/caam/caamhash.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req              1147 drivers/crypto/caam/caamhash.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              1149 drivers/crypto/caam/caamhash.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req              1185 drivers/crypto/caam/caamhash.c 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
req              1189 drivers/crypto/caam/caamhash.c 		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
req              1195 drivers/crypto/caam/caamhash.c 	ahash_unmap(jrdev, edesc, req, digestsize);
req              1202 drivers/crypto/caam/caamhash.c static int ahash_update_no_ctx(struct ahash_request *req)
req              1204 drivers/crypto/caam/caamhash.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req              1206 drivers/crypto/caam/caamhash.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              1208 drivers/crypto/caam/caamhash.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req              1215 drivers/crypto/caam/caamhash.c 	int in_len = *buflen + req->nbytes, to_hash;
req              1237 drivers/crypto/caam/caamhash.c 		int src_len = req->nbytes - *next_buflen;
req              1239 drivers/crypto/caam/caamhash.c 		src_nents = sg_nents_for_len(req->src, src_len);
req              1246 drivers/crypto/caam/caamhash.c 			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
req              1268 drivers/crypto/caam/caamhash.c 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
req              1279 drivers/crypto/caam/caamhash.c 		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
req              1282 drivers/crypto/caam/caamhash.c 			scatterwalk_map_and_copy(next_buf, req->src,
req              1308 drivers/crypto/caam/caamhash.c 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
req              1317 drivers/crypto/caam/caamhash.c 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req              1318 drivers/crypto/caam/caamhash.c 					 req->nbytes, 0);
req              1331 drivers/crypto/caam/caamhash.c 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
req              1337 drivers/crypto/caam/caamhash.c static int ahash_finup_no_ctx(struct ahash_request *req)
req              1339 drivers/crypto/caam/caamhash.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req              1341 drivers/crypto/caam/caamhash.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              1343 drivers/crypto/caam/caamhash.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req              1352 drivers/crypto/caam/caamhash.c 	src_nents = sg_nents_for_len(req->src, req->nbytes);
req              1359 drivers/crypto/caam/caamhash.c 		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
req              1378 drivers/crypto/caam/caamhash.c 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
req              1391 drivers/crypto/caam/caamhash.c 	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
req              1392 drivers/crypto/caam/caamhash.c 				  req->nbytes);
req              1406 drivers/crypto/caam/caamhash.c 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
req              1410 drivers/crypto/caam/caamhash.c 		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
req              1416 drivers/crypto/caam/caamhash.c 	ahash_unmap(jrdev, edesc, req, digestsize);
req              1423 drivers/crypto/caam/caamhash.c static int ahash_update_first(struct ahash_request *req)
req              1425 drivers/crypto/caam/caamhash.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req              1427 drivers/crypto/caam/caamhash.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              1429 drivers/crypto/caam/caamhash.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req              1440 drivers/crypto/caam/caamhash.c 	*next_buflen = req->nbytes & (blocksize - 1);
req              1441 drivers/crypto/caam/caamhash.c 	to_hash = req->nbytes - *next_buflen;
req              1455 drivers/crypto/caam/caamhash.c 		src_nents = sg_nents_for_len(req->src,
req              1456 drivers/crypto/caam/caamhash.c 					     req->nbytes - *next_buflen);
req              1463 drivers/crypto/caam/caamhash.c 			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
req              1483 drivers/crypto/caam/caamhash.c 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
req              1489 drivers/crypto/caam/caamhash.c 		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
req              1495 drivers/crypto/caam/caamhash.c 			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
req              1508 drivers/crypto/caam/caamhash.c 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
req              1520 drivers/crypto/caam/caamhash.c 		scatterwalk_map_and_copy(next_buf, req->src, 0,
req              1521 drivers/crypto/caam/caamhash.c 					 req->nbytes, 0);
req              1531 drivers/crypto/caam/caamhash.c 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
req              1536 drivers/crypto/caam/caamhash.c static int ahash_finup_first(struct ahash_request *req)
req              1538 drivers/crypto/caam/caamhash.c 	return ahash_digest(req);
req              1541 drivers/crypto/caam/caamhash.c static int ahash_init(struct ahash_request *req)
req              1543 drivers/crypto/caam/caamhash.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              1559 drivers/crypto/caam/caamhash.c static int ahash_update(struct ahash_request *req)
req              1561 drivers/crypto/caam/caamhash.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              1563 drivers/crypto/caam/caamhash.c 	return state->update(req);
req              1566 drivers/crypto/caam/caamhash.c static int ahash_finup(struct ahash_request *req)
req              1568 drivers/crypto/caam/caamhash.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              1570 drivers/crypto/caam/caamhash.c 	return state->finup(req);
req              1573 drivers/crypto/caam/caamhash.c static int ahash_final(struct ahash_request *req)
req              1575 drivers/crypto/caam/caamhash.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              1577 drivers/crypto/caam/caamhash.c 	return state->final(req);
req              1580 drivers/crypto/caam/caamhash.c static int ahash_export(struct ahash_request *req, void *out)
req              1582 drivers/crypto/caam/caamhash.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req              1605 drivers/crypto/caam/caamhash.c static int ahash_import(struct ahash_request *req, const void *in)
req              1607 drivers/crypto/caam/caamhash.c 	struct caam_hash_state *state = ahash_request_ctx(req);
req                44 drivers/crypto/caam/caampkc.c 			 struct akcipher_request *req)
req                46 drivers/crypto/caam/caampkc.c 	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
req                48 drivers/crypto/caam/caampkc.c 	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
req                57 drivers/crypto/caam/caampkc.c 			  struct akcipher_request *req)
req                59 drivers/crypto/caam/caampkc.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req                69 drivers/crypto/caam/caampkc.c 			      struct akcipher_request *req)
req                71 drivers/crypto/caam/caampkc.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req                81 drivers/crypto/caam/caampkc.c 			      struct akcipher_request *req)
req                83 drivers/crypto/caam/caampkc.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req                98 drivers/crypto/caam/caampkc.c 			      struct akcipher_request *req)
req               100 drivers/crypto/caam/caampkc.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               119 drivers/crypto/caam/caampkc.c 	struct akcipher_request *req = context;
req               128 drivers/crypto/caam/caampkc.c 	rsa_pub_unmap(dev, edesc, req);
req               129 drivers/crypto/caam/caampkc.c 	rsa_io_unmap(dev, edesc, req);
req               132 drivers/crypto/caam/caampkc.c 	akcipher_request_complete(req, ecode);
req               138 drivers/crypto/caam/caampkc.c 	struct akcipher_request *req = context;
req               147 drivers/crypto/caam/caampkc.c 	rsa_priv_f1_unmap(dev, edesc, req);
req               148 drivers/crypto/caam/caampkc.c 	rsa_io_unmap(dev, edesc, req);
req               151 drivers/crypto/caam/caampkc.c 	akcipher_request_complete(req, ecode);
req               157 drivers/crypto/caam/caampkc.c 	struct akcipher_request *req = context;
req               166 drivers/crypto/caam/caampkc.c 	rsa_priv_f2_unmap(dev, edesc, req);
req               167 drivers/crypto/caam/caampkc.c 	rsa_io_unmap(dev, edesc, req);
req               170 drivers/crypto/caam/caampkc.c 	akcipher_request_complete(req, ecode);
req               176 drivers/crypto/caam/caampkc.c 	struct akcipher_request *req = context;
req               185 drivers/crypto/caam/caampkc.c 	rsa_priv_f3_unmap(dev, edesc, req);
req               186 drivers/crypto/caam/caampkc.c 	rsa_io_unmap(dev, edesc, req);
req               189 drivers/crypto/caam/caampkc.c 	akcipher_request_complete(req, ecode);
req               243 drivers/crypto/caam/caampkc.c static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
req               246 drivers/crypto/caam/caampkc.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               249 drivers/crypto/caam/caampkc.c 	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
req               252 drivers/crypto/caam/caampkc.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req               261 drivers/crypto/caam/caampkc.c 	if (req->src_len > key->n_sz) {
req               266 drivers/crypto/caam/caampkc.c 		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
req               271 drivers/crypto/caam/caampkc.c 		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
req               273 drivers/crypto/caam/caampkc.c 		req_ctx->fixup_src_len = req->src_len - lzeros;
req               279 drivers/crypto/caam/caampkc.c 		diff_size = key->n_sz - req->src_len;
req               280 drivers/crypto/caam/caampkc.c 		req_ctx->fixup_src = req->src;
req               281 drivers/crypto/caam/caampkc.c 		req_ctx->fixup_src_len = req->src_len;
req               286 drivers/crypto/caam/caampkc.c 	dst_nents = sg_nents_for_len(req->dst, req->dst_len);
req               312 drivers/crypto/caam/caampkc.c 	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
req               328 drivers/crypto/caam/caampkc.c 		sg_to_sec4_sg_last(req->dst, req->dst_len,
req               354 drivers/crypto/caam/caampkc.c 	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
req               362 drivers/crypto/caam/caampkc.c static int set_rsa_pub_pdb(struct akcipher_request *req,
req               365 drivers/crypto/caam/caampkc.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               366 drivers/crypto/caam/caampkc.c 	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
req               399 drivers/crypto/caam/caampkc.c 		pdb->g_dma = sg_dma_address(req->dst);
req               408 drivers/crypto/caam/caampkc.c static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
req               411 drivers/crypto/caam/caampkc.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               436 drivers/crypto/caam/caampkc.c 		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
req               446 drivers/crypto/caam/caampkc.c 		pdb->f_dma = sg_dma_address(req->dst);
req               454 drivers/crypto/caam/caampkc.c static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
req               457 drivers/crypto/caam/caampkc.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               501 drivers/crypto/caam/caampkc.c 		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
req               511 drivers/crypto/caam/caampkc.c 		pdb->f_dma = sg_dma_address(req->dst);
req               531 drivers/crypto/caam/caampkc.c static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
req               534 drivers/crypto/caam/caampkc.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               590 drivers/crypto/caam/caampkc.c 		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
req               600 drivers/crypto/caam/caampkc.c 		pdb->f_dma = sg_dma_address(req->dst);
req               624 drivers/crypto/caam/caampkc.c static int caam_rsa_enc(struct akcipher_request *req)
req               626 drivers/crypto/caam/caampkc.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               636 drivers/crypto/caam/caampkc.c 	if (req->dst_len < key->n_sz) {
req               637 drivers/crypto/caam/caampkc.c 		req->dst_len = key->n_sz;
req               643 drivers/crypto/caam/caampkc.c 	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
req               648 drivers/crypto/caam/caampkc.c 	ret = set_rsa_pub_pdb(req, edesc);
req               655 drivers/crypto/caam/caampkc.c 	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
req               659 drivers/crypto/caam/caampkc.c 	rsa_pub_unmap(jrdev, edesc, req);
req               662 drivers/crypto/caam/caampkc.c 	rsa_io_unmap(jrdev, edesc, req);
req               667 drivers/crypto/caam/caampkc.c static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
req               669 drivers/crypto/caam/caampkc.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               676 drivers/crypto/caam/caampkc.c 	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
req               681 drivers/crypto/caam/caampkc.c 	ret = set_rsa_priv_f1_pdb(req, edesc);
req               688 drivers/crypto/caam/caampkc.c 	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
req               692 drivers/crypto/caam/caampkc.c 	rsa_priv_f1_unmap(jrdev, edesc, req);
req               695 drivers/crypto/caam/caampkc.c 	rsa_io_unmap(jrdev, edesc, req);
req               700 drivers/crypto/caam/caampkc.c static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
req               702 drivers/crypto/caam/caampkc.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               709 drivers/crypto/caam/caampkc.c 	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
req               714 drivers/crypto/caam/caampkc.c 	ret = set_rsa_priv_f2_pdb(req, edesc);
req               721 drivers/crypto/caam/caampkc.c 	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
req               725 drivers/crypto/caam/caampkc.c 	rsa_priv_f2_unmap(jrdev, edesc, req);
req               728 drivers/crypto/caam/caampkc.c 	rsa_io_unmap(jrdev, edesc, req);
req               733 drivers/crypto/caam/caampkc.c static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
req               735 drivers/crypto/caam/caampkc.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               742 drivers/crypto/caam/caampkc.c 	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
req               747 drivers/crypto/caam/caampkc.c 	ret = set_rsa_priv_f3_pdb(req, edesc);
req               754 drivers/crypto/caam/caampkc.c 	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
req               758 drivers/crypto/caam/caampkc.c 	rsa_priv_f3_unmap(jrdev, edesc, req);
req               761 drivers/crypto/caam/caampkc.c 	rsa_io_unmap(jrdev, edesc, req);
req               766 drivers/crypto/caam/caampkc.c static int caam_rsa_dec(struct akcipher_request *req)
req               768 drivers/crypto/caam/caampkc.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               776 drivers/crypto/caam/caampkc.c 	if (req->dst_len < key->n_sz) {
req               777 drivers/crypto/caam/caampkc.c 		req->dst_len = key->n_sz;
req               783 drivers/crypto/caam/caampkc.c 		ret = caam_rsa_dec_priv_f3(req);
req               785 drivers/crypto/caam/caampkc.c 		ret = caam_rsa_dec_priv_f2(req);
req               787 drivers/crypto/caam/caampkc.c 		ret = caam_rsa_dec_priv_f1(req);
req               107 drivers/crypto/caam/qi.c int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
req               115 drivers/crypto/caam/qi.c 	qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));
req               117 drivers/crypto/caam/qi.c 	addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
req               126 drivers/crypto/caam/qi.c 		ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
req               120 drivers/crypto/caam/qi.h int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req);
req                29 drivers/crypto/cavium/cpt/cptvf_algs.c 	struct crypto_async_request *req = (struct crypto_async_request *)arg;
req                31 drivers/crypto/cavium/cpt/cptvf_algs.c 	req->complete(req, !status);
req                41 drivers/crypto/cavium/cpt/cptvf_algs.c 	req_info->req.dlen += enc_iv_len;
req                62 drivers/crypto/cavium/cpt/cptvf_algs.c 	req_info->req.dlen += nbytes;
req                95 drivers/crypto/cavium/cpt/cptvf_algs.c static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc,
req                98 drivers/crypto/cavium/cpt/cptvf_algs.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req               100 drivers/crypto/cavium/cpt/cptvf_algs.c 	struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
req               111 drivers/crypto/cavium/cpt/cptvf_algs.c 	req_info->req.opcode.s.major = MAJOR_OP_FC |
req               114 drivers/crypto/cavium/cpt/cptvf_algs.c 		req_info->req.opcode.s.minor = 2;
req               116 drivers/crypto/cavium/cpt/cptvf_algs.c 		req_info->req.opcode.s.minor = 3;
req               118 drivers/crypto/cavium/cpt/cptvf_algs.c 	req_info->req.param1 = req->nbytes; /* Encryption Data length */
req               119 drivers/crypto/cavium/cpt/cptvf_algs.c 	req_info->req.param2 = 0; /*Auth data length */
req               138 drivers/crypto/cavium/cpt/cptvf_algs.c 	req_info->req.dlen += CONTROL_WORD_LEN;
req               143 drivers/crypto/cavium/cpt/cptvf_algs.c 	req_info->req.dlen += sizeof(struct fc_context);
req               150 drivers/crypto/cavium/cpt/cptvf_algs.c static inline u32 create_input_list(struct ablkcipher_request  *req, u32 enc,
req               153 drivers/crypto/cavium/cpt/cptvf_algs.c 	struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
req               157 drivers/crypto/cavium/cpt/cptvf_algs.c 	create_ctx_hdr(req, enc, &argcnt);
req               158 drivers/crypto/cavium/cpt/cptvf_algs.c 	update_input_iv(req_info, req->info, enc_iv_len, &argcnt);
req               159 drivers/crypto/cavium/cpt/cptvf_algs.c 	update_input_data(req_info, req->src, req->nbytes, &argcnt);
req               165 drivers/crypto/cavium/cpt/cptvf_algs.c static inline void store_cb_info(struct ablkcipher_request *req,
req               169 drivers/crypto/cavium/cpt/cptvf_algs.c 	req_info->callback_arg = (void *)&req->base;
req               172 drivers/crypto/cavium/cpt/cptvf_algs.c static inline void create_output_list(struct ablkcipher_request *req,
req               175 drivers/crypto/cavium/cpt/cptvf_algs.c 	struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
req               187 drivers/crypto/cavium/cpt/cptvf_algs.c 	update_output_iv(req_info, req->info, enc_iv_len, &argcnt);
req               188 drivers/crypto/cavium/cpt/cptvf_algs.c 	update_output_data(req_info, req->dst, req->nbytes, &argcnt);
req               192 drivers/crypto/cavium/cpt/cptvf_algs.c static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc)
req               194 drivers/crypto/cavium/cpt/cptvf_algs.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req               195 drivers/crypto/cavium/cpt/cptvf_algs.c 	struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
req               204 drivers/crypto/cavium/cpt/cptvf_algs.c 	create_input_list(req, enc, enc_iv_len);
req               205 drivers/crypto/cavium/cpt/cptvf_algs.c 	create_output_list(req, enc_iv_len);
req               206 drivers/crypto/cavium/cpt/cptvf_algs.c 	store_cb_info(req, req_info);
req               220 drivers/crypto/cavium/cpt/cptvf_algs.c static int cvm_encrypt(struct ablkcipher_request *req)
req               222 drivers/crypto/cavium/cpt/cptvf_algs.c 	return cvm_enc_dec(req, true);
req               225 drivers/crypto/cavium/cpt/cptvf_algs.c static int cvm_decrypt(struct ablkcipher_request *req)
req               227 drivers/crypto/cavium/cpt/cptvf_algs.c 	return cvm_enc_dec(req, false);
req               116 drivers/crypto/cavium/cpt/cptvf_algs.h int cptvf_do_request(void *cptvf, struct cpt_request_info *req);
req               122 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 				  struct cpt_request_info *req)
req               128 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	if (req->incnt > MAX_SG_IN_CNT || req->outcnt > MAX_SG_OUT_CNT) {
req               135 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component);
req               142 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	ret = setup_sgio_components(cptvf, req->in,
req               143 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 				    req->incnt,
req               152 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component);
req               159 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	ret = setup_sgio_components(cptvf, req->out,
req               160 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 				    req->outcnt,
req               176 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	((u16 *)info->in_buffer)[0] = req->outcnt;
req               177 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	((u16 *)info->in_buffer)[1] = req->incnt;
req               275 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	struct cpt_request_info *req;
req               289 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	if (info->req) {
req               290 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 		req = info->req;
req               291 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 		for (i = 0; i < req->outcnt; i++) {
req               292 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 			if (req->out[i].dma_addr)
req               294 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 						 req->out[i].dma_addr,
req               295 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 						 req->out[i].size,
req               299 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 		for (i = 0; i < req->incnt; i++) {
req               300 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 			if (req->in[i].dma_addr)
req               302 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 						 req->in[i].dma_addr,
req               303 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 						 req->in[i].size,
req               410 drivers/crypto/cavium/cpt/cptvf_reqmanager.c int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
req               430 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	cpt_req = (struct cptvf_request *)&req->req;
req               431 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	ctrl = (union ctrl_info *)&req->ctrl;
req               435 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	ret = setup_sgio_list(cptvf, info, req);
req               508 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	pentry->callback = req->callback;
req               509 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	pentry->callback_arg = req->callback_arg;
req               517 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	info->req = req;
req               560 drivers/crypto/cavium/cpt/cptvf_reqmanager.c int cptvf_do_request(void *vfdev, struct cpt_request_info *req)
req               570 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	if ((cptvf->vftype == SE_TYPES) && (!req->ctrl.s.se_req)) {
req               574 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	} else if ((cptvf->vftype == AE_TYPES) && (req->ctrl.s.se_req)) {
req               580 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	return process_request(cptvf, req);
req                63 drivers/crypto/cavium/cpt/request_manager.h 	struct cptvf_request req; /* Request Information (Core specific) */
req                93 drivers/crypto/cavium/cpt/request_manager.h 	struct cpt_request_info *req;
req               143 drivers/crypto/cavium/cpt/request_manager.h int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req);
req                24 drivers/crypto/cavium/nitrox/nitrox_common.h 			      struct se_crypto_request *req,
req               157 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 			  struct se_crypto_request *req)
req               160 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	struct scatterlist *sg = req->src;
req               163 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	nents = dma_map_sg(dev, req->src, sg_nents(req->src),
req               168 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	for_each_sg(req->src, sg, nents, i)
req               171 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	sr->in.sg = req->src;
req               180 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	dma_unmap_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
req               186 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 			   struct se_crypto_request *req)
req               191 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	nents = dma_map_sg(dev, req->dst, sg_nents(req->dst),
req               196 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	sr->out.sg = req->dst;
req               205 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	dma_unmap_sg(dev, req->dst, nents, DMA_BIDIRECTIONAL);
req               377 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 			      struct se_crypto_request *req,
req               388 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	sr = kzalloc(sizeof(*sr), req->gfp);
req               393 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	sr->flags = req->flags;
req               394 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	sr->gfp = req->gfp;
req               400 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	sr->resp.orh = req->orh;
req               401 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	sr->resp.completion = req->comp;
req               403 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	ret = softreq_map_iobuf(sr, req);
req               410 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	if (req->ctx_handle) {
req               414 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 		ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle;
req               457 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
req               460 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
req               461 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	sr->instr.irh.s.arg = req->ctrl.s.arg;
req               462 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	sr->instr.irh.s.opcode = req->opcode;
req               481 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	sr->instr.fdata[0] = *((u64 *)&req->gph);
req                26 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	struct ahash_request *req = ahash_request_cast(async_req);
req                27 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req                28 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
req                46 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	if (req->result && rctx->final)
req                47 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 		memcpy(req->result, rctx->iv, digest_size);
req                55 drivers/crypto/ccp/ccp-crypto-aes-cmac.c static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
req                58 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req                60 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
req                78 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 		scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
req                85 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	rctx->src = req->src;
req               107 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2;
req               108 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
req               125 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 		sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
req               173 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
req               183 drivers/crypto/ccp/ccp-crypto-aes-cmac.c static int ccp_aes_cmac_init(struct ahash_request *req)
req               185 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
req               194 drivers/crypto/ccp/ccp-crypto-aes-cmac.c static int ccp_aes_cmac_update(struct ahash_request *req)
req               196 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	return ccp_do_cmac_update(req, req->nbytes, 0);
req               199 drivers/crypto/ccp/ccp-crypto-aes-cmac.c static int ccp_aes_cmac_final(struct ahash_request *req)
req               201 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	return ccp_do_cmac_update(req, 0, 1);
req               204 drivers/crypto/ccp/ccp-crypto-aes-cmac.c static int ccp_aes_cmac_finup(struct ahash_request *req)
req               206 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	return ccp_do_cmac_update(req, req->nbytes, 1);
req               209 drivers/crypto/ccp/ccp-crypto-aes-cmac.c static int ccp_aes_cmac_digest(struct ahash_request *req)
req               213 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	ret = ccp_aes_cmac_init(req);
req               217 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	return ccp_aes_cmac_finup(req);
req               220 drivers/crypto/ccp/ccp-crypto-aes-cmac.c static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
req               222 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
req               239 drivers/crypto/ccp/ccp-crypto-aes-cmac.c static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
req               241 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
req                77 drivers/crypto/ccp/ccp-crypto-aes-galois.c static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
req                79 drivers/crypto/ccp/ccp-crypto-aes-galois.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req                81 drivers/crypto/ccp/ccp-crypto-aes-galois.c 	struct ccp_aes_req_ctx *rctx = aead_request_ctx(req);
req                93 drivers/crypto/ccp/ccp-crypto-aes-galois.c 	if (!req->iv)
req               106 drivers/crypto/ccp/ccp-crypto-aes-galois.c 	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
req               128 drivers/crypto/ccp/ccp-crypto-aes-galois.c 	rctx->cmd.u.aes.src = req->src;
req               129 drivers/crypto/ccp/ccp-crypto-aes-galois.c 	rctx->cmd.u.aes.src_len = req->cryptlen;
req               130 drivers/crypto/ccp/ccp-crypto-aes-galois.c 	rctx->cmd.u.aes.aad_len = req->assoclen;
req               133 drivers/crypto/ccp/ccp-crypto-aes-galois.c 	rctx->cmd.u.aes.dst = req->dst;
req               135 drivers/crypto/ccp/ccp-crypto-aes-galois.c 	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
req               140 drivers/crypto/ccp/ccp-crypto-aes-galois.c static int ccp_aes_gcm_encrypt(struct aead_request *req)
req               142 drivers/crypto/ccp/ccp-crypto-aes-galois.c 	return ccp_aes_gcm_crypt(req, CCP_AES_ACTION_ENCRYPT);
req               145 drivers/crypto/ccp/ccp-crypto-aes-galois.c static int ccp_aes_gcm_decrypt(struct aead_request *req)
req               147 drivers/crypto/ccp/ccp-crypto-aes-galois.c 	return ccp_aes_gcm_crypt(req, CCP_AES_ACTION_DECRYPT);
req                64 drivers/crypto/ccp/ccp-crypto-aes-xts.c 	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
req                65 drivers/crypto/ccp/ccp-crypto-aes-xts.c 	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
req                70 drivers/crypto/ccp/ccp-crypto-aes-xts.c 	memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);
req               105 drivers/crypto/ccp/ccp-crypto-aes-xts.c static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
req               108 drivers/crypto/ccp/ccp-crypto-aes-xts.c 	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
req               109 drivers/crypto/ccp/ccp-crypto-aes-xts.c 	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
req               119 drivers/crypto/ccp/ccp-crypto-aes-xts.c 	if (!req->info)
req               130 drivers/crypto/ccp/ccp-crypto-aes-xts.c 		if (req->nbytes == xts_unit_sizes[unit].size) {
req               155 drivers/crypto/ccp/ccp-crypto-aes-xts.c 		skcipher_request_set_callback(subreq, req->base.flags,
req               157 drivers/crypto/ccp/ccp-crypto-aes-xts.c 		skcipher_request_set_crypt(subreq, req->src, req->dst,
req               158 drivers/crypto/ccp/ccp-crypto-aes-xts.c 					   req->nbytes, req->info);
req               165 drivers/crypto/ccp/ccp-crypto-aes-xts.c 	memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
req               179 drivers/crypto/ccp/ccp-crypto-aes-xts.c 	rctx->cmd.u.xts.src = req->src;
req               180 drivers/crypto/ccp/ccp-crypto-aes-xts.c 	rctx->cmd.u.xts.src_len = req->nbytes;
req               181 drivers/crypto/ccp/ccp-crypto-aes-xts.c 	rctx->cmd.u.xts.dst = req->dst;
req               183 drivers/crypto/ccp/ccp-crypto-aes-xts.c 	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
req               188 drivers/crypto/ccp/ccp-crypto-aes-xts.c static int ccp_aes_xts_encrypt(struct ablkcipher_request *req)
req               190 drivers/crypto/ccp/ccp-crypto-aes-xts.c 	return ccp_aes_xts_crypt(req, 1);
req               193 drivers/crypto/ccp/ccp-crypto-aes-xts.c static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
req               195 drivers/crypto/ccp/ccp-crypto-aes-xts.c 	return ccp_aes_xts_crypt(req, 0);
req                24 drivers/crypto/ccp/ccp-crypto-aes.c 	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
req                25 drivers/crypto/ccp/ccp-crypto-aes.c 	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
req                26 drivers/crypto/ccp/ccp-crypto-aes.c 	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
req                32 drivers/crypto/ccp/ccp-crypto-aes.c 		memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);
req                67 drivers/crypto/ccp/ccp-crypto-aes.c static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
req                69 drivers/crypto/ccp/ccp-crypto-aes.c 	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
req                70 drivers/crypto/ccp/ccp-crypto-aes.c 	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
req                80 drivers/crypto/ccp/ccp-crypto-aes.c 	    (req->nbytes & (AES_BLOCK_SIZE - 1)))
req                84 drivers/crypto/ccp/ccp-crypto-aes.c 		if (!req->info)
req                87 drivers/crypto/ccp/ccp-crypto-aes.c 		memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
req               104 drivers/crypto/ccp/ccp-crypto-aes.c 	rctx->cmd.u.aes.src = req->src;
req               105 drivers/crypto/ccp/ccp-crypto-aes.c 	rctx->cmd.u.aes.src_len = req->nbytes;
req               106 drivers/crypto/ccp/ccp-crypto-aes.c 	rctx->cmd.u.aes.dst = req->dst;
req               108 drivers/crypto/ccp/ccp-crypto-aes.c 	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
req               113 drivers/crypto/ccp/ccp-crypto-aes.c static int ccp_aes_encrypt(struct ablkcipher_request *req)
req               115 drivers/crypto/ccp/ccp-crypto-aes.c 	return ccp_aes_crypt(req, true);
req               118 drivers/crypto/ccp/ccp-crypto-aes.c static int ccp_aes_decrypt(struct ablkcipher_request *req)
req               120 drivers/crypto/ccp/ccp-crypto-aes.c 	return ccp_aes_crypt(req, false);
req               142 drivers/crypto/ccp/ccp-crypto-aes.c 	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
req               143 drivers/crypto/ccp/ccp-crypto-aes.c 	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
req               146 drivers/crypto/ccp/ccp-crypto-aes.c 	req->info = rctx->rfc3686_info;
req               165 drivers/crypto/ccp/ccp-crypto-aes.c static int ccp_aes_rfc3686_crypt(struct ablkcipher_request *req, bool encrypt)
req               167 drivers/crypto/ccp/ccp-crypto-aes.c 	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
req               168 drivers/crypto/ccp/ccp-crypto-aes.c 	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
req               176 drivers/crypto/ccp/ccp-crypto-aes.c 	memcpy(iv, req->info, CTR_RFC3686_IV_SIZE);
req               182 drivers/crypto/ccp/ccp-crypto-aes.c 	rctx->rfc3686_info = req->info;
req               183 drivers/crypto/ccp/ccp-crypto-aes.c 	req->info = rctx->rfc3686_iv;
req               185 drivers/crypto/ccp/ccp-crypto-aes.c 	return ccp_aes_crypt(req, encrypt);
req               188 drivers/crypto/ccp/ccp-crypto-aes.c static int ccp_aes_rfc3686_encrypt(struct ablkcipher_request *req)
req               190 drivers/crypto/ccp/ccp-crypto-aes.c 	return ccp_aes_rfc3686_crypt(req, true);
req               193 drivers/crypto/ccp/ccp-crypto-aes.c static int ccp_aes_rfc3686_decrypt(struct ablkcipher_request *req)
req               195 drivers/crypto/ccp/ccp-crypto-aes.c 	return ccp_aes_rfc3686_crypt(req, false);
req                23 drivers/crypto/ccp/ccp-crypto-des3.c 	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
req                24 drivers/crypto/ccp/ccp-crypto-des3.c 	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
req                25 drivers/crypto/ccp/ccp-crypto-des3.c 	struct ccp_des3_req_ctx *rctx = ablkcipher_request_ctx(req);
req                31 drivers/crypto/ccp/ccp-crypto-des3.c 		memcpy(req->info, rctx->iv, DES3_EDE_BLOCK_SIZE);
req                61 drivers/crypto/ccp/ccp-crypto-des3.c static int ccp_des3_crypt(struct ablkcipher_request *req, bool encrypt)
req                63 drivers/crypto/ccp/ccp-crypto-des3.c 	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
req                64 drivers/crypto/ccp/ccp-crypto-des3.c 	struct ccp_des3_req_ctx *rctx = ablkcipher_request_ctx(req);
req                74 drivers/crypto/ccp/ccp-crypto-des3.c 	    (req->nbytes & (DES3_EDE_BLOCK_SIZE - 1)))
req                78 drivers/crypto/ccp/ccp-crypto-des3.c 		if (!req->info)
req                81 drivers/crypto/ccp/ccp-crypto-des3.c 		memcpy(rctx->iv, req->info, DES3_EDE_BLOCK_SIZE);
req                99 drivers/crypto/ccp/ccp-crypto-des3.c 	rctx->cmd.u.des3.src = req->src;
req               100 drivers/crypto/ccp/ccp-crypto-des3.c 	rctx->cmd.u.des3.src_len = req->nbytes;
req               101 drivers/crypto/ccp/ccp-crypto-des3.c 	rctx->cmd.u.des3.dst = req->dst;
req               103 drivers/crypto/ccp/ccp-crypto-des3.c 	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
req               108 drivers/crypto/ccp/ccp-crypto-des3.c static int ccp_des3_encrypt(struct ablkcipher_request *req)
req               110 drivers/crypto/ccp/ccp-crypto-des3.c 	return ccp_des3_crypt(req, true);
req               113 drivers/crypto/ccp/ccp-crypto-des3.c static int ccp_des3_decrypt(struct ablkcipher_request *req)
req               115 drivers/crypto/ccp/ccp-crypto-des3.c 	return ccp_des3_crypt(req, false);
req                74 drivers/crypto/ccp/ccp-crypto-main.c 	struct crypto_async_request *req;
req               148 drivers/crypto/ccp/ccp-crypto-main.c 	struct crypto_async_request *req = crypto_cmd->req;
req               149 drivers/crypto/ccp/ccp-crypto-main.c 	struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
req               156 drivers/crypto/ccp/ccp-crypto-main.c 			req->complete(req, -EINPROGRESS);
req               169 drivers/crypto/ccp/ccp-crypto-main.c 		backlog->req->complete(backlog->req, -EINPROGRESS);
req               174 drivers/crypto/ccp/ccp-crypto-main.c 		req->complete(req, -EINPROGRESS);
req               179 drivers/crypto/ccp/ccp-crypto-main.c 		ret = ctx->complete(req, ret);
req               180 drivers/crypto/ccp/ccp-crypto-main.c 	req->complete(req, ret);
req               193 drivers/crypto/ccp/ccp-crypto-main.c 		ctx = crypto_tfm_ctx(held->req->tfm);
req               195 drivers/crypto/ccp/ccp-crypto-main.c 			ret = ctx->complete(held->req, ret);
req               196 drivers/crypto/ccp/ccp-crypto-main.c 		held->req->complete(held->req, ret);
req               201 drivers/crypto/ccp/ccp-crypto-main.c 			backlog->req->complete(backlog->req, -EINPROGRESS);
req               274 drivers/crypto/ccp/ccp-crypto-main.c int ccp_crypto_enqueue_request(struct crypto_async_request *req,
req               280 drivers/crypto/ccp/ccp-crypto-main.c 	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
req               292 drivers/crypto/ccp/ccp-crypto-main.c 	crypto_cmd->req = req;
req               293 drivers/crypto/ccp/ccp-crypto-main.c 	crypto_cmd->tfm = req->tfm;
req               298 drivers/crypto/ccp/ccp-crypto-main.c 	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
req                23 drivers/crypto/ccp/ccp-crypto-rsa.c 	struct crypto_async_request *req)
req                25 drivers/crypto/ccp/ccp-crypto-rsa.c 	return container_of(req, struct akcipher_request, base);
req                46 drivers/crypto/ccp/ccp-crypto-rsa.c 	struct akcipher_request *req = akcipher_request_cast(async_req);
req                47 drivers/crypto/ccp/ccp-crypto-rsa.c 	struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
req                52 drivers/crypto/ccp/ccp-crypto-rsa.c 	req->dst_len = rctx->cmd.u.rsa.key_size >> 3;
req                64 drivers/crypto/ccp/ccp-crypto-rsa.c static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
req                66 drivers/crypto/ccp/ccp-crypto-rsa.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req                68 drivers/crypto/ccp/ccp-crypto-rsa.c 	struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
req                85 drivers/crypto/ccp/ccp-crypto-rsa.c 	rctx->cmd.u.rsa.src = req->src;
req                86 drivers/crypto/ccp/ccp-crypto-rsa.c 	rctx->cmd.u.rsa.src_len = req->src_len;
req                87 drivers/crypto/ccp/ccp-crypto-rsa.c 	rctx->cmd.u.rsa.dst = req->dst;
req                89 drivers/crypto/ccp/ccp-crypto-rsa.c 	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
req                94 drivers/crypto/ccp/ccp-crypto-rsa.c static int ccp_rsa_encrypt(struct akcipher_request *req)
req                96 drivers/crypto/ccp/ccp-crypto-rsa.c 	return ccp_rsa_crypt(req, true);
req                99 drivers/crypto/ccp/ccp-crypto-rsa.c static int ccp_rsa_decrypt(struct akcipher_request *req)
req               101 drivers/crypto/ccp/ccp-crypto-rsa.c 	return ccp_rsa_crypt(req, false);
req                27 drivers/crypto/ccp/ccp-crypto-sha.c 	struct ahash_request *req = ahash_request_cast(async_req);
req                28 drivers/crypto/ccp/ccp-crypto-sha.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req                29 drivers/crypto/ccp/ccp-crypto-sha.c 	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
req                47 drivers/crypto/ccp/ccp-crypto-sha.c 	if (req->result && rctx->final)
req                48 drivers/crypto/ccp/ccp-crypto-sha.c 		memcpy(req->result, rctx->ctx, digest_size);
req                56 drivers/crypto/ccp/ccp-crypto-sha.c static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
req                59 drivers/crypto/ccp/ccp-crypto-sha.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req                61 drivers/crypto/ccp/ccp-crypto-sha.c 	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
req                73 drivers/crypto/ccp/ccp-crypto-sha.c 		scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
req                80 drivers/crypto/ccp/ccp-crypto-sha.c 	rctx->src = req->src;
req               100 drivers/crypto/ccp/ccp-crypto-sha.c 		gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
req               102 drivers/crypto/ccp/ccp-crypto-sha.c 		sg_count = sg_nents(req->src) + 1;
req               113 drivers/crypto/ccp/ccp-crypto-sha.c 		sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
req               126 drivers/crypto/ccp/ccp-crypto-sha.c 		sg = req->src;
req               170 drivers/crypto/ccp/ccp-crypto-sha.c 	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
req               180 drivers/crypto/ccp/ccp-crypto-sha.c static int ccp_sha_init(struct ahash_request *req)
req               182 drivers/crypto/ccp/ccp-crypto-sha.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               184 drivers/crypto/ccp/ccp-crypto-sha.c 	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
req               204 drivers/crypto/ccp/ccp-crypto-sha.c static int ccp_sha_update(struct ahash_request *req)
req               206 drivers/crypto/ccp/ccp-crypto-sha.c 	return ccp_do_sha_update(req, req->nbytes, 0);
req               209 drivers/crypto/ccp/ccp-crypto-sha.c static int ccp_sha_final(struct ahash_request *req)
req               211 drivers/crypto/ccp/ccp-crypto-sha.c 	return ccp_do_sha_update(req, 0, 1);
req               214 drivers/crypto/ccp/ccp-crypto-sha.c static int ccp_sha_finup(struct ahash_request *req)
req               216 drivers/crypto/ccp/ccp-crypto-sha.c 	return ccp_do_sha_update(req, req->nbytes, 1);
req               219 drivers/crypto/ccp/ccp-crypto-sha.c static int ccp_sha_digest(struct ahash_request *req)
req               223 drivers/crypto/ccp/ccp-crypto-sha.c 	ret = ccp_sha_init(req);
req               227 drivers/crypto/ccp/ccp-crypto-sha.c 	return ccp_sha_finup(req);
req               230 drivers/crypto/ccp/ccp-crypto-sha.c static int ccp_sha_export(struct ahash_request *req, void *out)
req               232 drivers/crypto/ccp/ccp-crypto-sha.c 	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
req               251 drivers/crypto/ccp/ccp-crypto-sha.c static int ccp_sha_import(struct ahash_request *req, const void *in)
req               253 drivers/crypto/ccp/ccp-crypto-sha.c 	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
req               259 drivers/crypto/ccp/ccp-crypto.h 	int (*complete)(struct crypto_async_request *req, int ret);
req               269 drivers/crypto/ccp/ccp-crypto.h int ccp_crypto_enqueue_request(struct crypto_async_request *req,
req                63 drivers/crypto/ccree/cc_aead.c static inline bool valid_assoclen(struct aead_request *req)
req                65 drivers/crypto/ccree/cc_aead.c 	return ((req->assoclen == 16) || (req->assoclen == 20));
req               880 drivers/crypto/ccree/cc_aead.c static void cc_proc_digest_desc(struct aead_request *req,
req               884 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               886 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
req               930 drivers/crypto/ccree/cc_aead.c static void cc_set_cipher_desc(struct aead_request *req,
req               934 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               936 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
req               975 drivers/crypto/ccree/cc_aead.c static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
req               978 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
req               985 drivers/crypto/ccree/cc_aead.c 	cc_set_cipher_desc(req, desc, &idx);
req               986 drivers/crypto/ccree/cc_aead.c 	cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
req               998 drivers/crypto/ccree/cc_aead.c static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
req              1001 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1031 drivers/crypto/ccree/cc_aead.c static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
req              1034 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1091 drivers/crypto/ccree/cc_aead.c static void cc_proc_header_desc(struct aead_request *req,
req              1095 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req              1100 drivers/crypto/ccree/cc_aead.c 		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
req              1106 drivers/crypto/ccree/cc_aead.c static void cc_proc_scheme_desc(struct aead_request *req,
req              1110 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1168 drivers/crypto/ccree/cc_aead.c static void cc_mlli_to_sram(struct aead_request *req,
req              1171 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
req              1172 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1220 drivers/crypto/ccree/cc_aead.c static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
req              1223 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1225 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
req              1235 drivers/crypto/ccree/cc_aead.c 		cc_set_hmac_desc(req, desc, seq_size);
req              1236 drivers/crypto/ccree/cc_aead.c 		cc_set_cipher_desc(req, desc, seq_size);
req              1237 drivers/crypto/ccree/cc_aead.c 		cc_proc_header_desc(req, desc, seq_size);
req              1238 drivers/crypto/ccree/cc_aead.c 		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
req              1239 drivers/crypto/ccree/cc_aead.c 		cc_proc_scheme_desc(req, desc, seq_size);
req              1240 drivers/crypto/ccree/cc_aead.c 		cc_proc_digest_desc(req, desc, seq_size);
req              1251 drivers/crypto/ccree/cc_aead.c 		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
req              1253 drivers/crypto/ccree/cc_aead.c 		cc_set_hmac_desc(req, desc, seq_size);
req              1254 drivers/crypto/ccree/cc_aead.c 		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
req              1255 drivers/crypto/ccree/cc_aead.c 		cc_proc_scheme_desc(req, desc, seq_size);
req              1256 drivers/crypto/ccree/cc_aead.c 		cc_proc_digest_desc(req, desc, seq_size);
req              1260 drivers/crypto/ccree/cc_aead.c 		cc_set_hmac_desc(req, desc, seq_size);
req              1261 drivers/crypto/ccree/cc_aead.c 		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
req              1262 drivers/crypto/ccree/cc_aead.c 		cc_proc_scheme_desc(req, desc, seq_size);
req              1264 drivers/crypto/ccree/cc_aead.c 		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
req              1268 drivers/crypto/ccree/cc_aead.c 		cc_proc_digest_desc(req, desc, seq_size);
req              1273 drivers/crypto/ccree/cc_aead.c cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
req              1276 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1278 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
req              1288 drivers/crypto/ccree/cc_aead.c 		cc_set_xcbc_desc(req, desc, seq_size);
req              1289 drivers/crypto/ccree/cc_aead.c 		cc_set_cipher_desc(req, desc, seq_size);
req              1290 drivers/crypto/ccree/cc_aead.c 		cc_proc_header_desc(req, desc, seq_size);
req              1291 drivers/crypto/ccree/cc_aead.c 		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
req              1292 drivers/crypto/ccree/cc_aead.c 		cc_proc_digest_desc(req, desc, seq_size);
req              1303 drivers/crypto/ccree/cc_aead.c 		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
req              1305 drivers/crypto/ccree/cc_aead.c 		cc_set_xcbc_desc(req, desc, seq_size);
req              1306 drivers/crypto/ccree/cc_aead.c 		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
req              1307 drivers/crypto/ccree/cc_aead.c 		cc_proc_digest_desc(req, desc, seq_size);
req              1310 drivers/crypto/ccree/cc_aead.c 		cc_set_xcbc_desc(req, desc, seq_size);
req              1311 drivers/crypto/ccree/cc_aead.c 		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
req              1313 drivers/crypto/ccree/cc_aead.c 		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
req              1317 drivers/crypto/ccree/cc_aead.c 		cc_proc_digest_desc(req, desc, seq_size);
req              1323 drivers/crypto/ccree/cc_aead.c 			      struct aead_request *req)
req              1325 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req              1329 drivers/crypto/ccree/cc_aead.c 			(req->cryptlen - ctx->authsize) : req->cryptlen;
req              1332 drivers/crypto/ccree/cc_aead.c 	    req->cryptlen < ctx->authsize)
req              1419 drivers/crypto/ccree/cc_aead.c static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
req              1422 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1424 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
req              1487 drivers/crypto/ccree/cc_aead.c 		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
req              1499 drivers/crypto/ccree/cc_aead.c 		cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
req              1541 drivers/crypto/ccree/cc_aead.c static int config_ccm_adata(struct aead_request *req)
req              1543 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1546 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
req              1548 drivers/crypto/ccree/cc_aead.c 	unsigned int lp = req->iv[0];
req              1559 drivers/crypto/ccree/cc_aead.c 				req->cryptlen :
req              1560 drivers/crypto/ccree/cc_aead.c 				(req->cryptlen - ctx->authsize);
req              1569 drivers/crypto/ccree/cc_aead.c 		dev_err(dev, "illegal iv value %X\n", req->iv[0]);
req              1572 drivers/crypto/ccree/cc_aead.c 	memcpy(b0, req->iv, AES_BLOCK_SIZE);
req              1591 drivers/crypto/ccree/cc_aead.c 	memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
req              1592 drivers/crypto/ccree/cc_aead.c 	req->iv[15] = 1;
req              1594 drivers/crypto/ccree/cc_aead.c 	memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
req              1600 drivers/crypto/ccree/cc_aead.c static void cc_proc_rfc4309_ccm(struct aead_request *req)
req              1602 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1604 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req              1618 drivers/crypto/ccree/cc_aead.c 	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
req              1620 drivers/crypto/ccree/cc_aead.c 	req->iv = areq_ctx->ctr_iv;
req              1624 drivers/crypto/ccree/cc_aead.c static void cc_set_ghash_desc(struct aead_request *req,
req              1627 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1629 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
req              1702 drivers/crypto/ccree/cc_aead.c static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
req              1705 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1707 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
req              1738 drivers/crypto/ccree/cc_aead.c static void cc_proc_gcm_result(struct aead_request *req,
req              1742 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1744 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
req              1803 drivers/crypto/ccree/cc_aead.c static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
req              1806 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
req              1817 drivers/crypto/ccree/cc_aead.c 		cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
req              1818 drivers/crypto/ccree/cc_aead.c 		cc_set_ghash_desc(req, desc, seq_size);
req              1820 drivers/crypto/ccree/cc_aead.c 		cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
req              1821 drivers/crypto/ccree/cc_aead.c 		cc_set_gctr_desc(req, desc, seq_size);
req              1822 drivers/crypto/ccree/cc_aead.c 		cc_proc_gcm_result(req, desc, seq_size);
req              1827 drivers/crypto/ccree/cc_aead.c 	cc_set_ghash_desc(req, desc, seq_size);
req              1830 drivers/crypto/ccree/cc_aead.c 		cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
req              1831 drivers/crypto/ccree/cc_aead.c 	cc_set_gctr_desc(req, desc, seq_size);
req              1834 drivers/crypto/ccree/cc_aead.c 		cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
req              1835 drivers/crypto/ccree/cc_aead.c 	cc_proc_gcm_result(req, desc, seq_size);
req              1840 drivers/crypto/ccree/cc_aead.c static int config_gcm_context(struct aead_request *req)
req              1842 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1844 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
req              1849 drivers/crypto/ccree/cc_aead.c 				req->cryptlen :
req              1850 drivers/crypto/ccree/cc_aead.c 				(req->cryptlen - ctx->authsize);
req              1860 drivers/crypto/ccree/cc_aead.c 	memcpy(req->iv + 12, &counter, 4);
req              1861 drivers/crypto/ccree/cc_aead.c 	memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
req              1864 drivers/crypto/ccree/cc_aead.c 	memcpy(req->iv + 12, &counter, 4);
req              1865 drivers/crypto/ccree/cc_aead.c 	memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
req              1890 drivers/crypto/ccree/cc_aead.c static void cc_proc_rfc4_gcm(struct aead_request *req)
req              1892 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1894 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req              1898 drivers/crypto/ccree/cc_aead.c 	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
req              1900 drivers/crypto/ccree/cc_aead.c 	req->iv = areq_ctx->ctr_iv;
req              1904 drivers/crypto/ccree/cc_aead.c static int cc_proc_aead(struct aead_request *req,
req              1910 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1912 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req              1918 drivers/crypto/ccree/cc_aead.c 		ctx, req, req->iv, sg_virt(req->src), req->src->offset,
req              1919 drivers/crypto/ccree/cc_aead.c 		sg_virt(req->dst), req->dst->offset, req->cryptlen);
req              1924 drivers/crypto/ccree/cc_aead.c 	if (validate_data_size(ctx, direct, req)) {
req              1926 drivers/crypto/ccree/cc_aead.c 			req->cryptlen, areq_ctx->assoclen);
req              1933 drivers/crypto/ccree/cc_aead.c 	cc_req.user_arg = (void *)req;
req              1948 drivers/crypto/ccree/cc_aead.c 		memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv,
req              1955 drivers/crypto/ccree/cc_aead.c 		req->iv = areq_ctx->ctr_iv;
req              1960 drivers/crypto/ccree/cc_aead.c 		if (areq_ctx->ctr_iv != req->iv) {
req              1961 drivers/crypto/ccree/cc_aead.c 			memcpy(areq_ctx->ctr_iv, req->iv,
req              1963 drivers/crypto/ccree/cc_aead.c 			req->iv = areq_ctx->ctr_iv;
req              1970 drivers/crypto/ccree/cc_aead.c 		rc = config_ccm_adata(req);
req              1981 drivers/crypto/ccree/cc_aead.c 		rc = config_gcm_context(req);
req              1989 drivers/crypto/ccree/cc_aead.c 	rc = cc_map_aead_request(ctx->drvdata, req);
req              1998 drivers/crypto/ccree/cc_aead.c 	cc_mlli_to_sram(req, desc, &seq_len);
req              2004 drivers/crypto/ccree/cc_aead.c 		cc_hmac_authenc(req, desc, &seq_len);
req              2007 drivers/crypto/ccree/cc_aead.c 		cc_xcbc_authenc(req, desc, &seq_len);
req              2011 drivers/crypto/ccree/cc_aead.c 			cc_ccm(req, desc, &seq_len);
req              2013 drivers/crypto/ccree/cc_aead.c 			cc_gcm(req, desc, &seq_len);
req              2017 drivers/crypto/ccree/cc_aead.c 		cc_unmap_aead_request(dev, req);
req              2024 drivers/crypto/ccree/cc_aead.c 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
req              2028 drivers/crypto/ccree/cc_aead.c 		cc_unmap_aead_request(dev, req);
req              2035 drivers/crypto/ccree/cc_aead.c static int cc_aead_encrypt(struct aead_request *req)
req              2037 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req              2043 drivers/crypto/ccree/cc_aead.c 	areq_ctx->backup_iv = req->iv;
req              2044 drivers/crypto/ccree/cc_aead.c 	areq_ctx->assoclen = req->assoclen;
req              2049 drivers/crypto/ccree/cc_aead.c 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
req              2051 drivers/crypto/ccree/cc_aead.c 		req->iv = areq_ctx->backup_iv;
req              2056 drivers/crypto/ccree/cc_aead.c static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
req              2060 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req              2061 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2066 drivers/crypto/ccree/cc_aead.c 	if (!valid_assoclen(req)) {
req              2067 drivers/crypto/ccree/cc_aead.c 		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
req              2074 drivers/crypto/ccree/cc_aead.c 	areq_ctx->backup_iv = req->iv;
req              2075 drivers/crypto/ccree/cc_aead.c 	areq_ctx->assoclen = req->assoclen;
req              2078 drivers/crypto/ccree/cc_aead.c 	cc_proc_rfc4309_ccm(req);
req              2080 drivers/crypto/ccree/cc_aead.c 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
req              2082 drivers/crypto/ccree/cc_aead.c 		req->iv = areq_ctx->backup_iv;
req              2087 drivers/crypto/ccree/cc_aead.c static int cc_aead_decrypt(struct aead_request *req)
req              2089 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req              2095 drivers/crypto/ccree/cc_aead.c 	areq_ctx->backup_iv = req->iv;
req              2096 drivers/crypto/ccree/cc_aead.c 	areq_ctx->assoclen = req->assoclen;
req              2101 drivers/crypto/ccree/cc_aead.c 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
req              2103 drivers/crypto/ccree/cc_aead.c 		req->iv = areq_ctx->backup_iv;
req              2108 drivers/crypto/ccree/cc_aead.c static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
req              2110 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2113 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req              2116 drivers/crypto/ccree/cc_aead.c 	if (!valid_assoclen(req)) {
req              2117 drivers/crypto/ccree/cc_aead.c 		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
req              2124 drivers/crypto/ccree/cc_aead.c 	areq_ctx->backup_iv = req->iv;
req              2125 drivers/crypto/ccree/cc_aead.c 	areq_ctx->assoclen = req->assoclen;
req              2128 drivers/crypto/ccree/cc_aead.c 	cc_proc_rfc4309_ccm(req);
req              2130 drivers/crypto/ccree/cc_aead.c 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
req              2132 drivers/crypto/ccree/cc_aead.c 		req->iv = areq_ctx->backup_iv;
req              2225 drivers/crypto/ccree/cc_aead.c static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
req              2229 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2232 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req              2235 drivers/crypto/ccree/cc_aead.c 	if (!valid_assoclen(req)) {
req              2236 drivers/crypto/ccree/cc_aead.c 		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
req              2243 drivers/crypto/ccree/cc_aead.c 	areq_ctx->backup_iv = req->iv;
req              2244 drivers/crypto/ccree/cc_aead.c 	areq_ctx->assoclen = req->assoclen;
req              2247 drivers/crypto/ccree/cc_aead.c 	cc_proc_rfc4_gcm(req);
req              2250 drivers/crypto/ccree/cc_aead.c 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
req              2252 drivers/crypto/ccree/cc_aead.c 		req->iv = areq_ctx->backup_iv;
req              2257 drivers/crypto/ccree/cc_aead.c static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
req              2260 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2263 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req              2266 drivers/crypto/ccree/cc_aead.c 	if (!valid_assoclen(req)) {
req              2267 drivers/crypto/ccree/cc_aead.c 		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
req              2277 drivers/crypto/ccree/cc_aead.c 	areq_ctx->backup_iv = req->iv;
req              2278 drivers/crypto/ccree/cc_aead.c 	areq_ctx->assoclen = req->assoclen;
req              2280 drivers/crypto/ccree/cc_aead.c 	cc_proc_rfc4_gcm(req);
req              2283 drivers/crypto/ccree/cc_aead.c 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
req              2285 drivers/crypto/ccree/cc_aead.c 		req->iv = areq_ctx->backup_iv;
req              2290 drivers/crypto/ccree/cc_aead.c static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
req              2294 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2297 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req              2300 drivers/crypto/ccree/cc_aead.c 	if (!valid_assoclen(req)) {
req              2301 drivers/crypto/ccree/cc_aead.c 		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
req              2308 drivers/crypto/ccree/cc_aead.c 	areq_ctx->backup_iv = req->iv;
req              2309 drivers/crypto/ccree/cc_aead.c 	areq_ctx->assoclen = req->assoclen;
req              2312 drivers/crypto/ccree/cc_aead.c 	cc_proc_rfc4_gcm(req);
req              2315 drivers/crypto/ccree/cc_aead.c 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
req              2317 drivers/crypto/ccree/cc_aead.c 		req->iv = areq_ctx->backup_iv;
req              2322 drivers/crypto/ccree/cc_aead.c static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
req              2325 drivers/crypto/ccree/cc_aead.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2328 drivers/crypto/ccree/cc_aead.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req              2331 drivers/crypto/ccree/cc_aead.c 	if (!valid_assoclen(req)) {
req              2332 drivers/crypto/ccree/cc_aead.c 		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
req              2342 drivers/crypto/ccree/cc_aead.c 	areq_ctx->backup_iv = req->iv;
req              2343 drivers/crypto/ccree/cc_aead.c 	areq_ctx->assoclen = req->assoclen;
req              2345 drivers/crypto/ccree/cc_aead.c 	cc_proc_rfc4_gcm(req);
req              2348 drivers/crypto/ccree/cc_aead.c 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
req              2350 drivers/crypto/ccree/cc_aead.c 		req->iv = areq_ctx->backup_iv;
req                63 drivers/crypto/ccree/cc_buffer_mgr.c static void cc_copy_mac(struct device *dev, struct aead_request *req,
req                66 drivers/crypto/ccree/cc_buffer_mgr.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req                67 drivers/crypto/ccree/cc_buffer_mgr.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req                68 drivers/crypto/ccree/cc_buffer_mgr.c 	u32 skip = areq_ctx->assoclen + req->cryptlen;
req                73 drivers/crypto/ccree/cc_buffer_mgr.c 	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
req               486 drivers/crypto/ccree/cc_buffer_mgr.c void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
req               488 drivers/crypto/ccree/cc_buffer_mgr.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req               546 drivers/crypto/ccree/cc_buffer_mgr.c 		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
req               547 drivers/crypto/ccree/cc_buffer_mgr.c 		areq_ctx->assoclen, req->cryptlen);
req               549 drivers/crypto/ccree/cc_buffer_mgr.c 	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
req               551 drivers/crypto/ccree/cc_buffer_mgr.c 	if (req->src != req->dst) {
req               553 drivers/crypto/ccree/cc_buffer_mgr.c 			sg_virt(req->dst));
req               554 drivers/crypto/ccree/cc_buffer_mgr.c 		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
req               559 drivers/crypto/ccree/cc_buffer_mgr.c 	    req->src == req->dst) {
req               564 drivers/crypto/ccree/cc_buffer_mgr.c 		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
req               575 drivers/crypto/ccree/cc_buffer_mgr.c 			    struct aead_request *req,
req               579 drivers/crypto/ccree/cc_buffer_mgr.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req               582 drivers/crypto/ccree/cc_buffer_mgr.c 	gfp_t flags = cc_gfp_flags(&req->base);
req               585 drivers/crypto/ccree/cc_buffer_mgr.c 	if (!req->iv) {
req               591 drivers/crypto/ccree/cc_buffer_mgr.c 	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
req               600 drivers/crypto/ccree/cc_buffer_mgr.c 			hw_iv_size, req->iv);
req               608 drivers/crypto/ccree/cc_buffer_mgr.c 		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
req               611 drivers/crypto/ccree/cc_buffer_mgr.c 		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               627 drivers/crypto/ccree/cc_buffer_mgr.c 			       struct aead_request *req,
req               631 drivers/crypto/ccree/cc_buffer_mgr.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req               634 drivers/crypto/ccree/cc_buffer_mgr.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               656 drivers/crypto/ccree/cc_buffer_mgr.c 	mapped_nents = sg_nents_for_len(req->src, size_of_assoc);
req               689 drivers/crypto/ccree/cc_buffer_mgr.c 		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
req               699 drivers/crypto/ccree/cc_buffer_mgr.c static void cc_prepare_aead_data_dlli(struct aead_request *req,
req               702 drivers/crypto/ccree/cc_buffer_mgr.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req               710 drivers/crypto/ccree/cc_buffer_mgr.c 	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
req               723 drivers/crypto/ccree/cc_buffer_mgr.c 				      struct aead_request *req,
req               728 drivers/crypto/ccree/cc_buffer_mgr.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req               734 drivers/crypto/ccree/cc_buffer_mgr.c 	if (req->src == req->dst) {
req               757 drivers/crypto/ccree/cc_buffer_mgr.c 					cc_copy_mac(dev, req, CC_SG_TO_BUF);
req               794 drivers/crypto/ccree/cc_buffer_mgr.c 			cc_copy_mac(dev, req, CC_SG_TO_BUF);
req               836 drivers/crypto/ccree/cc_buffer_mgr.c 			      struct aead_request *req,
req               840 drivers/crypto/ccree/cc_buffer_mgr.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req               849 drivers/crypto/ccree/cc_buffer_mgr.c 	unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
req               850 drivers/crypto/ccree/cc_buffer_mgr.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               864 drivers/crypto/ccree/cc_buffer_mgr.c 	areq_ctx->src_sgl = req->src;
req               865 drivers/crypto/ccree/cc_buffer_mgr.c 	areq_ctx->dst_sgl = req->dst;
req               872 drivers/crypto/ccree/cc_buffer_mgr.c 	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
req               895 drivers/crypto/ccree/cc_buffer_mgr.c 	if (req->src != req->dst) {
req               896 drivers/crypto/ccree/cc_buffer_mgr.c 		size_for_map = areq_ctx->assoclen + req->cryptlen;
req               906 drivers/crypto/ccree/cc_buffer_mgr.c 		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
req               914 drivers/crypto/ccree/cc_buffer_mgr.c 	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
req               940 drivers/crypto/ccree/cc_buffer_mgr.c 		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
req               945 drivers/crypto/ccree/cc_buffer_mgr.c 		cc_prepare_aead_data_dlli(req, &src_last_bytes,
req               954 drivers/crypto/ccree/cc_buffer_mgr.c 				      struct aead_request *req)
req               956 drivers/crypto/ccree/cc_buffer_mgr.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req               967 drivers/crypto/ccree/cc_buffer_mgr.c 		if (req->src == req->dst) {
req              1004 drivers/crypto/ccree/cc_buffer_mgr.c int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
req              1006 drivers/crypto/ccree/cc_buffer_mgr.c 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
req              1013 drivers/crypto/ccree/cc_buffer_mgr.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              1019 drivers/crypto/ccree/cc_buffer_mgr.c 	gfp_t flags = cc_gfp_flags(&req->base);
req              1029 drivers/crypto/ccree/cc_buffer_mgr.c 	    req->src == req->dst)
req              1030 drivers/crypto/ccree/cc_buffer_mgr.c 		cc_copy_mac(dev, req, CC_SG_TO_BUF);
req              1035 drivers/crypto/ccree/cc_buffer_mgr.c 				req->cryptlen :
req              1036 drivers/crypto/ccree/cc_buffer_mgr.c 				(req->cryptlen - authsize);
req              1115 drivers/crypto/ccree/cc_buffer_mgr.c 	size_to_map = req->cryptlen + areq_ctx->assoclen;
req              1118 drivers/crypto/ccree/cc_buffer_mgr.c 	   (req->src == req->dst)) {
req              1123 drivers/crypto/ccree/cc_buffer_mgr.c 	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
req              1138 drivers/crypto/ccree/cc_buffer_mgr.c 		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
req              1141 drivers/crypto/ccree/cc_buffer_mgr.c 		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
req              1144 drivers/crypto/ccree/cc_buffer_mgr.c 		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
req              1168 drivers/crypto/ccree/cc_buffer_mgr.c 		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
req              1171 drivers/crypto/ccree/cc_buffer_mgr.c 		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
req              1174 drivers/crypto/ccree/cc_buffer_mgr.c 		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
req              1189 drivers/crypto/ccree/cc_buffer_mgr.c 		cc_update_aead_mlli_nents(drvdata, req);
req              1198 drivers/crypto/ccree/cc_buffer_mgr.c 	cc_unmap_aead_request(dev, req);
req                52 drivers/crypto/ccree/cc_buffer_mgr.h int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req);
req                54 drivers/crypto/ccree/cc_buffer_mgr.h void cc_unmap_aead_request(struct device *dev, struct aead_request *req);
req               835 drivers/crypto/ccree/cc_cipher.c 	struct skcipher_request *req = (struct skcipher_request *)cc_req;
req               836 drivers/crypto/ccree/cc_cipher.c 	struct scatterlist *dst = req->dst;
req               837 drivers/crypto/ccree/cc_cipher.c 	struct scatterlist *src = req->src;
req               838 drivers/crypto/ccree/cc_cipher.c 	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
req               839 drivers/crypto/ccree/cc_cipher.c 	struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
req               845 drivers/crypto/ccree/cc_cipher.c 		memcpy(req->iv, req_ctx->iv, ivsize);
req               849 drivers/crypto/ccree/cc_cipher.c 	skcipher_request_complete(req, err);
req               852 drivers/crypto/ccree/cc_cipher.c static int cc_cipher_process(struct skcipher_request *req,
req               855 drivers/crypto/ccree/cc_cipher.c 	struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
req               857 drivers/crypto/ccree/cc_cipher.c 	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
req               859 drivers/crypto/ccree/cc_cipher.c 	struct scatterlist *dst = req->dst;
req               860 drivers/crypto/ccree/cc_cipher.c 	struct scatterlist *src = req->src;
req               861 drivers/crypto/ccree/cc_cipher.c 	unsigned int nbytes = req->cryptlen;
req               862 drivers/crypto/ccree/cc_cipher.c 	void *iv = req->iv;
req               869 drivers/crypto/ccree/cc_cipher.c 	gfp_t flags = cc_gfp_flags(&req->base);
req               873 drivers/crypto/ccree/cc_cipher.c 		"Encrypt" : "Decrypt"), req, iv, nbytes);
req               901 drivers/crypto/ccree/cc_cipher.c 	cc_req.user_arg = (void *)req;
req               927 drivers/crypto/ccree/cc_cipher.c 	cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
req               940 drivers/crypto/ccree/cc_cipher.c 			     &req->base);
req               956 drivers/crypto/ccree/cc_cipher.c static int cc_cipher_encrypt(struct skcipher_request *req)
req               958 drivers/crypto/ccree/cc_cipher.c 	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
req               962 drivers/crypto/ccree/cc_cipher.c 	return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
req               965 drivers/crypto/ccree/cc_cipher.c static int cc_cipher_decrypt(struct skcipher_request *req)
req               967 drivers/crypto/ccree/cc_cipher.c 	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
req               971 drivers/crypto/ccree/cc_cipher.c 	return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
req               127 drivers/crypto/ccree/cc_driver.h 	void (*user_cb)(struct device *dev, void *req, int err);
req               232 drivers/crypto/ccree/cc_driver.h static inline gfp_t cc_gfp_flags(struct crypto_async_request *req)
req               234 drivers/crypto/ccree/cc_driver.h 	return (req->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req               278 drivers/crypto/ccree/cc_hash.c 	struct ahash_request *req = (struct ahash_request *)cc_req;
req               279 drivers/crypto/ccree/cc_hash.c 	struct ahash_req_ctx *state = ahash_request_ctx(req);
req               280 drivers/crypto/ccree/cc_hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               283 drivers/crypto/ccree/cc_hash.c 	dev_dbg(dev, "req=%pK\n", req);
req               287 drivers/crypto/ccree/cc_hash.c 		cc_unmap_hash_request(dev, state, req->src, false);
req               291 drivers/crypto/ccree/cc_hash.c 	ahash_request_complete(req, err);
req               296 drivers/crypto/ccree/cc_hash.c 	struct ahash_request *req = (struct ahash_request *)cc_req;
req               297 drivers/crypto/ccree/cc_hash.c 	struct ahash_req_ctx *state = ahash_request_ctx(req);
req               298 drivers/crypto/ccree/cc_hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               302 drivers/crypto/ccree/cc_hash.c 	dev_dbg(dev, "req=%pK\n", req);
req               306 drivers/crypto/ccree/cc_hash.c 		cc_unmap_hash_request(dev, state, req->src, false);
req               307 drivers/crypto/ccree/cc_hash.c 		cc_unmap_result(dev, state, digestsize, req->result);
req               311 drivers/crypto/ccree/cc_hash.c 	ahash_request_complete(req, err);
req               316 drivers/crypto/ccree/cc_hash.c 	struct ahash_request *req = (struct ahash_request *)cc_req;
req               317 drivers/crypto/ccree/cc_hash.c 	struct ahash_req_ctx *state = ahash_request_ctx(req);
req               318 drivers/crypto/ccree/cc_hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               322 drivers/crypto/ccree/cc_hash.c 	dev_dbg(dev, "req=%pK\n", req);
req               326 drivers/crypto/ccree/cc_hash.c 		cc_unmap_hash_request(dev, state, req->src, false);
req               327 drivers/crypto/ccree/cc_hash.c 		cc_unmap_result(dev, state, digestsize, req->result);
req               331 drivers/crypto/ccree/cc_hash.c 	ahash_request_complete(req, err);
req               334 drivers/crypto/ccree/cc_hash.c static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
req               337 drivers/crypto/ccree/cc_hash.c 	struct ahash_req_ctx *state = ahash_request_ctx(req);
req               338 drivers/crypto/ccree/cc_hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               358 drivers/crypto/ccree/cc_hash.c static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
req               361 drivers/crypto/ccree/cc_hash.c 	struct ahash_req_ctx *state = ahash_request_ctx(req);
req               362 drivers/crypto/ccree/cc_hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               412 drivers/crypto/ccree/cc_hash.c static int cc_hash_digest(struct ahash_request *req)
req               414 drivers/crypto/ccree/cc_hash.c 	struct ahash_req_ctx *state = ahash_request_ctx(req);
req               415 drivers/crypto/ccree/cc_hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               418 drivers/crypto/ccree/cc_hash.c 	struct scatterlist *src = req->src;
req               419 drivers/crypto/ccree/cc_hash.c 	unsigned int nbytes = req->nbytes;
req               420 drivers/crypto/ccree/cc_hash.c 	u8 *result = req->result;
req               429 drivers/crypto/ccree/cc_hash.c 	gfp_t flags = cc_gfp_flags(&req->base);
req               457 drivers/crypto/ccree/cc_hash.c 	cc_req.user_arg = req;
req               507 drivers/crypto/ccree/cc_hash.c 		idx = cc_fin_hmac(desc, req, idx);
req               510 drivers/crypto/ccree/cc_hash.c 	idx = cc_fin_result(desc, req, idx);
req               512 drivers/crypto/ccree/cc_hash.c 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
req               549 drivers/crypto/ccree/cc_hash.c static int cc_hash_update(struct ahash_request *req)
req               551 drivers/crypto/ccree/cc_hash.c 	struct ahash_req_ctx *state = ahash_request_ctx(req);
req               552 drivers/crypto/ccree/cc_hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               555 drivers/crypto/ccree/cc_hash.c 	struct scatterlist *src = req->src;
req               556 drivers/crypto/ccree/cc_hash.c 	unsigned int nbytes = req->nbytes;
req               562 drivers/crypto/ccree/cc_hash.c 	gfp_t flags = cc_gfp_flags(&req->base);
req               593 drivers/crypto/ccree/cc_hash.c 	cc_req.user_arg = req;
req               616 drivers/crypto/ccree/cc_hash.c 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
req               625 drivers/crypto/ccree/cc_hash.c static int cc_do_finup(struct ahash_request *req, bool update)
req               627 drivers/crypto/ccree/cc_hash.c 	struct ahash_req_ctx *state = ahash_request_ctx(req);
req               628 drivers/crypto/ccree/cc_hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               631 drivers/crypto/ccree/cc_hash.c 	struct scatterlist *src = req->src;
req               632 drivers/crypto/ccree/cc_hash.c 	unsigned int nbytes = req->nbytes;
req               633 drivers/crypto/ccree/cc_hash.c 	u8 *result = req->result;
req               640 drivers/crypto/ccree/cc_hash.c 	gfp_t flags = cc_gfp_flags(&req->base);
req               665 drivers/crypto/ccree/cc_hash.c 	cc_req.user_arg = req;
req               680 drivers/crypto/ccree/cc_hash.c 		idx = cc_fin_hmac(desc, req, idx);
req               682 drivers/crypto/ccree/cc_hash.c 	idx = cc_fin_result(desc, req, idx);
req               684 drivers/crypto/ccree/cc_hash.c 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
req               694 drivers/crypto/ccree/cc_hash.c static int cc_hash_finup(struct ahash_request *req)
req               696 drivers/crypto/ccree/cc_hash.c 	return cc_do_finup(req, true);
req               700 drivers/crypto/ccree/cc_hash.c static int cc_hash_final(struct ahash_request *req)
req               702 drivers/crypto/ccree/cc_hash.c 	return cc_do_finup(req, false);
req               705 drivers/crypto/ccree/cc_hash.c static int cc_hash_init(struct ahash_request *req)
req               707 drivers/crypto/ccree/cc_hash.c 	struct ahash_req_ctx *state = ahash_request_ctx(req);
req               708 drivers/crypto/ccree/cc_hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               712 drivers/crypto/ccree/cc_hash.c 	dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
req              1149 drivers/crypto/ccree/cc_hash.c static int cc_mac_update(struct ahash_request *req)
req              1151 drivers/crypto/ccree/cc_hash.c 	struct ahash_req_ctx *state = ahash_request_ctx(req);
req              1152 drivers/crypto/ccree/cc_hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1160 drivers/crypto/ccree/cc_hash.c 	gfp_t flags = cc_gfp_flags(&req->base);
req              1162 drivers/crypto/ccree/cc_hash.c 	if (req->nbytes == 0) {
req              1169 drivers/crypto/ccree/cc_hash.c 	rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
req              1170 drivers/crypto/ccree/cc_hash.c 					req->nbytes, block_size, flags);
req              1174 drivers/crypto/ccree/cc_hash.c 				req->nbytes);
req              1188 drivers/crypto/ccree/cc_hash.c 		cc_setup_xcbc(req, desc, &idx);
req              1190 drivers/crypto/ccree/cc_hash.c 		cc_setup_cmac(req, desc, &idx);
req              1206 drivers/crypto/ccree/cc_hash.c 	cc_req.user_arg = (void *)req;
req              1208 drivers/crypto/ccree/cc_hash.c 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
req              1211 drivers/crypto/ccree/cc_hash.c 		cc_unmap_hash_request(dev, state, req->src, true);
req              1217 drivers/crypto/ccree/cc_hash.c static int cc_mac_final(struct ahash_request *req)
req              1219 drivers/crypto/ccree/cc_hash.c 	struct ahash_req_ctx *state = ahash_request_ctx(req);
req              1220 drivers/crypto/ccree/cc_hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1229 drivers/crypto/ccree/cc_hash.c 	gfp_t flags = cc_gfp_flags(&req->base);
req              1248 drivers/crypto/ccree/cc_hash.c 	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
req              1249 drivers/crypto/ccree/cc_hash.c 				      req->nbytes, 0, flags)) {
req              1257 drivers/crypto/ccree/cc_hash.c 		cc_unmap_hash_request(dev, state, req->src, true);
req              1264 drivers/crypto/ccree/cc_hash.c 	cc_req.user_arg = (void *)req;
req              1298 drivers/crypto/ccree/cc_hash.c 		cc_setup_xcbc(req, desc, &idx);
req              1300 drivers/crypto/ccree/cc_hash.c 		cc_setup_cmac(req, desc, &idx);
req              1329 drivers/crypto/ccree/cc_hash.c 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
req              1332 drivers/crypto/ccree/cc_hash.c 		cc_unmap_hash_request(dev, state, req->src, true);
req              1333 drivers/crypto/ccree/cc_hash.c 		cc_unmap_result(dev, state, digestsize, req->result);
req              1339 drivers/crypto/ccree/cc_hash.c static int cc_mac_finup(struct ahash_request *req)
req              1341 drivers/crypto/ccree/cc_hash.c 	struct ahash_req_ctx *state = ahash_request_ctx(req);
req              1342 drivers/crypto/ccree/cc_hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1351 drivers/crypto/ccree/cc_hash.c 	gfp_t flags = cc_gfp_flags(&req->base);
req              1353 drivers/crypto/ccree/cc_hash.c 	dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
req              1354 drivers/crypto/ccree/cc_hash.c 	if (state->xcbc_count > 0 && req->nbytes == 0) {
req              1356 drivers/crypto/ccree/cc_hash.c 		return cc_mac_final(req);
req              1364 drivers/crypto/ccree/cc_hash.c 	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
req              1365 drivers/crypto/ccree/cc_hash.c 				      req->nbytes, 1, flags)) {
req              1372 drivers/crypto/ccree/cc_hash.c 		cc_unmap_hash_request(dev, state, req->src, true);
req              1379 drivers/crypto/ccree/cc_hash.c 	cc_req.user_arg = (void *)req;
req              1383 drivers/crypto/ccree/cc_hash.c 		cc_setup_xcbc(req, desc, &idx);
req              1386 drivers/crypto/ccree/cc_hash.c 		cc_setup_cmac(req, desc, &idx);
req              1389 drivers/crypto/ccree/cc_hash.c 	if (req->nbytes == 0) {
req              1411 drivers/crypto/ccree/cc_hash.c 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
req              1414 drivers/crypto/ccree/cc_hash.c 		cc_unmap_hash_request(dev, state, req->src, true);
req              1415 drivers/crypto/ccree/cc_hash.c 		cc_unmap_result(dev, state, digestsize, req->result);
req              1421 drivers/crypto/ccree/cc_hash.c static int cc_mac_digest(struct ahash_request *req)
req              1423 drivers/crypto/ccree/cc_hash.c 	struct ahash_req_ctx *state = ahash_request_ctx(req);
req              1424 drivers/crypto/ccree/cc_hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1433 drivers/crypto/ccree/cc_hash.c 	gfp_t flags = cc_gfp_flags(&req->base);
req              1435 drivers/crypto/ccree/cc_hash.c 	dev_dbg(dev, "===== -digest mac (%d) ====\n",  req->nbytes);
req              1449 drivers/crypto/ccree/cc_hash.c 	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
req              1450 drivers/crypto/ccree/cc_hash.c 				      req->nbytes, 1, flags)) {
req              1458 drivers/crypto/ccree/cc_hash.c 	cc_req.user_arg = (void *)req;
req              1462 drivers/crypto/ccree/cc_hash.c 		cc_setup_xcbc(req, desc, &idx);
req              1465 drivers/crypto/ccree/cc_hash.c 		cc_setup_cmac(req, desc, &idx);
req              1468 drivers/crypto/ccree/cc_hash.c 	if (req->nbytes == 0) {
req              1490 drivers/crypto/ccree/cc_hash.c 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
req              1493 drivers/crypto/ccree/cc_hash.c 		cc_unmap_hash_request(dev, state, req->src, true);
req              1494 drivers/crypto/ccree/cc_hash.c 		cc_unmap_result(dev, state, digestsize, req->result);
req              1500 drivers/crypto/ccree/cc_hash.c static int cc_hash_export(struct ahash_request *req, void *out)
req              1502 drivers/crypto/ccree/cc_hash.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req              1504 drivers/crypto/ccree/cc_hash.c 	struct ahash_req_ctx *state = ahash_request_ctx(req);
req              1526 drivers/crypto/ccree/cc_hash.c static int cc_hash_import(struct ahash_request *req, const void *in)
req              1528 drivers/crypto/ccree/cc_hash.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req              1531 drivers/crypto/ccree/cc_hash.c 	struct ahash_req_ctx *state = ahash_request_ctx(req);
req               354 drivers/crypto/ccree/cc_request_mgr.c 	void *req;
req               368 drivers/crypto/ccree/cc_request_mgr.c 		req = creq->user_arg;
req               375 drivers/crypto/ccree/cc_request_mgr.c 			creq->user_cb(dev, req, -EINPROGRESS);
req               399 drivers/crypto/ccree/cc_request_mgr.c 			creq->user_cb(dev, req, rc);
req               414 drivers/crypto/ccree/cc_request_mgr.c 		    struct crypto_async_request *req)
req               419 drivers/crypto/ccree/cc_request_mgr.c 	bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
req               420 drivers/crypto/ccree/cc_request_mgr.c 	gfp_t flags = cc_gfp_flags(req);
req                30 drivers/crypto/ccree/cc_request_mgr.h 		    struct crypto_async_request *req);
req                96 drivers/crypto/chelsio/chcr_algo.c static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
req               176 drivers/crypto/chelsio/chcr_algo.c void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
req               179 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               190 drivers/crypto/chelsio/chcr_algo.c 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
req               191 drivers/crypto/chelsio/chcr_algo.c 				authsize, req->assoclen +
req               192 drivers/crypto/chelsio/chcr_algo.c 				req->cryptlen - authsize);
req               214 drivers/crypto/chelsio/chcr_algo.c static inline int chcr_handle_aead_resp(struct aead_request *req,
req               218 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
req               219 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               222 drivers/crypto/chelsio/chcr_algo.c 	chcr_aead_common_exit(req);
req               224 drivers/crypto/chelsio/chcr_algo.c 		chcr_verify_tag(req, input, &err);
req               228 drivers/crypto/chelsio/chcr_algo.c 	req->base.complete(&req->base, err);
req               720 drivers/crypto/chelsio/chcr_algo.c 			       struct crypto_async_request *req,
req               736 drivers/crypto/chelsio/chcr_algo.c 	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
req               760 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
req               767 drivers/crypto/chelsio/chcr_algo.c 		ablkcipher_request_ctx(wrparam->req);
req               772 drivers/crypto/chelsio/chcr_algo.c 	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
req               832 drivers/crypto/chelsio/chcr_algo.c 	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
req               833 drivers/crypto/chelsio/chcr_algo.c 	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
req               838 drivers/crypto/chelsio/chcr_algo.c 	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
req               845 drivers/crypto/chelsio/chcr_algo.c 		sg_pcopy_to_buffer(wrparam->req->src,
req               846 drivers/crypto/chelsio/chcr_algo.c 			sg_nents(wrparam->req->src), wrparam->req->info, 16,
req              1020 drivers/crypto/chelsio/chcr_algo.c static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
req              1023 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req              1025 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
req              1054 drivers/crypto/chelsio/chcr_algo.c static int chcr_update_cipher_iv(struct ablkcipher_request *req,
req              1057 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req              1058 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
req              1063 drivers/crypto/chelsio/chcr_algo.c 		ctr_add_iv(iv, req->info, (reqctx->processed /
req              1070 drivers/crypto/chelsio/chcr_algo.c 		ret = chcr_update_tweak(req, iv, 0);
req              1074 drivers/crypto/chelsio/chcr_algo.c 			memcpy(iv, req->info, AES_BLOCK_SIZE);
req              1088 drivers/crypto/chelsio/chcr_algo.c static int chcr_final_cipher_iv(struct ablkcipher_request *req,
req              1091 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req              1092 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
req              1097 drivers/crypto/chelsio/chcr_algo.c 		ctr_add_iv(iv, req->info, DIV_ROUND_UP(reqctx->processed,
req              1100 drivers/crypto/chelsio/chcr_algo.c 		ret = chcr_update_tweak(req, iv, 1);
req              1111 drivers/crypto/chelsio/chcr_algo.c static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
req              1114 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req              1119 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
req              1126 drivers/crypto/chelsio/chcr_algo.c 	if (req->nbytes == reqctx->processed) {
req              1128 drivers/crypto/chelsio/chcr_algo.c 				      req);
req              1129 drivers/crypto/chelsio/chcr_algo.c 		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
req              1137 drivers/crypto/chelsio/chcr_algo.c 		if ((bytes + reqctx->processed) >= req->nbytes)
req              1138 drivers/crypto/chelsio/chcr_algo.c 			bytes  = req->nbytes - reqctx->processed;
req              1143 drivers/crypto/chelsio/chcr_algo.c 		bytes  = req->nbytes - reqctx->processed;
req              1145 drivers/crypto/chelsio/chcr_algo.c 	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
req              1151 drivers/crypto/chelsio/chcr_algo.c 				      req);
req              1153 drivers/crypto/chelsio/chcr_algo.c 				     req->base.flags,
req              1154 drivers/crypto/chelsio/chcr_algo.c 				     req->src,
req              1155 drivers/crypto/chelsio/chcr_algo.c 				     req->dst,
req              1156 drivers/crypto/chelsio/chcr_algo.c 				     req->nbytes,
req              1157 drivers/crypto/chelsio/chcr_algo.c 				     req->info,
req              1166 drivers/crypto/chelsio/chcr_algo.c 	wrparam.req = req;
req              1181 drivers/crypto/chelsio/chcr_algo.c 	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
req              1184 drivers/crypto/chelsio/chcr_algo.c 	req->base.complete(&req->base, err);
req              1188 drivers/crypto/chelsio/chcr_algo.c static int process_cipher(struct ablkcipher_request *req,
req              1193 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req              1195 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
req              1201 drivers/crypto/chelsio/chcr_algo.c 	if (!req->info)
req              1204 drivers/crypto/chelsio/chcr_algo.c 	    (req->nbytes == 0) ||
req              1205 drivers/crypto/chelsio/chcr_algo.c 	    (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
req              1207 drivers/crypto/chelsio/chcr_algo.c 		       ablkctx->enckey_len, req->nbytes, ivsize);
req              1211 drivers/crypto/chelsio/chcr_algo.c 	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
req              1214 drivers/crypto/chelsio/chcr_algo.c 	if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
req              1222 drivers/crypto/chelsio/chcr_algo.c 		dnents = sg_nents_xlen(req->dst, req->nbytes,
req              1227 drivers/crypto/chelsio/chcr_algo.c 		reqctx->imm = (transhdr_len + IV + req->nbytes) <=
req              1229 drivers/crypto/chelsio/chcr_algo.c 		bytes = IV + req->nbytes;
req              1236 drivers/crypto/chelsio/chcr_algo.c 		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
req              1239 drivers/crypto/chelsio/chcr_algo.c 		if ((bytes + reqctx->processed) >= req->nbytes)
req              1240 drivers/crypto/chelsio/chcr_algo.c 			bytes  = req->nbytes - reqctx->processed;
req              1244 drivers/crypto/chelsio/chcr_algo.c 		bytes = req->nbytes;
req              1248 drivers/crypto/chelsio/chcr_algo.c 		bytes = adjust_ctr_overflow(req->info, bytes);
req              1253 drivers/crypto/chelsio/chcr_algo.c 		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
req              1262 drivers/crypto/chelsio/chcr_algo.c 		memcpy(reqctx->iv, req->info, IV);
req              1266 drivers/crypto/chelsio/chcr_algo.c 				      req);
req              1268 drivers/crypto/chelsio/chcr_algo.c 					   req->base.flags,
req              1269 drivers/crypto/chelsio/chcr_algo.c 					   req->src,
req              1270 drivers/crypto/chelsio/chcr_algo.c 					   req->dst,
req              1271 drivers/crypto/chelsio/chcr_algo.c 					   req->nbytes,
req              1277 drivers/crypto/chelsio/chcr_algo.c 	reqctx->srcsg = req->src;
req              1278 drivers/crypto/chelsio/chcr_algo.c 	reqctx->dstsg = req->dst;
req              1282 drivers/crypto/chelsio/chcr_algo.c 	wrparam.req = req;
req              1294 drivers/crypto/chelsio/chcr_algo.c 	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
req              1299 drivers/crypto/chelsio/chcr_algo.c static int chcr_aes_encrypt(struct ablkcipher_request *req)
req              1301 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req              1313 drivers/crypto/chelsio/chcr_algo.c 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
req              1319 drivers/crypto/chelsio/chcr_algo.c 	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
req              1332 drivers/crypto/chelsio/chcr_algo.c static int chcr_aes_decrypt(struct ablkcipher_request *req)
req              1334 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req              1347 drivers/crypto/chelsio/chcr_algo.c 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
req              1351 drivers/crypto/chelsio/chcr_algo.c 	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
req              1491 drivers/crypto/chelsio/chcr_algo.c static struct sk_buff *create_hash_wr(struct ahash_request *req,
req              1494 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
req              1495 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1503 drivers/crypto/chelsio/chcr_algo.c 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
req              1568 drivers/crypto/chelsio/chcr_algo.c 	chcr_add_hash_src_ent(req, ulptx, param);
req              1573 drivers/crypto/chelsio/chcr_algo.c 	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
req              1583 drivers/crypto/chelsio/chcr_algo.c static int chcr_ahash_update(struct ahash_request *req)
req              1585 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
req              1586 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
req              1591 drivers/crypto/chelsio/chcr_algo.c 	unsigned int nbytes = req->nbytes;
req              1602 drivers/crypto/chelsio/chcr_algo.c 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
req              1616 drivers/crypto/chelsio/chcr_algo.c 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
req              1623 drivers/crypto/chelsio/chcr_algo.c 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
req              1630 drivers/crypto/chelsio/chcr_algo.c 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
req              1632 drivers/crypto/chelsio/chcr_algo.c 	if (params.sg_len > req->nbytes)
req              1633 drivers/crypto/chelsio/chcr_algo.c 		params.sg_len = req->nbytes;
req              1641 drivers/crypto/chelsio/chcr_algo.c 	req_ctx->hctx_wr.srcsg = req->src;
req              1645 drivers/crypto/chelsio/chcr_algo.c 	skb = create_hash_wr(req, &params);
req              1655 drivers/crypto/chelsio/chcr_algo.c 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
req              1656 drivers/crypto/chelsio/chcr_algo.c 				   req_ctx->reqbfr, remainder, req->nbytes -
req              1666 drivers/crypto/chelsio/chcr_algo.c 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
req              1682 drivers/crypto/chelsio/chcr_algo.c static int chcr_ahash_final(struct ahash_request *req)
req              1684 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
req              1685 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
req              1717 drivers/crypto/chelsio/chcr_algo.c 	req_ctx->hctx_wr.srcsg = req->src;
req              1731 drivers/crypto/chelsio/chcr_algo.c 	skb = create_hash_wr(req, &params);
req              1746 drivers/crypto/chelsio/chcr_algo.c static int chcr_ahash_finup(struct ahash_request *req)
req              1748 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
req              1749 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
req              1766 drivers/crypto/chelsio/chcr_algo.c 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
req              1772 drivers/crypto/chelsio/chcr_algo.c 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
req              1787 drivers/crypto/chelsio/chcr_algo.c 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
req              1789 drivers/crypto/chelsio/chcr_algo.c 	if (params.sg_len < req->nbytes) {
req              1803 drivers/crypto/chelsio/chcr_algo.c 		params.sg_len = req->nbytes;
req              1811 drivers/crypto/chelsio/chcr_algo.c 	req_ctx->hctx_wr.srcsg = req->src;
req              1812 drivers/crypto/chelsio/chcr_algo.c 	if ((req_ctx->reqlen + req->nbytes) == 0) {
req              1819 drivers/crypto/chelsio/chcr_algo.c 	skb = create_hash_wr(req, &params);
req              1832 drivers/crypto/chelsio/chcr_algo.c 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
req              1838 drivers/crypto/chelsio/chcr_algo.c static int chcr_ahash_digest(struct ahash_request *req)
req              1840 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
req              1841 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
req              1849 drivers/crypto/chelsio/chcr_algo.c 	rtfm->init(req);
req              1859 drivers/crypto/chelsio/chcr_algo.c 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
req              1866 drivers/crypto/chelsio/chcr_algo.c 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
req              1880 drivers/crypto/chelsio/chcr_algo.c 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
req              1882 drivers/crypto/chelsio/chcr_algo.c 	if (params.sg_len < req->nbytes) {
req              1893 drivers/crypto/chelsio/chcr_algo.c 		params.sg_len = req->nbytes;
req              1897 drivers/crypto/chelsio/chcr_algo.c 		params.scmd1 = req->nbytes + req_ctx->data_len;
req              1902 drivers/crypto/chelsio/chcr_algo.c 	req_ctx->hctx_wr.srcsg = req->src;
req              1905 drivers/crypto/chelsio/chcr_algo.c 	if (req->nbytes == 0) {
req              1911 drivers/crypto/chelsio/chcr_algo.c 	skb = create_hash_wr(req, &params);
req              1922 drivers/crypto/chelsio/chcr_algo.c 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
req              1928 drivers/crypto/chelsio/chcr_algo.c static int chcr_ahash_continue(struct ahash_request *req)
req              1930 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
req              1932 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
req              1952 drivers/crypto/chelsio/chcr_algo.c 	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
req              1953 drivers/crypto/chelsio/chcr_algo.c 		params.sg_len = req->nbytes - hctx_wr->processed;
req              1955 drivers/crypto/chelsio/chcr_algo.c 	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
req              1973 drivers/crypto/chelsio/chcr_algo.c 	skb = create_hash_wr(req, &params);
req              1987 drivers/crypto/chelsio/chcr_algo.c static inline void chcr_handle_ahash_resp(struct ahash_request *req,
req              1991 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
req              1994 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              2000 drivers/crypto/chelsio/chcr_algo.c 	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
req              2013 drivers/crypto/chelsio/chcr_algo.c 				 req->nbytes)) {
req              2016 drivers/crypto/chelsio/chcr_algo.c 			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
req              2029 drivers/crypto/chelsio/chcr_algo.c 	err = chcr_ahash_continue(req);
req              2035 drivers/crypto/chelsio/chcr_algo.c 		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
req              2040 drivers/crypto/chelsio/chcr_algo.c 	req->base.complete(&req->base, err);
req              2047 drivers/crypto/chelsio/chcr_algo.c int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
req              2050 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_tfm *tfm = req->tfm;
req              2056 drivers/crypto/chelsio/chcr_algo.c 		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
req              2060 drivers/crypto/chelsio/chcr_algo.c 		 chcr_handle_cipher_resp(ablkcipher_request_cast(req),
req              2064 drivers/crypto/chelsio/chcr_algo.c 		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
req              2253 drivers/crypto/chelsio/chcr_algo.c inline void chcr_aead_common_exit(struct aead_request *req)
req              2255 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
req              2256 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2259 drivers/crypto/chelsio/chcr_algo.c 	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
req              2262 drivers/crypto/chelsio/chcr_algo.c static int chcr_aead_common_init(struct aead_request *req)
req              2264 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2266 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
req              2273 drivers/crypto/chelsio/chcr_algo.c 	if (reqctx->op && req->cryptlen < authsize)
req              2280 drivers/crypto/chelsio/chcr_algo.c 	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
req              2292 drivers/crypto/chelsio/chcr_algo.c static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
req              2296 drivers/crypto/chelsio/chcr_algo.c 	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
req              2298 drivers/crypto/chelsio/chcr_algo.c 	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
req              2300 drivers/crypto/chelsio/chcr_algo.c 	    (req->assoclen > aadmax) ||
req              2306 drivers/crypto/chelsio/chcr_algo.c static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
req              2308 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2310 drivers/crypto/chelsio/chcr_algo.c 	struct aead_request *subreq = aead_request_ctx(req);
req              2313 drivers/crypto/chelsio/chcr_algo.c 	aead_request_set_callback(subreq, req->base.flags,
req              2314 drivers/crypto/chelsio/chcr_algo.c 				  req->base.complete, req->base.data);
req              2315 drivers/crypto/chelsio/chcr_algo.c 	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req              2316 drivers/crypto/chelsio/chcr_algo.c 				 req->iv);
req              2317 drivers/crypto/chelsio/chcr_algo.c 	aead_request_set_ad(subreq, req->assoclen);
req              2322 drivers/crypto/chelsio/chcr_algo.c static struct sk_buff *create_authenc_wr(struct aead_request *req,
req              2326 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2329 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
req              2341 drivers/crypto/chelsio/chcr_algo.c 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
req              2345 drivers/crypto/chelsio/chcr_algo.c 	if (req->cryptlen == 0)
req              2349 drivers/crypto/chelsio/chcr_algo.c 	error = chcr_aead_common_init(req);
req              2357 drivers/crypto/chelsio/chcr_algo.c 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
req              2360 drivers/crypto/chelsio/chcr_algo.c 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
req              2366 drivers/crypto/chelsio/chcr_algo.c 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
req              2368 drivers/crypto/chelsio/chcr_algo.c 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
req              2373 drivers/crypto/chelsio/chcr_algo.c 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
req              2376 drivers/crypto/chelsio/chcr_algo.c 		chcr_aead_common_exit(req);
req              2377 drivers/crypto/chelsio/chcr_algo.c 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
req              2396 drivers/crypto/chelsio/chcr_algo.c 	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
req              2399 drivers/crypto/chelsio/chcr_algo.c 					null ? 0 : IV + req->assoclen,
req              2400 drivers/crypto/chelsio/chcr_algo.c 					req->assoclen + IV + 1,
req              2404 drivers/crypto/chelsio/chcr_algo.c 					null ? 0 : req->assoclen + IV + 1,
req              2437 drivers/crypto/chelsio/chcr_algo.c 		memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
req              2442 drivers/crypto/chelsio/chcr_algo.c 		memcpy(ivptr, req->iv, IV);
req              2444 drivers/crypto/chelsio/chcr_algo.c 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
req              2445 drivers/crypto/chelsio/chcr_algo.c 	chcr_add_aead_src_ent(req, ulptx);
req              2448 drivers/crypto/chelsio/chcr_algo.c 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
req              2449 drivers/crypto/chelsio/chcr_algo.c 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
req              2455 drivers/crypto/chelsio/chcr_algo.c 	chcr_aead_common_exit(req);
req              2461 drivers/crypto/chelsio/chcr_algo.c 		      struct aead_request *req,
req              2465 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
req              2466 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2470 drivers/crypto/chelsio/chcr_algo.c 	dst_size = req->assoclen + req->cryptlen + (op_type ?
req              2472 drivers/crypto/chelsio/chcr_algo.c 	if (!req->cryptlen || !dst_size)
req              2482 drivers/crypto/chelsio/chcr_algo.c 	if (req->src == req->dst) {
req              2483 drivers/crypto/chelsio/chcr_algo.c 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
req              2488 drivers/crypto/chelsio/chcr_algo.c 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
req              2492 drivers/crypto/chelsio/chcr_algo.c 		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
req              2495 drivers/crypto/chelsio/chcr_algo.c 			dma_unmap_sg(dev, req->src, sg_nents(req->src),
req              2508 drivers/crypto/chelsio/chcr_algo.c 			 struct aead_request *req,
req              2511 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
req              2512 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2516 drivers/crypto/chelsio/chcr_algo.c 	dst_size = req->assoclen + req->cryptlen + (op_type ?
req              2518 drivers/crypto/chelsio/chcr_algo.c 	if (!req->cryptlen || !dst_size)
req              2523 drivers/crypto/chelsio/chcr_algo.c 	if (req->src == req->dst) {
req              2524 drivers/crypto/chelsio/chcr_algo.c 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
req              2527 drivers/crypto/chelsio/chcr_algo.c 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
req              2529 drivers/crypto/chelsio/chcr_algo.c 		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
req              2534 drivers/crypto/chelsio/chcr_algo.c void chcr_add_aead_src_ent(struct aead_request *req,
req              2538 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
req              2547 drivers/crypto/chelsio/chcr_algo.c 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
req              2548 drivers/crypto/chelsio/chcr_algo.c 				   buf, req->cryptlen + req->assoclen, 0);
req              2554 drivers/crypto/chelsio/chcr_algo.c 		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
req              2555 drivers/crypto/chelsio/chcr_algo.c 				  req->assoclen,  0);
req              2560 drivers/crypto/chelsio/chcr_algo.c void chcr_add_aead_dst_ent(struct aead_request *req,
req              2564 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
req              2565 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2573 drivers/crypto/chelsio/chcr_algo.c 	temp = req->assoclen + req->cryptlen +
req              2575 drivers/crypto/chelsio/chcr_algo.c 	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
req              2579 drivers/crypto/chelsio/chcr_algo.c void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
req              2584 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
req              2590 drivers/crypto/chelsio/chcr_algo.c 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
req              2602 drivers/crypto/chelsio/chcr_algo.c void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
req              2607 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
req              2608 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
req              2621 drivers/crypto/chelsio/chcr_algo.c void chcr_add_hash_src_ent(struct ahash_request *req,
req              2626 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
req              2653 drivers/crypto/chelsio/chcr_algo.c 		      struct ahash_request *req)
req              2655 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
req              2658 drivers/crypto/chelsio/chcr_algo.c 	if (!req->nbytes)
req              2660 drivers/crypto/chelsio/chcr_algo.c 	error = dma_map_sg(dev, req->src, sg_nents(req->src),
req              2669 drivers/crypto/chelsio/chcr_algo.c 			 struct ahash_request *req)
req              2671 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
req              2673 drivers/crypto/chelsio/chcr_algo.c 	if (!req->nbytes)
req              2676 drivers/crypto/chelsio/chcr_algo.c 	dma_unmap_sg(dev, req->src, sg_nents(req->src),
req              2683 drivers/crypto/chelsio/chcr_algo.c 			struct ablkcipher_request *req)
req              2687 drivers/crypto/chelsio/chcr_algo.c 	if (req->src == req->dst) {
req              2688 drivers/crypto/chelsio/chcr_algo.c 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
req              2693 drivers/crypto/chelsio/chcr_algo.c 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
req              2697 drivers/crypto/chelsio/chcr_algo.c 		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
req              2700 drivers/crypto/chelsio/chcr_algo.c 			dma_unmap_sg(dev, req->src, sg_nents(req->src),
req              2712 drivers/crypto/chelsio/chcr_algo.c 			   struct ablkcipher_request *req)
req              2714 drivers/crypto/chelsio/chcr_algo.c 	if (req->src == req->dst) {
req              2715 drivers/crypto/chelsio/chcr_algo.c 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
req              2718 drivers/crypto/chelsio/chcr_algo.c 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
req              2720 drivers/crypto/chelsio/chcr_algo.c 		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
req              2743 drivers/crypto/chelsio/chcr_algo.c static int generate_b0(struct aead_request *req, u8 *ivptr,
req              2748 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req              2749 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
req              2763 drivers/crypto/chelsio/chcr_algo.c 	if (req->assoclen)
req              2767 drivers/crypto/chelsio/chcr_algo.c 			 req->cryptlen - m : req->cryptlen, l);
req              2781 drivers/crypto/chelsio/chcr_algo.c static int ccm_format_packet(struct aead_request *req,
req              2787 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
req              2788 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2795 drivers/crypto/chelsio/chcr_algo.c 		memcpy(ivptr + 4, req->iv, 8);
req              2798 drivers/crypto/chelsio/chcr_algo.c 		memcpy(ivptr, req->iv, 16);
req              2804 drivers/crypto/chelsio/chcr_algo.c 	rc = generate_b0(req, ivptr, op_type);
req              2812 drivers/crypto/chelsio/chcr_algo.c 				  struct aead_request *req,
req              2815 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2825 drivers/crypto/chelsio/chcr_algo.c 		assoclen = req->assoclen - 8;
req              2827 drivers/crypto/chelsio/chcr_algo.c 		assoclen = req->assoclen;
req              2831 drivers/crypto/chelsio/chcr_algo.c 	auth_offset = req->cryptlen ?
req              2832 drivers/crypto/chelsio/chcr_algo.c 		(req->assoclen + IV + 1 + ccm_xtra) : 0;
req              2834 drivers/crypto/chelsio/chcr_algo.c 		if (crypto_aead_authsize(tfm) != req->cryptlen)
req              2844 drivers/crypto/chelsio/chcr_algo.c 		htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
req              2848 drivers/crypto/chelsio/chcr_algo.c 				req->assoclen + IV + 1 + ccm_xtra, 0);
req              2864 drivers/crypto/chelsio/chcr_algo.c 				   struct aead_request *req,
req              2869 drivers/crypto/chelsio/chcr_algo.c 		if (crypto_ccm_check_iv(req->iv)) {
req              2874 drivers/crypto/chelsio/chcr_algo.c 		if (req->assoclen != 16 && req->assoclen != 20) {
req              2876 drivers/crypto/chelsio/chcr_algo.c 			       req->assoclen);
req              2883 drivers/crypto/chelsio/chcr_algo.c static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
req              2887 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2889 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
req              2896 drivers/crypto/chelsio/chcr_algo.c 	unsigned int sub_type, assoclen = req->assoclen;
req              2900 drivers/crypto/chelsio/chcr_algo.c 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
req              2908 drivers/crypto/chelsio/chcr_algo.c 	error = chcr_aead_common_init(req);
req              2912 drivers/crypto/chelsio/chcr_algo.c 	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
req              2915 drivers/crypto/chelsio/chcr_algo.c 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
req              2920 drivers/crypto/chelsio/chcr_algo.c 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
req              2925 drivers/crypto/chelsio/chcr_algo.c 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
req              2927 drivers/crypto/chelsio/chcr_algo.c 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
req              2933 drivers/crypto/chelsio/chcr_algo.c 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
req              2936 drivers/crypto/chelsio/chcr_algo.c 		chcr_aead_common_exit(req);
req              2937 drivers/crypto/chelsio/chcr_algo.c 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
req              2948 drivers/crypto/chelsio/chcr_algo.c 	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
req              2958 drivers/crypto/chelsio/chcr_algo.c 	error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
req              2961 drivers/crypto/chelsio/chcr_algo.c 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
req              2962 drivers/crypto/chelsio/chcr_algo.c 	chcr_add_aead_src_ent(req, ulptx);
req              2966 drivers/crypto/chelsio/chcr_algo.c 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
req              2968 drivers/crypto/chelsio/chcr_algo.c 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
req              2976 drivers/crypto/chelsio/chcr_algo.c 	chcr_aead_common_exit(req);
req              2980 drivers/crypto/chelsio/chcr_algo.c static struct sk_buff *create_gcm_wr(struct aead_request *req,
req              2984 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              2986 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
req              2992 drivers/crypto/chelsio/chcr_algo.c 	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
req              2996 drivers/crypto/chelsio/chcr_algo.c 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
req              3001 drivers/crypto/chelsio/chcr_algo.c 		assoclen = req->assoclen - 8;
req              3004 drivers/crypto/chelsio/chcr_algo.c 	error = chcr_aead_common_init(req);
req              3007 drivers/crypto/chelsio/chcr_algo.c 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
req              3010 drivers/crypto/chelsio/chcr_algo.c 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
req              3016 drivers/crypto/chelsio/chcr_algo.c 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
req              3018 drivers/crypto/chelsio/chcr_algo.c 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
req              3022 drivers/crypto/chelsio/chcr_algo.c 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
req              3026 drivers/crypto/chelsio/chcr_algo.c 		chcr_aead_common_exit(req);
req              3027 drivers/crypto/chelsio/chcr_algo.c 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
req              3042 drivers/crypto/chelsio/chcr_algo.c 		htonl(req->assoclen + IV + req->cryptlen);
req              3046 drivers/crypto/chelsio/chcr_algo.c 					req->assoclen + IV + 1, 0);
req              3048 drivers/crypto/chelsio/chcr_algo.c 			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
req              3070 drivers/crypto/chelsio/chcr_algo.c 		memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
req              3072 drivers/crypto/chelsio/chcr_algo.c 		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
req              3078 drivers/crypto/chelsio/chcr_algo.c 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
req              3079 drivers/crypto/chelsio/chcr_algo.c 	chcr_add_aead_src_ent(req, ulptx);
req              3082 drivers/crypto/chelsio/chcr_algo.c 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
req              3083 drivers/crypto/chelsio/chcr_algo.c 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
req              3089 drivers/crypto/chelsio/chcr_algo.c 	chcr_aead_common_exit(req);
req              3603 drivers/crypto/chelsio/chcr_algo.c static int chcr_aead_op(struct aead_request *req,
req              3607 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              3608 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
req              3624 drivers/crypto/chelsio/chcr_algo.c 		return chcr_aead_fallback(req, reqctx->op);
req              3631 drivers/crypto/chelsio/chcr_algo.c 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
req              3638 drivers/crypto/chelsio/chcr_algo.c 	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
req              3651 drivers/crypto/chelsio/chcr_algo.c static int chcr_aead_encrypt(struct aead_request *req)
req              3653 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              3654 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
req              3664 drivers/crypto/chelsio/chcr_algo.c 		return chcr_aead_op(req, 0, create_authenc_wr);
req              3667 drivers/crypto/chelsio/chcr_algo.c 		return chcr_aead_op(req, 0, create_aead_ccm_wr);
req              3669 drivers/crypto/chelsio/chcr_algo.c 		return chcr_aead_op(req, 0, create_gcm_wr);
req              3673 drivers/crypto/chelsio/chcr_algo.c static int chcr_aead_decrypt(struct aead_request *req)
req              3675 drivers/crypto/chelsio/chcr_algo.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req              3677 drivers/crypto/chelsio/chcr_algo.c 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
req              3693 drivers/crypto/chelsio/chcr_algo.c 		return chcr_aead_op(req, size, create_authenc_wr);
req              3696 drivers/crypto/chelsio/chcr_algo.c 		return chcr_aead_op(req, size, create_aead_ccm_wr);
req              3698 drivers/crypto/chelsio/chcr_algo.c 		return chcr_aead_op(req, size, create_gcm_wr);
req               290 drivers/crypto/chelsio/chcr_algo.h 	struct ablkcipher_request *req;
req               158 drivers/crypto/chelsio/chcr_core.c 	struct crypto_async_request *req;
req               165 drivers/crypto/chelsio/chcr_core.c 	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
req               173 drivers/crypto/chelsio/chcr_core.c 	if (req) {
req               174 drivers/crypto/chelsio/chcr_core.c 		error_status = chcr_handle_resp(req, input, error_status);
req               173 drivers/crypto/chelsio/chcr_core.h 	struct chcr_ipsec_req req;
req               221 drivers/crypto/chelsio/chcr_core.h int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
req               315 drivers/crypto/chelsio/chcr_crypto.h void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
req               316 drivers/crypto/chelsio/chcr_crypto.h int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
req               318 drivers/crypto/chelsio/chcr_crypto.h void chcr_aead_dma_unmap(struct device *dev, struct aead_request *req,
req               320 drivers/crypto/chelsio/chcr_crypto.h void chcr_add_aead_dst_ent(struct aead_request *req,
req               323 drivers/crypto/chelsio/chcr_crypto.h void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx);
req               324 drivers/crypto/chelsio/chcr_crypto.h void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
req               327 drivers/crypto/chelsio/chcr_crypto.h int chcr_cipher_dma_map(struct device *dev, struct ablkcipher_request *req);
req               328 drivers/crypto/chelsio/chcr_crypto.h void chcr_cipher_dma_unmap(struct device *dev, struct ablkcipher_request *req);
req               329 drivers/crypto/chelsio/chcr_crypto.h void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
req               334 drivers/crypto/chelsio/chcr_crypto.h void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx,
req               336 drivers/crypto/chelsio/chcr_crypto.h int chcr_hash_dma_map(struct device *dev, struct ahash_request *req);
req               337 drivers/crypto/chelsio/chcr_crypto.h void chcr_hash_dma_unmap(struct device *dev, struct ahash_request *req);
req               338 drivers/crypto/chelsio/chcr_crypto.h void chcr_aead_common_exit(struct aead_request *req);
req               578 drivers/crypto/chelsio/chcr_ipsec.c 	wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
req               579 drivers/crypto/chelsio/chcr_ipsec.c 	wr->req.ulptx.len = htonl(ndesc - 1);
req               582 drivers/crypto/chelsio/chcr_ipsec.c 	wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more);
req               583 drivers/crypto/chelsio/chcr_ipsec.c 	wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
req               584 drivers/crypto/chelsio/chcr_ipsec.c 					 sizeof(wr->req.key_ctx) +
req               594 drivers/crypto/chelsio/chcr_ipsec.c 	wr->req.sec_cpl.op_ivinsrtofst = htonl(
req               601 drivers/crypto/chelsio/chcr_ipsec.c 	wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen);
req               610 drivers/crypto/chelsio/chcr_ipsec.c 	wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
req               615 drivers/crypto/chelsio/chcr_ipsec.c 	wr->req.sec_cpl.cipherstop_lo_authinsert =
req               619 drivers/crypto/chelsio/chcr_ipsec.c 	wr->req.sec_cpl.seqno_numivs =
req               625 drivers/crypto/chelsio/chcr_ipsec.c 	wr->req.sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
req               306 drivers/crypto/chelsio/chtls/chtls.h 	struct ulp_mem_rw req;
req               165 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_abort_req *req = cplhdr(skb);
req               169 drivers/crypto/chelsio/chtls/chtls_cm.c 	req->cmd = CPL_ABORT_NO_RST;
req               186 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_abort_req *req;
req               194 drivers/crypto/chelsio/chtls/chtls_cm.c 		skb = alloc_ctrl_skb(csk->txdata_skb_cache, sizeof(*req));
req               196 drivers/crypto/chelsio/chtls/chtls_cm.c 	req = (struct cpl_abort_req *)skb_put(skb, sizeof(*req));
req               197 drivers/crypto/chelsio/chtls/chtls_cm.c 	INIT_TP_WR_CPL(req, CPL_ABORT_REQ, csk->tid);
req               199 drivers/crypto/chelsio/chtls/chtls_cm.c 	req->rsvd0 = htonl(tp->snd_nxt);
req               200 drivers/crypto/chelsio/chtls/chtls_cm.c 	req->rsvd1 = !csk_flag_nochk(csk, CSK_TX_DATA_SENT);
req               201 drivers/crypto/chelsio/chtls/chtls_cm.c 	req->cmd = mode;
req               257 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_close_con_req *req;
req               268 drivers/crypto/chelsio/chtls/chtls_cm.c 	req = (struct cpl_close_con_req *)__skb_put(skb, len);
req               269 drivers/crypto/chelsio/chtls/chtls_cm.c 	memset(req, 0, len);
req               270 drivers/crypto/chelsio/chtls/chtls_cm.c 	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) |
req               271 drivers/crypto/chelsio/chtls/chtls_cm.c 			      FW_WR_IMMDLEN_V(sizeof(*req) -
req               272 drivers/crypto/chelsio/chtls/chtls_cm.c 					      sizeof(req->wr)));
req               273 drivers/crypto/chelsio/chtls/chtls_cm.c 	req->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)) |
req               276 drivers/crypto/chelsio/chtls/chtls_cm.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
req               473 drivers/crypto/chelsio/chtls/chtls_cm.c 		struct request_sock *req = *pprev;
req               475 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (req->rsk_ops == &chtls_rsk_ops) {
req               476 drivers/crypto/chelsio/chtls/chtls_cm.c 			struct sock *child = req->sk;
req               478 drivers/crypto/chelsio/chtls/chtls_cm.c 			*pprev = req->dl_next;
req               480 drivers/crypto/chelsio/chtls/chtls_cm.c 			reqsk_put(req);
req               490 drivers/crypto/chelsio/chtls/chtls_cm.c 			pprev = &req->dl_next;
req               562 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct request_sock *req;
req               566 drivers/crypto/chelsio/chtls/chtls_cm.c 	req = csk->passive_reap_next;
req               568 drivers/crypto/chelsio/chtls/chtls_cm.c 	reqsk_queue_removed(&inet_csk(parent)->icsk_accept_queue, req);
req               570 drivers/crypto/chelsio/chtls/chtls_cm.c 	chtls_reqsk_free(req);
req               862 drivers/crypto/chelsio/chtls/chtls_cm.c 				     struct cpl_pass_accept_req *req)
req               873 drivers/crypto/chelsio/chtls/chtls_cm.c 	mss = ntohs(req->tcpopt.mss);
req               881 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (req->tcpopt.tstamp)
req               919 drivers/crypto/chelsio/chtls/chtls_cm.c 				  struct cpl_pass_accept_req *req,
req               946 drivers/crypto/chelsio/chtls/chtls_cm.c 					req);
req               963 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (req->tcpopt.tstamp)
req               965 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (req->tcpopt.sack)
req               967 drivers/crypto/chelsio/chtls/chtls_cm.c 	hlen = ntohl(req->hdr_len);
req               969 drivers/crypto/chelsio/chtls/chtls_cm.c 	tcph = (struct tcphdr *)((u8 *)(req + 1) +
req              1024 drivers/crypto/chelsio/chtls/chtls_cm.c 				    const struct cpl_pass_accept_req *req,
req              1076 drivers/crypto/chelsio/chtls/chtls_cm.c 	oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid));
req              1085 drivers/crypto/chelsio/chtls/chtls_cm.c 	csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
req              1125 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_tid_release *req;
req              1129 drivers/crypto/chelsio/chtls/chtls_cm.c 	req = (struct cpl_tid_release *)__skb_put(skb, len);
req              1130 drivers/crypto/chelsio/chtls/chtls_cm.c 	memset(req, 0, len);
req              1132 drivers/crypto/chelsio/chtls/chtls_cm.c 	INIT_TP_WR_CPL(req, CPL_TID_RELEASE, tid);
req              1149 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_pass_accept_req *req;
req              1169 drivers/crypto/chelsio/chtls/chtls_cm.c 	req = cplhdr(skb) + RSS_HDR;
req              1170 drivers/crypto/chelsio/chtls/chtls_cm.c 	tid = GET_TID(req);
req              1173 drivers/crypto/chelsio/chtls/chtls_cm.c 	stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
req              1206 drivers/crypto/chelsio/chtls/chtls_cm.c 	eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len));
req              1208 drivers/crypto/chelsio/chtls/chtls_cm.c 		eh = (struct ethhdr *)(req + 1);
req              1212 drivers/crypto/chelsio/chtls/chtls_cm.c 		vlan_eh = (struct vlan_ethhdr *)(req + 1);
req              1220 drivers/crypto/chelsio/chtls/chtls_cm.c 	skb_set_network_header(skb, (void *)iph - (void *)req);
req              1227 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (req->tcpopt.wsf <= 14 &&
req              1230 drivers/crypto/chelsio/chtls/chtls_cm.c 		inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
req              1241 drivers/crypto/chelsio/chtls/chtls_cm.c 	newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
req              1255 drivers/crypto/chelsio/chtls/chtls_cm.c 	chtls_pass_accept_rpl(reply_skb, req, tid);
req              1272 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_pass_accept_req *req = cplhdr(skb) + RSS_HDR;
req              1279 drivers/crypto/chelsio/chtls/chtls_cm.c 	stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
req              1280 drivers/crypto/chelsio/chtls/chtls_cm.c 	tid = GET_TID(req);
req              1419 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_pass_establish *req = cplhdr(skb) + RSS_HDR;
req              1424 drivers/crypto/chelsio/chtls/chtls_cm.c 	hwtid = GET_TID(req);
req              1440 drivers/crypto/chelsio/chtls/chtls_cm.c 		make_established(sk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
req              1441 drivers/crypto/chelsio/chtls/chtls_cm.c 		stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
req              1569 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_rx_data *req = cplhdr(skb) + RSS_HDR;
req              1570 drivers/crypto/chelsio/chtls/chtls_cm.c 	unsigned int hwtid = GET_TID(req);
req              1621 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_tls_data *req = cplhdr(skb);
req              1622 drivers/crypto/chelsio/chtls/chtls_cm.c 	unsigned int hwtid = GET_TID(req);
req              1690 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_rx_tls_cmp *req = cplhdr(skb);
req              1691 drivers/crypto/chelsio/chtls/chtls_cm.c 	unsigned int hwtid = GET_TID(req);
req              1819 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_abort_req_rss *req = cplhdr(skb);
req              1825 drivers/crypto/chelsio/chtls/chtls_cm.c 	set_abort_rpl_wr(reply_skb, GET_TID(req),
req              1826 drivers/crypto/chelsio/chtls/chtls_cm.c 			 (req->status & CPL_ABORT_NO_RST));
req              1827 drivers/crypto/chelsio/chtls/chtls_cm.c 	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, req->status >> 1);
req              1835 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_abort_req_rss *req = cplhdr(skb);
req              1845 drivers/crypto/chelsio/chtls/chtls_cm.c 		req->status = (queue << 1);
req              1850 drivers/crypto/chelsio/chtls/chtls_cm.c 	set_abort_rpl_wr(reply_skb, GET_TID(req), status);
req              1883 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
req              1889 drivers/crypto/chelsio/chtls/chtls_cm.c 	tid = GET_TID(req);
req              1893 drivers/crypto/chelsio/chtls/chtls_cm.c 		req->status = (queue << 1) | status;
req              1972 drivers/crypto/chelsio/chtls/chtls_cm.c 	const struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
req              1977 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (is_neg_adv(req->status)) {
req              2042 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_peer_close *req = cplhdr(skb) + RSS_HDR;
req              2044 drivers/crypto/chelsio/chtls/chtls_cm.c 	unsigned int hwtid = GET_TID(req);
req               118 drivers/crypto/chelsio/chtls/chtls_cm.h static void chtls_rsk_destructor(struct request_sock *req)
req               135 drivers/crypto/chelsio/chtls/chtls_cm.h static inline void chtls_reqsk_free(struct request_sock *req)
req               137 drivers/crypto/chelsio/chtls/chtls_cm.h 	if (req->rsk_listener)
req               138 drivers/crypto/chelsio/chtls/chtls_cm.h 		sock_put(req->rsk_listener);
req               139 drivers/crypto/chelsio/chtls/chtls_cm.h 	kmem_cache_free(req->rsk_ops->slab, req);
req                24 drivers/crypto/chelsio/chtls/chtls_hw.c 				   struct cpl_set_tcb_field *req, u16 word,
req                29 drivers/crypto/chelsio/chtls/chtls_hw.c 	INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, csk->tid);
req                30 drivers/crypto/chelsio/chtls/chtls_hw.c 	req->wr.wr_mid |= htonl(FW_WR_FLOWID_V(csk->tid));
req                31 drivers/crypto/chelsio/chtls/chtls_hw.c 	req->reply_ctrl = htons(NO_REPLY_V(no_reply) |
req                33 drivers/crypto/chelsio/chtls/chtls_hw.c 	req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(cookie));
req                34 drivers/crypto/chelsio/chtls/chtls_hw.c 	req->mask = cpu_to_be64(mask);
req                35 drivers/crypto/chelsio/chtls/chtls_hw.c 	req->val = cpu_to_be64(val);
req                36 drivers/crypto/chelsio/chtls/chtls_hw.c 	sc = (struct ulptx_idata *)(req + 1);
req                44 drivers/crypto/chelsio/chtls/chtls_hw.c 	struct cpl_set_tcb_field *req;
req                49 drivers/crypto/chelsio/chtls/chtls_hw.c 	wrlen = roundup(sizeof(*req) + sizeof(*sc), 16);
req                52 drivers/crypto/chelsio/chtls/chtls_hw.c 	req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
req                53 drivers/crypto/chelsio/chtls/chtls_hw.c 	__set_tcb_field_direct(csk, req, word, mask, val, cookie, no_reply);
req                63 drivers/crypto/chelsio/chtls/chtls_hw.c 	struct cpl_set_tcb_field *req;
req                71 drivers/crypto/chelsio/chtls/chtls_hw.c 	wrlen = roundup(sizeof(*req) + sizeof(*sc), 16);
req               341 drivers/crypto/chelsio/chtls/chtls_hw.c 	kwr->req.cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
req               344 drivers/crypto/chelsio/chtls/chtls_hw.c 	kwr->req.len16 = cpu_to_be32((csk->tid << 8) |
req               346 drivers/crypto/chelsio/chtls/chtls_hw.c 	kwr->req.dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(klen >> 5));
req               347 drivers/crypto/chelsio/chtls/chtls_hw.c 	kwr->req.lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(kaddr));
req               354 drivers/crypto/chelsio/chtls/chtls_io.c 	unsigned char *req;
req               375 drivers/crypto/chelsio/chtls/chtls_io.c 	req = (unsigned char *)__skb_push(skb, sizeof(struct cpl_tx_tls_sfo));
req               376 drivers/crypto/chelsio/chtls/chtls_io.c 	req_cpl = (struct cpl_tx_tls_sfo *)req;
req               377 drivers/crypto/chelsio/chtls/chtls_io.c 	req = (unsigned char *)__skb_push(skb, (sizeof(struct
req               380 drivers/crypto/chelsio/chtls/chtls_io.c 	req_wr = (struct fw_tlstx_data_wr *)req;
req               512 drivers/crypto/chelsio/chtls/chtls_io.c 	struct fw_ofld_tx_data_wr *req;
req               520 drivers/crypto/chelsio/chtls/chtls_io.c 	req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
req               521 drivers/crypto/chelsio/chtls/chtls_io.c 	req->op_to_immdlen = htonl(WR_OP_V(opcode) |
req               524 drivers/crypto/chelsio/chtls/chtls_io.c 	req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
req               533 drivers/crypto/chelsio/chtls/chtls_io.c 	req->tunnel_to_proxy = htonl(wr_ulp_mode_force |
req               537 drivers/crypto/chelsio/chtls/chtls_io.c 	req->plen = htonl(len);
req               689 drivers/crypto/chelsio/chtls/chtls_io.c 			struct cpl_close_con_req *req = cplhdr(skb);
req               691 drivers/crypto/chelsio/chtls/chtls_io.c 					     (OPCODE_TID(req)));
req               699 drivers/crypto/chelsio/chtls/chtls_io.c 				req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
req              1341 drivers/crypto/chelsio/chtls/chtls_io.c 	struct cpl_rx_data_ack *req;
req              1344 drivers/crypto/chelsio/chtls/chtls_io.c 	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
req              1347 drivers/crypto/chelsio/chtls/chtls_io.c 	__skb_put(skb, sizeof(*req));
req              1348 drivers/crypto/chelsio/chtls/chtls_io.c 	req = (struct cpl_rx_data_ack *)skb->head;
req              1351 drivers/crypto/chelsio/chtls/chtls_io.c 	INIT_TP_WR(req, csk->tid);
req              1352 drivers/crypto/chelsio/chtls/chtls_io.c 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
req              1354 drivers/crypto/chelsio/chtls/chtls_io.c 	req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits) |
req               278 drivers/crypto/geode-aes.c static int geode_skcipher_crypt(struct skcipher_request *req, int mode, int dir)
req               280 drivers/crypto/geode-aes.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               287 drivers/crypto/geode-aes.c 		struct skcipher_request *subreq = skcipher_request_ctx(req);
req               289 drivers/crypto/geode-aes.c 		*subreq = *req;
req               297 drivers/crypto/geode-aes.c 	err = skcipher_walk_virt(&walk, req, false);
req               309 drivers/crypto/geode-aes.c static int geode_cbc_encrypt(struct skcipher_request *req)
req               311 drivers/crypto/geode-aes.c 	return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_ENCRYPT);
req               314 drivers/crypto/geode-aes.c static int geode_cbc_decrypt(struct skcipher_request *req)
req               316 drivers/crypto/geode-aes.c 	return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_DECRYPT);
req               319 drivers/crypto/geode-aes.c static int geode_ecb_encrypt(struct skcipher_request *req)
req               321 drivers/crypto/geode-aes.c 	return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_ENCRYPT);
req               324 drivers/crypto/geode-aes.c static int geode_ecb_decrypt(struct skcipher_request *req)
req               326 drivers/crypto/geode-aes.c 	return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_DECRYPT);
req              1436 drivers/crypto/hifn_795x.c static int hifn_cipher_walk(struct ablkcipher_request *req,
req              1440 drivers/crypto/hifn_795x.c 	unsigned int nbytes = req->nbytes, offset, copy, diff;
req              1449 drivers/crypto/hifn_795x.c 		dst = &req->dst[idx];
req              1499 drivers/crypto/hifn_795x.c 				dst = &req->dst[idx];
req              1521 drivers/crypto/hifn_795x.c static int hifn_setup_session(struct ablkcipher_request *req)
req              1523 drivers/crypto/hifn_795x.c 	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
req              1524 drivers/crypto/hifn_795x.c 	struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
req              1527 drivers/crypto/hifn_795x.c 	unsigned int nbytes = req->nbytes, idx = 0;
req              1537 drivers/crypto/hifn_795x.c 		dst = &req->dst[idx];
req              1554 drivers/crypto/hifn_795x.c 	sg_num = hifn_cipher_walk(req, &rctx->walk);
req              1566 drivers/crypto/hifn_795x.c 	err = hifn_setup_dma(dev, ctx, rctx, req->src, req->dst, req->nbytes, req);
req              1663 drivers/crypto/hifn_795x.c static void hifn_process_ready(struct ablkcipher_request *req, int error)
req              1665 drivers/crypto/hifn_795x.c 	struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
req              1668 drivers/crypto/hifn_795x.c 		unsigned int nbytes = req->nbytes;
req              1675 drivers/crypto/hifn_795x.c 			dst = &req->dst[idx];
req              1705 drivers/crypto/hifn_795x.c 	req->base.complete(&req->base, error);
req              1913 drivers/crypto/hifn_795x.c 	struct ablkcipher_request *req;
req              1929 drivers/crypto/hifn_795x.c 		req = ablkcipher_request_cast(async_req);
req              1932 drivers/crypto/hifn_795x.c 		hifn_process_ready(req, -ENODEV);
req              1977 drivers/crypto/hifn_795x.c static int hifn_handle_req(struct ablkcipher_request *req)
req              1979 drivers/crypto/hifn_795x.c 	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
req              1983 drivers/crypto/hifn_795x.c 	if (dev->started + DIV_ROUND_UP(req->nbytes, PAGE_SIZE) <= HIFN_QUEUE_LENGTH)
req              1984 drivers/crypto/hifn_795x.c 		err = hifn_setup_session(req);
req              1990 drivers/crypto/hifn_795x.c 		err = ablkcipher_enqueue_request(&dev->queue, req);
req              1997 drivers/crypto/hifn_795x.c static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
req              2000 drivers/crypto/hifn_795x.c 	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
req              2001 drivers/crypto/hifn_795x.c 	struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
req              2004 drivers/crypto/hifn_795x.c 	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
req              2006 drivers/crypto/hifn_795x.c 	if (req->info && mode != ACRYPTO_MODE_ECB) {
req              2025 drivers/crypto/hifn_795x.c 	rctx->iv = req->info;
req              2034 drivers/crypto/hifn_795x.c 	return hifn_handle_req(req);
req              2040 drivers/crypto/hifn_795x.c 	struct ablkcipher_request *req;
req              2056 drivers/crypto/hifn_795x.c 		req = ablkcipher_request_cast(async_req);
req              2058 drivers/crypto/hifn_795x.c 		err = hifn_handle_req(req);
req              2066 drivers/crypto/hifn_795x.c static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
req              2070 drivers/crypto/hifn_795x.c 	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
req              2073 drivers/crypto/hifn_795x.c 	err = hifn_setup_crypto_req(req, op, type, mode);
req              2086 drivers/crypto/hifn_795x.c static inline int hifn_encrypt_aes_ecb(struct ablkcipher_request *req)
req              2088 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
req              2091 drivers/crypto/hifn_795x.c static inline int hifn_encrypt_aes_cbc(struct ablkcipher_request *req)
req              2093 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
req              2096 drivers/crypto/hifn_795x.c static inline int hifn_encrypt_aes_cfb(struct ablkcipher_request *req)
req              2098 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
req              2101 drivers/crypto/hifn_795x.c static inline int hifn_encrypt_aes_ofb(struct ablkcipher_request *req)
req              2103 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
req              2110 drivers/crypto/hifn_795x.c static inline int hifn_decrypt_aes_ecb(struct ablkcipher_request *req)
req              2112 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
req              2115 drivers/crypto/hifn_795x.c static inline int hifn_decrypt_aes_cbc(struct ablkcipher_request *req)
req              2117 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
req              2120 drivers/crypto/hifn_795x.c static inline int hifn_decrypt_aes_cfb(struct ablkcipher_request *req)
req              2122 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
req              2125 drivers/crypto/hifn_795x.c static inline int hifn_decrypt_aes_ofb(struct ablkcipher_request *req)
req              2127 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
req              2134 drivers/crypto/hifn_795x.c static inline int hifn_encrypt_des_ecb(struct ablkcipher_request *req)
req              2136 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
req              2139 drivers/crypto/hifn_795x.c static inline int hifn_encrypt_des_cbc(struct ablkcipher_request *req)
req              2141 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
req              2144 drivers/crypto/hifn_795x.c static inline int hifn_encrypt_des_cfb(struct ablkcipher_request *req)
req              2146 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
req              2149 drivers/crypto/hifn_795x.c static inline int hifn_encrypt_des_ofb(struct ablkcipher_request *req)
req              2151 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
req              2158 drivers/crypto/hifn_795x.c static inline int hifn_decrypt_des_ecb(struct ablkcipher_request *req)
req              2160 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
req              2163 drivers/crypto/hifn_795x.c static inline int hifn_decrypt_des_cbc(struct ablkcipher_request *req)
req              2165 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
req              2168 drivers/crypto/hifn_795x.c static inline int hifn_decrypt_des_cfb(struct ablkcipher_request *req)
req              2170 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
req              2173 drivers/crypto/hifn_795x.c static inline int hifn_decrypt_des_ofb(struct ablkcipher_request *req)
req              2175 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
req              2182 drivers/crypto/hifn_795x.c static inline int hifn_encrypt_3des_ecb(struct ablkcipher_request *req)
req              2184 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
req              2187 drivers/crypto/hifn_795x.c static inline int hifn_encrypt_3des_cbc(struct ablkcipher_request *req)
req              2189 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
req              2192 drivers/crypto/hifn_795x.c static inline int hifn_encrypt_3des_cfb(struct ablkcipher_request *req)
req              2194 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
req              2197 drivers/crypto/hifn_795x.c static inline int hifn_encrypt_3des_ofb(struct ablkcipher_request *req)
req              2199 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
req              2204 drivers/crypto/hifn_795x.c static inline int hifn_decrypt_3des_ecb(struct ablkcipher_request *req)
req              2206 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
req              2209 drivers/crypto/hifn_795x.c static inline int hifn_decrypt_3des_cbc(struct ablkcipher_request *req)
req              2211 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
req              2214 drivers/crypto/hifn_795x.c static inline int hifn_decrypt_3des_cfb(struct ablkcipher_request *req)
req              2216 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
req              2219 drivers/crypto/hifn_795x.c static inline int hifn_decrypt_3des_ofb(struct ablkcipher_request *req)
req              2221 drivers/crypto/hifn_795x.c 	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
req               127 drivers/crypto/hisilicon/sec/sec_algs.c 					   struct sec_bd_info *req,
req               132 drivers/crypto/hisilicon/sec/sec_algs.c 	memset(req, 0, sizeof(*req));
req               133 drivers/crypto/hisilicon/sec/sec_algs.c 	req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
req               134 drivers/crypto/hisilicon/sec/sec_algs.c 	req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
req               135 drivers/crypto/hisilicon/sec/sec_algs.c 	req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
req               136 drivers/crypto/hisilicon/sec/sec_algs.c 	req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;
req               138 drivers/crypto/hisilicon/sec/sec_algs.c 	req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
req               139 drivers/crypto/hisilicon/sec/sec_algs.c 	req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
req               403 drivers/crypto/hisilicon/sec/sec_algs.c 			ret = sec_queue_send(queue, &el->req, sec_req);
req               457 drivers/crypto/hisilicon/sec/sec_algs.c 		if (sec_req_el->req.w0 & SEC_BD_W0_DE)
req               493 drivers/crypto/hisilicon/sec/sec_algs.c 			sec_queue_send(ctx->queue, &nextrequest->req,
req               636 drivers/crypto/hisilicon/sec/sec_algs.c 	struct sec_bd_info *req;
req               643 drivers/crypto/hisilicon/sec/sec_algs.c 	req = &el->req;
req               644 drivers/crypto/hisilicon/sec/sec_algs.c 	memcpy(req, template, sizeof(*req));
req               646 drivers/crypto/hisilicon/sec/sec_algs.c 	req->w0 &= ~SEC_BD_W0_CIPHER_M;
req               648 drivers/crypto/hisilicon/sec/sec_algs.c 		req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
req               650 drivers/crypto/hisilicon/sec/sec_algs.c 		req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;
req               652 drivers/crypto/hisilicon/sec/sec_algs.c 	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
req               653 drivers/crypto/hisilicon/sec/sec_algs.c 	req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
req               656 drivers/crypto/hisilicon/sec/sec_algs.c 	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
req               657 drivers/crypto/hisilicon/sec/sec_algs.c 	req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
req               661 drivers/crypto/hisilicon/sec/sec_algs.c 	req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
req               665 drivers/crypto/hisilicon/sec/sec_algs.c 	req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
req               666 drivers/crypto/hisilicon/sec/sec_algs.c 	req->w1 |= SEC_BD_W1_ADDR_TYPE;
req               675 drivers/crypto/hisilicon/sec/sec_algs.c 	req->data_addr_lo = lower_32_bits(el->dma_in);
req               676 drivers/crypto/hisilicon/sec/sec_algs.c 	req->data_addr_hi = upper_32_bits(el->dma_in);
req               686 drivers/crypto/hisilicon/sec/sec_algs.c 		req->w0 |= SEC_BD_W0_DE;
req               687 drivers/crypto/hisilicon/sec/sec_algs.c 		req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
req               688 drivers/crypto/hisilicon/sec/sec_algs.c 		req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);
req               691 drivers/crypto/hisilicon/sec/sec_algs.c 		req->w0 &= ~SEC_BD_W0_DE;
req               692 drivers/crypto/hisilicon/sec/sec_algs.c 		req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
req               693 drivers/crypto/hisilicon/sec/sec_algs.c 		req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
req               783 drivers/crypto/hisilicon/sec/sec_algs.c 		el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
req               784 drivers/crypto/hisilicon/sec/sec_algs.c 		el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
req               860 drivers/crypto/hisilicon/sec/sec_algs.c static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
req               862 drivers/crypto/hisilicon/sec/sec_algs.c 	return sec_alg_skcipher_crypto(req, true);
req               865 drivers/crypto/hisilicon/sec/sec_algs.c static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
req               867 drivers/crypto/hisilicon/sec/sec_algs.c 	return sec_alg_skcipher_crypto(req, false);
req               284 drivers/crypto/hisilicon/sec/sec_drv.h 	void (*cb)(struct sec_bd_info *resp, struct crypto_async_request *req);
req               304 drivers/crypto/hisilicon/sec/sec_drv.h 	struct sec_bd_info req;
req                47 drivers/crypto/hisilicon/zip/zip_crypto.c 	struct acomp_req *req;
req               300 drivers/crypto/hisilicon/zip/zip_crypto.c 				struct hisi_zip_req *req)
req               305 drivers/crypto/hisilicon/zip/zip_crypto.c 	clear_bit(req->req_id, req_q->req_bitmap);
req               306 drivers/crypto/hisilicon/zip/zip_crypto.c 	memset(req, 0, sizeof(struct hisi_zip_req));
req               315 drivers/crypto/hisilicon/zip/zip_crypto.c 	struct hisi_zip_req *req = req_q->q + sqe->tag;
req               316 drivers/crypto/hisilicon/zip/zip_crypto.c 	struct acomp_req *acomp_req = req->req;
req               331 drivers/crypto/hisilicon/zip/zip_crypto.c 	hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
req               332 drivers/crypto/hisilicon/zip/zip_crypto.c 	hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);
req               340 drivers/crypto/hisilicon/zip/zip_crypto.c 	hisi_zip_remove_req(qp_ctx, req);
req               426 drivers/crypto/hisilicon/zip/zip_crypto.c static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
req               447 drivers/crypto/hisilicon/zip/zip_crypto.c 	req_cache->req = req;
req               462 drivers/crypto/hisilicon/zip/zip_crypto.c static int hisi_zip_do_work(struct hisi_zip_req *req,
req               466 drivers/crypto/hisilicon/zip/zip_crypto.c 	struct acomp_req *a_req = req->req;
req               477 drivers/crypto/hisilicon/zip/zip_crypto.c 	req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
req               478 drivers/crypto/hisilicon/zip/zip_crypto.c 						    req->req_id << 1, &input);
req               479 drivers/crypto/hisilicon/zip/zip_crypto.c 	if (IS_ERR(req->hw_src))
req               480 drivers/crypto/hisilicon/zip/zip_crypto.c 		return PTR_ERR(req->hw_src);
req               481 drivers/crypto/hisilicon/zip/zip_crypto.c 	req->dma_src = input;
req               483 drivers/crypto/hisilicon/zip/zip_crypto.c 	req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
req               484 drivers/crypto/hisilicon/zip/zip_crypto.c 						    (req->req_id << 1) + 1,
req               486 drivers/crypto/hisilicon/zip/zip_crypto.c 	if (IS_ERR(req->hw_dst)) {
req               487 drivers/crypto/hisilicon/zip/zip_crypto.c 		ret = PTR_ERR(req->hw_dst);
req               490 drivers/crypto/hisilicon/zip/zip_crypto.c 	req->dma_dst = output;
req               493 drivers/crypto/hisilicon/zip/zip_crypto.c 			  a_req->dlen, req->sskip, req->dskip);
req               495 drivers/crypto/hisilicon/zip/zip_crypto.c 	hisi_zip_config_tag(zip_sqe, req->req_id);
req               505 drivers/crypto/hisilicon/zip/zip_crypto.c 	hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
req               507 drivers/crypto/hisilicon/zip/zip_crypto.c 	hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
req               515 drivers/crypto/hisilicon/zip/zip_crypto.c 	struct hisi_zip_req *req;
req               524 drivers/crypto/hisilicon/zip/zip_crypto.c 	req = hisi_zip_create_req(acomp_req, qp_ctx, (size_t)head_size, true);
req               525 drivers/crypto/hisilicon/zip/zip_crypto.c 	if (IS_ERR(req))
req               526 drivers/crypto/hisilicon/zip/zip_crypto.c 		return PTR_ERR(req);
req               528 drivers/crypto/hisilicon/zip/zip_crypto.c 	ret = hisi_zip_do_work(req, qp_ctx);
req               530 drivers/crypto/hisilicon/zip/zip_crypto.c 		hisi_zip_remove_req(qp_ctx, req);
req               539 drivers/crypto/hisilicon/zip/zip_crypto.c 	struct hisi_zip_req *req;
req               545 drivers/crypto/hisilicon/zip/zip_crypto.c 	req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false);
req               546 drivers/crypto/hisilicon/zip/zip_crypto.c 	if (IS_ERR(req))
req               547 drivers/crypto/hisilicon/zip/zip_crypto.c 		return PTR_ERR(req);
req               549 drivers/crypto/hisilicon/zip/zip_crypto.c 	ret = hisi_zip_do_work(req, qp_ctx);
req               551 drivers/crypto/hisilicon/zip/zip_crypto.c 		hisi_zip_remove_req(qp_ctx, req);
req               132 drivers/crypto/img-hash.c 	struct ahash_request	*req;
req               165 drivers/crypto/img-hash.c 	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
req               211 drivers/crypto/img-hash.c 	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
req               224 drivers/crypto/img-hash.c 	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
req               254 drivers/crypto/img-hash.c 	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
req               256 drivers/crypto/img-hash.c 	ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
req               257 drivers/crypto/img-hash.c 					ctx->buffer, hdev->req->nbytes);
req               259 drivers/crypto/img-hash.c 	ctx->total = hdev->req->nbytes;
req               269 drivers/crypto/img-hash.c static int img_hash_finish(struct ahash_request *req)
req               271 drivers/crypto/img-hash.c 	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
req               273 drivers/crypto/img-hash.c 	if (!req->result)
req               276 drivers/crypto/img-hash.c 	memcpy(req->result, ctx->digest, ctx->digsize);
req               281 drivers/crypto/img-hash.c static void img_hash_copy_hash(struct ahash_request *req)
req               283 drivers/crypto/img-hash.c 	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
req               291 drivers/crypto/img-hash.c static void img_hash_finish_req(struct ahash_request *req, int err)
req               293 drivers/crypto/img-hash.c 	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
req               297 drivers/crypto/img-hash.c 		img_hash_copy_hash(req);
req               299 drivers/crypto/img-hash.c 			err = img_hash_finish(req);
req               308 drivers/crypto/img-hash.c 	if (req->base.complete)
req               309 drivers/crypto/img-hash.c 		req->base.complete(&req->base, err);
req               314 drivers/crypto/img-hash.c 	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
req               359 drivers/crypto/img-hash.c 	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
req               364 drivers/crypto/img-hash.c 	if (!hdev->req || !ctx->sg)
req               430 drivers/crypto/img-hash.c 	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
req               440 drivers/crypto/img-hash.c 	struct ahash_request *req = hdev->req;
req               441 drivers/crypto/img-hash.c 	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
req               446 drivers/crypto/img-hash.c 	if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
req               448 drivers/crypto/img-hash.c 			req->nbytes);
req               452 drivers/crypto/img-hash.c 			req->nbytes);
req               467 drivers/crypto/img-hash.c 	nbits = (u64)hdev->req->nbytes << 3;
req               481 drivers/crypto/img-hash.c static int img_hash_init(struct ahash_request *req)
req               483 drivers/crypto/img-hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               484 drivers/crypto/img-hash.c 	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
req               488 drivers/crypto/img-hash.c 	rctx->fallback_req.base.flags =	req->base.flags
req               495 drivers/crypto/img-hash.c 				 struct ahash_request *req)
req               504 drivers/crypto/img-hash.c 	if (req)
req               505 drivers/crypto/img-hash.c 		res = ahash_enqueue_request(&hdev->queue, req);
req               525 drivers/crypto/img-hash.c 	req = ahash_request_cast(async_req);
req               526 drivers/crypto/img-hash.c 	hdev->req = req;
req               528 drivers/crypto/img-hash.c 	ctx = ahash_request_ctx(req);
req               531 drivers/crypto/img-hash.c 		 ctx->op, req->nbytes);
req               540 drivers/crypto/img-hash.c 		img_hash_finish_req(req, err);
req               545 drivers/crypto/img-hash.c static int img_hash_update(struct ahash_request *req)
req               547 drivers/crypto/img-hash.c 	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
req               548 drivers/crypto/img-hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               552 drivers/crypto/img-hash.c 	rctx->fallback_req.base.flags = req->base.flags
req               554 drivers/crypto/img-hash.c 	rctx->fallback_req.nbytes = req->nbytes;
req               555 drivers/crypto/img-hash.c 	rctx->fallback_req.src = req->src;
req               560 drivers/crypto/img-hash.c static int img_hash_final(struct ahash_request *req)
req               562 drivers/crypto/img-hash.c 	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
req               563 drivers/crypto/img-hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               567 drivers/crypto/img-hash.c 	rctx->fallback_req.base.flags = req->base.flags
req               569 drivers/crypto/img-hash.c 	rctx->fallback_req.result = req->result;
req               574 drivers/crypto/img-hash.c static int img_hash_finup(struct ahash_request *req)
req               576 drivers/crypto/img-hash.c 	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
req               577 drivers/crypto/img-hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               581 drivers/crypto/img-hash.c 	rctx->fallback_req.base.flags = req->base.flags
req               583 drivers/crypto/img-hash.c 	rctx->fallback_req.nbytes = req->nbytes;
req               584 drivers/crypto/img-hash.c 	rctx->fallback_req.src = req->src;
req               585 drivers/crypto/img-hash.c 	rctx->fallback_req.result = req->result;
req               590 drivers/crypto/img-hash.c static int img_hash_import(struct ahash_request *req, const void *in)
req               592 drivers/crypto/img-hash.c 	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
req               593 drivers/crypto/img-hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               597 drivers/crypto/img-hash.c 	rctx->fallback_req.base.flags = req->base.flags
req               603 drivers/crypto/img-hash.c static int img_hash_export(struct ahash_request *req, void *out)
req               605 drivers/crypto/img-hash.c 	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
req               606 drivers/crypto/img-hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               610 drivers/crypto/img-hash.c 	rctx->fallback_req.base.flags = req->base.flags
req               616 drivers/crypto/img-hash.c static int img_hash_digest(struct ahash_request *req)
req               618 drivers/crypto/img-hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               620 drivers/crypto/img-hash.c 	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
req               662 drivers/crypto/img-hash.c 	ctx->total = req->nbytes;
req               663 drivers/crypto/img-hash.c 	ctx->sg = req->src;
req               664 drivers/crypto/img-hash.c 	ctx->sgfirst = req->src;
req               667 drivers/crypto/img-hash.c 	err = img_hash_handle_queue(tctx->hdev, req);
req               926 drivers/crypto/img-hash.c 	img_hash_finish_req(hdev->req, err);
req               771 drivers/crypto/inside-secure/safexcel.c 	struct crypto_async_request *req, *backlog;
req               778 drivers/crypto/inside-secure/safexcel.c 	req = priv->ring[ring].req;
req               780 drivers/crypto/inside-secure/safexcel.c 	if (req)
req               786 drivers/crypto/inside-secure/safexcel.c 		req = crypto_dequeue_request(&priv->ring[ring].queue);
req               789 drivers/crypto/inside-secure/safexcel.c 		if (!req) {
req               790 drivers/crypto/inside-secure/safexcel.c 			priv->ring[ring].req = NULL;
req               796 drivers/crypto/inside-secure/safexcel.c 		ctx = crypto_tfm_ctx(req->tfm);
req               797 drivers/crypto/inside-secure/safexcel.c 		ret = ctx->send(req, ring, &commands, &results);
req               820 drivers/crypto/inside-secure/safexcel.c 	priv->ring[ring].req = req;
req               887 drivers/crypto/inside-secure/safexcel.c 				 struct crypto_async_request *req)
req               891 drivers/crypto/inside-secure/safexcel.c 	priv->ring[ring].rdr_req[i] = req;
req               917 drivers/crypto/inside-secure/safexcel.c void safexcel_inv_complete(struct crypto_async_request *req, int error)
req               919 drivers/crypto/inside-secure/safexcel.c 	struct safexcel_inv_result *result = req->data;
req               967 drivers/crypto/inside-secure/safexcel.c 	struct crypto_async_request *req;
req               982 drivers/crypto/inside-secure/safexcel.c 		req = safexcel_rdr_req_get(priv, ring);
req               984 drivers/crypto/inside-secure/safexcel.c 		ctx = crypto_tfm_ctx(req->tfm);
req               985 drivers/crypto/inside-secure/safexcel.c 		ndesc = ctx->handle_result(priv, ring, req,
req               995 drivers/crypto/inside-secure/safexcel.c 			req->complete(req, ret);
req               641 drivers/crypto/inside-secure/safexcel.h 	struct crypto_async_request *req;
req               733 drivers/crypto/inside-secure/safexcel.h 	int (*send)(struct crypto_async_request *req, int ring,
req               736 drivers/crypto/inside-secure/safexcel.h 			     struct crypto_async_request *req, bool *complete,
req               813 drivers/crypto/inside-secure/safexcel.h 			  struct crypto_async_request *req);
req               816 drivers/crypto/inside-secure/safexcel.h void safexcel_inv_complete(struct crypto_async_request *req, int error);
req               836 drivers/crypto/inside-secure/safexcel_cipher.c 	struct skcipher_request *req = skcipher_request_cast(async);
req               837 drivers/crypto/inside-secure/safexcel_cipher.c 	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
req               845 drivers/crypto/inside-secure/safexcel_cipher.c 		err = safexcel_handle_req_result(priv, ring, async, req->src,
req               846 drivers/crypto/inside-secure/safexcel_cipher.c 						 req->dst, req->cryptlen, sreq,
req               858 drivers/crypto/inside-secure/safexcel_cipher.c 	struct aead_request *req = aead_request_cast(async);
req               859 drivers/crypto/inside-secure/safexcel_cipher.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               860 drivers/crypto/inside-secure/safexcel_cipher.c 	struct safexcel_cipher_req *sreq = aead_request_ctx(req);
req               868 drivers/crypto/inside-secure/safexcel_cipher.c 		err = safexcel_handle_req_result(priv, ring, async, req->src,
req               869 drivers/crypto/inside-secure/safexcel_cipher.c 						 req->dst,
req               870 drivers/crypto/inside-secure/safexcel_cipher.c 						 req->cryptlen + crypto_aead_authsize(tfm),
req               897 drivers/crypto/inside-secure/safexcel_cipher.c 	struct skcipher_request *req = skcipher_request_cast(async);
req               898 drivers/crypto/inside-secure/safexcel_cipher.c 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
req               899 drivers/crypto/inside-secure/safexcel_cipher.c 	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
req               908 drivers/crypto/inside-secure/safexcel_cipher.c 		struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
req               915 drivers/crypto/inside-secure/safexcel_cipher.c 		memcpy(input_iv, req->iv, crypto_skcipher_ivsize(skcipher));
req               917 drivers/crypto/inside-secure/safexcel_cipher.c 		ret = safexcel_send_req(async, ring, sreq, req->src,
req               918 drivers/crypto/inside-secure/safexcel_cipher.c 					req->dst, req->cryptlen, 0, 0, input_iv,
req               929 drivers/crypto/inside-secure/safexcel_cipher.c 	struct aead_request *req = aead_request_cast(async);
req               930 drivers/crypto/inside-secure/safexcel_cipher.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               931 drivers/crypto/inside-secure/safexcel_cipher.c 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
req               932 drivers/crypto/inside-secure/safexcel_cipher.c 	struct safexcel_cipher_req *sreq = aead_request_ctx(req);
req               941 drivers/crypto/inside-secure/safexcel_cipher.c 		ret = safexcel_send_req(async, ring, sreq, req->src, req->dst,
req               942 drivers/crypto/inside-secure/safexcel_cipher.c 					req->cryptlen, req->assoclen,
req               943 drivers/crypto/inside-secure/safexcel_cipher.c 					crypto_aead_authsize(tfm), req->iv,
req               985 drivers/crypto/inside-secure/safexcel_cipher.c 	EIP197_REQUEST_ON_STACK(req, skcipher, EIP197_SKCIPHER_REQ_SIZE);
req               986 drivers/crypto/inside-secure/safexcel_cipher.c 	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
req               989 drivers/crypto/inside-secure/safexcel_cipher.c 	memset(req, 0, sizeof(struct skcipher_request));
req               991 drivers/crypto/inside-secure/safexcel_cipher.c 	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req               993 drivers/crypto/inside-secure/safexcel_cipher.c 	skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
req               995 drivers/crypto/inside-secure/safexcel_cipher.c 	return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
req              1000 drivers/crypto/inside-secure/safexcel_cipher.c 	EIP197_REQUEST_ON_STACK(req, aead, EIP197_AEAD_REQ_SIZE);
req              1001 drivers/crypto/inside-secure/safexcel_cipher.c 	struct safexcel_cipher_req *sreq = aead_request_ctx(req);
req              1004 drivers/crypto/inside-secure/safexcel_cipher.c 	memset(req, 0, sizeof(struct aead_request));
req              1006 drivers/crypto/inside-secure/safexcel_cipher.c 	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req              1008 drivers/crypto/inside-secure/safexcel_cipher.c 	aead_request_set_tfm(req, __crypto_aead_cast(tfm));
req              1010 drivers/crypto/inside-secure/safexcel_cipher.c 	return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
req              1050 drivers/crypto/inside-secure/safexcel_cipher.c static int safexcel_encrypt(struct skcipher_request *req)
req              1052 drivers/crypto/inside-secure/safexcel_cipher.c 	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
req              1056 drivers/crypto/inside-secure/safexcel_cipher.c static int safexcel_decrypt(struct skcipher_request *req)
req              1058 drivers/crypto/inside-secure/safexcel_cipher.c 	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
req              1537 drivers/crypto/inside-secure/safexcel_cipher.c static int safexcel_aead_encrypt(struct aead_request *req)
req              1539 drivers/crypto/inside-secure/safexcel_cipher.c 	struct safexcel_cipher_req *creq = aead_request_ctx(req);
req              1541 drivers/crypto/inside-secure/safexcel_cipher.c 	return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT);
req              1544 drivers/crypto/inside-secure/safexcel_cipher.c static int safexcel_aead_decrypt(struct aead_request *req)
req              1546 drivers/crypto/inside-secure/safexcel_cipher.c 	struct safexcel_cipher_req *creq = aead_request_ctx(req);
req              1548 drivers/crypto/inside-secure/safexcel_cipher.c 	return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT);
req              2023 drivers/crypto/inside-secure/safexcel_cipher.c static int safexcel_encrypt_xts(struct skcipher_request *req)
req              2025 drivers/crypto/inside-secure/safexcel_cipher.c 	if (req->cryptlen < XTS_BLOCK_SIZE)
req              2027 drivers/crypto/inside-secure/safexcel_cipher.c 	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
req              2031 drivers/crypto/inside-secure/safexcel_cipher.c static int safexcel_decrypt_xts(struct skcipher_request *req)
req              2033 drivers/crypto/inside-secure/safexcel_cipher.c 	if (req->cryptlen < XTS_BLOCK_SIZE)
req              2035 drivers/crypto/inside-secure/safexcel_cipher.c 	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
req              2261 drivers/crypto/inside-secure/safexcel_cipher.c static int safexcel_ccm_encrypt(struct aead_request *req)
req              2263 drivers/crypto/inside-secure/safexcel_cipher.c 	struct safexcel_cipher_req *creq = aead_request_ctx(req);
req              2265 drivers/crypto/inside-secure/safexcel_cipher.c 	if (req->iv[0] < 1 || req->iv[0] > 7)
req              2268 drivers/crypto/inside-secure/safexcel_cipher.c 	return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT);
req              2271 drivers/crypto/inside-secure/safexcel_cipher.c static int safexcel_ccm_decrypt(struct aead_request *req)
req              2273 drivers/crypto/inside-secure/safexcel_cipher.c 	struct safexcel_cipher_req *creq = aead_request_ctx(req);
req              2275 drivers/crypto/inside-secure/safexcel_cipher.c 	if (req->iv[0] < 1 || req->iv[0] > 7)
req              2278 drivers/crypto/inside-secure/safexcel_cipher.c 	return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT);
req                54 drivers/crypto/inside-secure/safexcel_hash.c static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
req                56 drivers/crypto/inside-secure/safexcel_hash.c 	return req->len - req->processed;
req                79 drivers/crypto/inside-secure/safexcel_hash.c 				     struct safexcel_ahash_req *req,
req                92 drivers/crypto/inside-secure/safexcel_hash.c 	if (!req->processed) {
req                94 drivers/crypto/inside-secure/safexcel_hash.c 		if (req->finish) {
req               112 drivers/crypto/inside-secure/safexcel_hash.c 	memcpy(ctx->base.ctxr->data, req->state, req->state_sz);
req               114 drivers/crypto/inside-secure/safexcel_hash.c 	if (req->finish) {
req               116 drivers/crypto/inside-secure/safexcel_hash.c 		if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ||
req               117 drivers/crypto/inside-secure/safexcel_hash.c 		    req->hmac_zlen || (req->processed != req->block_sz)) {
req               118 drivers/crypto/inside-secure/safexcel_hash.c 			count = req->processed / EIP197_COUNTER_BLOCK_SIZE;
req               132 drivers/crypto/inside-secure/safexcel_hash.c 		if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ||
req               134 drivers/crypto/inside-secure/safexcel_hash.c 		    req->hmac_zlen ||
req               136 drivers/crypto/inside-secure/safexcel_hash.c 		    (req->processed != req->block_sz)) {
req               139 drivers/crypto/inside-secure/safexcel_hash.c 				CONTEXT_CONTROL_SIZE((req->state_sz >> 2) + 1) |
req               143 drivers/crypto/inside-secure/safexcel_hash.c 			if (req->hmac_zlen)
req               148 drivers/crypto/inside-secure/safexcel_hash.c 			ctx->base.ctxr->data[req->state_sz >> 2] =
req               150 drivers/crypto/inside-secure/safexcel_hash.c 			req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req               153 drivers/crypto/inside-secure/safexcel_hash.c 			req->hmac_zlen = false;
req               156 drivers/crypto/inside-secure/safexcel_hash.c 			memcpy(ctx->base.ctxr->data + (req->state_sz >> 2),
req               157 drivers/crypto/inside-secure/safexcel_hash.c 			       ctx->opad, req->state_sz);
req               161 drivers/crypto/inside-secure/safexcel_hash.c 				CONTEXT_CONTROL_SIZE(req->state_sz >> 1) |
req               167 drivers/crypto/inside-secure/safexcel_hash.c 			CONTEXT_CONTROL_SIZE(req->state_sz >> 2) |
req               258 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req               267 drivers/crypto/inside-secure/safexcel_hash.c 	queued = len = safexcel_queued_len(req);
req               273 drivers/crypto/inside-secure/safexcel_hash.c 	if (!req->finish && !req->last_req) {
req               286 drivers/crypto/inside-secure/safexcel_hash.c 				   req->cache_next, extra,
req               301 drivers/crypto/inside-secure/safexcel_hash.c 		req->cache_dma = dma_map_single(priv->dev, req->cache,
req               303 drivers/crypto/inside-secure/safexcel_hash.c 		if (dma_mapping_error(priv->dev, req->cache_dma))
req               306 drivers/crypto/inside-secure/safexcel_hash.c 		req->cache_sz = cache_len;
req               309 drivers/crypto/inside-secure/safexcel_hash.c 						 req->cache_dma, cache_len, len,
req               327 drivers/crypto/inside-secure/safexcel_hash.c 	req->nents = dma_map_sg(priv->dev, areq->src,
req               331 drivers/crypto/inside-secure/safexcel_hash.c 	if (!req->nents) {
req               336 drivers/crypto/inside-secure/safexcel_hash.c 	for_each_sg(areq->src, sg, req->nents, i) {
req               363 drivers/crypto/inside-secure/safexcel_hash.c 	safexcel_context_control(ctx, req, first_cdesc);
req               366 drivers/crypto/inside-secure/safexcel_hash.c 	safexcel_hash_token(first_cdesc, len, req->state_sz);
req               368 drivers/crypto/inside-secure/safexcel_hash.c 	req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
req               370 drivers/crypto/inside-secure/safexcel_hash.c 	if (dma_mapping_error(priv->dev, req->result_dma)) {
req               376 drivers/crypto/inside-secure/safexcel_hash.c 	rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
req               377 drivers/crypto/inside-secure/safexcel_hash.c 				   req->state_sz);
req               385 drivers/crypto/inside-secure/safexcel_hash.c 	req->processed += len;
req               392 drivers/crypto/inside-secure/safexcel_hash.c 	dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
req               395 drivers/crypto/inside-secure/safexcel_hash.c 	dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
req               400 drivers/crypto/inside-secure/safexcel_hash.c 	if (req->cache_dma) {
req               401 drivers/crypto/inside-secure/safexcel_hash.c 		dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
req               403 drivers/crypto/inside-secure/safexcel_hash.c 		req->cache_dma = 0;
req               404 drivers/crypto/inside-secure/safexcel_hash.c 		req->cache_sz = 0;
req               465 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req               468 drivers/crypto/inside-secure/safexcel_hash.c 	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && req->needs_inv);
req               470 drivers/crypto/inside-secure/safexcel_hash.c 	if (req->needs_inv) {
req               471 drivers/crypto/inside-secure/safexcel_hash.c 		req->needs_inv = false;
req               504 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req               507 drivers/crypto/inside-secure/safexcel_hash.c 	if (req->needs_inv)
req               519 drivers/crypto/inside-secure/safexcel_hash.c 	EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE);
req               520 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
req               524 drivers/crypto/inside-secure/safexcel_hash.c 	memset(req, 0, EIP197_AHASH_REQ_SIZE);
req               528 drivers/crypto/inside-secure/safexcel_hash.c 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req               531 drivers/crypto/inside-secure/safexcel_hash.c 	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
req               532 drivers/crypto/inside-secure/safexcel_hash.c 	ctx = crypto_tfm_ctx(req->base.tfm);
req               537 drivers/crypto/inside-secure/safexcel_hash.c 	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
req               559 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req               565 drivers/crypto/inside-secure/safexcel_hash.c 	cache_len = safexcel_queued_len(req);
req               573 drivers/crypto/inside-secure/safexcel_hash.c 				   req->cache + cache_len,
req               585 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req               589 drivers/crypto/inside-secure/safexcel_hash.c 	req->needs_inv = false;
req               593 drivers/crypto/inside-secure/safexcel_hash.c 		    req->processed &&
req               595 drivers/crypto/inside-secure/safexcel_hash.c 		     (req->finish &&
req               596 drivers/crypto/inside-secure/safexcel_hash.c 		      (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)) ||
req               598 drivers/crypto/inside-secure/safexcel_hash.c 		     memcmp(ctx->base.ctxr->data, req->state, req->state_sz) ||
req               600 drivers/crypto/inside-secure/safexcel_hash.c 		     (req->finish && (req->processed != req->block_sz)) ||
req               602 drivers/crypto/inside-secure/safexcel_hash.c 		     (req->finish &&
req               603 drivers/crypto/inside-secure/safexcel_hash.c 		      memcmp(ctx->base.ctxr->data + (req->state_sz>>2),
req               604 drivers/crypto/inside-secure/safexcel_hash.c 			     ctx->opad, req->state_sz))))
req               615 drivers/crypto/inside-secure/safexcel_hash.c 			req->needs_inv = true;
req               640 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req               651 drivers/crypto/inside-secure/safexcel_hash.c 	req->len += areq->nbytes;
req               657 drivers/crypto/inside-secure/safexcel_hash.c 	if ((ret && !req->finish) || req->last_req)
req               665 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req               668 drivers/crypto/inside-secure/safexcel_hash.c 	req->finish = true;
req               670 drivers/crypto/inside-secure/safexcel_hash.c 	if (unlikely(!req->len && !areq->nbytes)) {
req               696 drivers/crypto/inside-secure/safexcel_hash.c 	} else if (unlikely(req->hmac &&
req               697 drivers/crypto/inside-secure/safexcel_hash.c 			    (req->len == req->block_sz) &&
req               707 drivers/crypto/inside-secure/safexcel_hash.c 		memset(req->cache, 0, req->block_sz);
req               709 drivers/crypto/inside-secure/safexcel_hash.c 		req->cache[0] = 0x80;
req               711 drivers/crypto/inside-secure/safexcel_hash.c 		if (req->len_is_le) {
req               713 drivers/crypto/inside-secure/safexcel_hash.c 			req->cache[req->block_sz-8] = (req->block_sz << 3) &
req               715 drivers/crypto/inside-secure/safexcel_hash.c 			req->cache[req->block_sz-7] = (req->block_sz >> 5);
req               718 drivers/crypto/inside-secure/safexcel_hash.c 			req->cache[req->block_sz-2] = (req->block_sz >> 5);
req               719 drivers/crypto/inside-secure/safexcel_hash.c 			req->cache[req->block_sz-1] = (req->block_sz << 3) &
req               723 drivers/crypto/inside-secure/safexcel_hash.c 		req->len += req->block_sz; /* plus 1 hash block */
req               726 drivers/crypto/inside-secure/safexcel_hash.c 		req->hmac_zlen = true;
req               729 drivers/crypto/inside-secure/safexcel_hash.c 		req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
req               730 drivers/crypto/inside-secure/safexcel_hash.c 	} else if (req->hmac) {
req               732 drivers/crypto/inside-secure/safexcel_hash.c 		req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
req               740 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req               742 drivers/crypto/inside-secure/safexcel_hash.c 	req->finish = true;
req               750 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req               753 drivers/crypto/inside-secure/safexcel_hash.c 	export->len = req->len;
req               754 drivers/crypto/inside-secure/safexcel_hash.c 	export->processed = req->processed;
req               756 drivers/crypto/inside-secure/safexcel_hash.c 	export->digest = req->digest;
req               758 drivers/crypto/inside-secure/safexcel_hash.c 	memcpy(export->state, req->state, req->state_sz);
req               759 drivers/crypto/inside-secure/safexcel_hash.c 	memcpy(export->cache, req->cache, HASH_CACHE_SIZE);
req               766 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req               774 drivers/crypto/inside-secure/safexcel_hash.c 	req->len = export->len;
req               775 drivers/crypto/inside-secure/safexcel_hash.c 	req->processed = export->processed;
req               777 drivers/crypto/inside-secure/safexcel_hash.c 	req->digest = export->digest;
req               779 drivers/crypto/inside-secure/safexcel_hash.c 	memcpy(req->cache, export->cache, HASH_CACHE_SIZE);
req               780 drivers/crypto/inside-secure/safexcel_hash.c 	memcpy(req->state, export->state, req->state_sz);
req               804 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req               806 drivers/crypto/inside-secure/safexcel_hash.c 	memset(req, 0, sizeof(*req));
req               809 drivers/crypto/inside-secure/safexcel_hash.c 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req               810 drivers/crypto/inside-secure/safexcel_hash.c 	req->state_sz = SHA1_DIGEST_SIZE;
req               811 drivers/crypto/inside-secure/safexcel_hash.c 	req->block_sz = SHA1_BLOCK_SIZE;
req               879 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req               881 drivers/crypto/inside-secure/safexcel_hash.c 	memset(req, 0, sizeof(*req));
req               884 drivers/crypto/inside-secure/safexcel_hash.c 	memcpy(req->state, ctx->ipad, SHA1_DIGEST_SIZE);
req               886 drivers/crypto/inside-secure/safexcel_hash.c 	req->len	= SHA1_BLOCK_SIZE;
req               887 drivers/crypto/inside-secure/safexcel_hash.c 	req->processed	= SHA1_BLOCK_SIZE;
req               890 drivers/crypto/inside-secure/safexcel_hash.c 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req               891 drivers/crypto/inside-secure/safexcel_hash.c 	req->state_sz = SHA1_DIGEST_SIZE;
req               892 drivers/crypto/inside-secure/safexcel_hash.c 	req->block_sz = SHA1_BLOCK_SIZE;
req               893 drivers/crypto/inside-secure/safexcel_hash.c 	req->hmac = true;
req               913 drivers/crypto/inside-secure/safexcel_hash.c static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
req               915 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_result *result = req->data;
req               977 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req;
req               991 drivers/crypto/inside-secure/safexcel_hash.c 	req = ahash_request_ctx(areq);
req               992 drivers/crypto/inside-secure/safexcel_hash.c 	req->hmac = true;
req               993 drivers/crypto/inside-secure/safexcel_hash.c 	req->last_req = true;
req              1121 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req              1123 drivers/crypto/inside-secure/safexcel_hash.c 	memset(req, 0, sizeof(*req));
req              1126 drivers/crypto/inside-secure/safexcel_hash.c 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req              1127 drivers/crypto/inside-secure/safexcel_hash.c 	req->state_sz = SHA256_DIGEST_SIZE;
req              1128 drivers/crypto/inside-secure/safexcel_hash.c 	req->block_sz = SHA256_BLOCK_SIZE;
req              1176 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req              1178 drivers/crypto/inside-secure/safexcel_hash.c 	memset(req, 0, sizeof(*req));
req              1181 drivers/crypto/inside-secure/safexcel_hash.c 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req              1182 drivers/crypto/inside-secure/safexcel_hash.c 	req->state_sz = SHA256_DIGEST_SIZE;
req              1183 drivers/crypto/inside-secure/safexcel_hash.c 	req->block_sz = SHA256_BLOCK_SIZE;
req              1238 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req              1240 drivers/crypto/inside-secure/safexcel_hash.c 	memset(req, 0, sizeof(*req));
req              1243 drivers/crypto/inside-secure/safexcel_hash.c 	memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE);
req              1245 drivers/crypto/inside-secure/safexcel_hash.c 	req->len	= SHA256_BLOCK_SIZE;
req              1246 drivers/crypto/inside-secure/safexcel_hash.c 	req->processed	= SHA256_BLOCK_SIZE;
req              1249 drivers/crypto/inside-secure/safexcel_hash.c 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req              1250 drivers/crypto/inside-secure/safexcel_hash.c 	req->state_sz = SHA256_DIGEST_SIZE;
req              1251 drivers/crypto/inside-secure/safexcel_hash.c 	req->block_sz = SHA256_BLOCK_SIZE;
req              1252 drivers/crypto/inside-secure/safexcel_hash.c 	req->hmac = true;
req              1308 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req              1310 drivers/crypto/inside-secure/safexcel_hash.c 	memset(req, 0, sizeof(*req));
req              1313 drivers/crypto/inside-secure/safexcel_hash.c 	memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE);
req              1315 drivers/crypto/inside-secure/safexcel_hash.c 	req->len	= SHA256_BLOCK_SIZE;
req              1316 drivers/crypto/inside-secure/safexcel_hash.c 	req->processed	= SHA256_BLOCK_SIZE;
req              1319 drivers/crypto/inside-secure/safexcel_hash.c 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req              1320 drivers/crypto/inside-secure/safexcel_hash.c 	req->state_sz = SHA256_DIGEST_SIZE;
req              1321 drivers/crypto/inside-secure/safexcel_hash.c 	req->block_sz = SHA256_BLOCK_SIZE;
req              1322 drivers/crypto/inside-secure/safexcel_hash.c 	req->hmac = true;
req              1371 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req              1373 drivers/crypto/inside-secure/safexcel_hash.c 	memset(req, 0, sizeof(*req));
req              1376 drivers/crypto/inside-secure/safexcel_hash.c 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req              1377 drivers/crypto/inside-secure/safexcel_hash.c 	req->state_sz = SHA512_DIGEST_SIZE;
req              1378 drivers/crypto/inside-secure/safexcel_hash.c 	req->block_sz = SHA512_BLOCK_SIZE;
req              1426 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req              1428 drivers/crypto/inside-secure/safexcel_hash.c 	memset(req, 0, sizeof(*req));
req              1431 drivers/crypto/inside-secure/safexcel_hash.c 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req              1432 drivers/crypto/inside-secure/safexcel_hash.c 	req->state_sz = SHA512_DIGEST_SIZE;
req              1433 drivers/crypto/inside-secure/safexcel_hash.c 	req->block_sz = SHA512_BLOCK_SIZE;
req              1488 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req              1490 drivers/crypto/inside-secure/safexcel_hash.c 	memset(req, 0, sizeof(*req));
req              1493 drivers/crypto/inside-secure/safexcel_hash.c 	memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE);
req              1495 drivers/crypto/inside-secure/safexcel_hash.c 	req->len	= SHA512_BLOCK_SIZE;
req              1496 drivers/crypto/inside-secure/safexcel_hash.c 	req->processed	= SHA512_BLOCK_SIZE;
req              1499 drivers/crypto/inside-secure/safexcel_hash.c 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req              1500 drivers/crypto/inside-secure/safexcel_hash.c 	req->state_sz = SHA512_DIGEST_SIZE;
req              1501 drivers/crypto/inside-secure/safexcel_hash.c 	req->block_sz = SHA512_BLOCK_SIZE;
req              1502 drivers/crypto/inside-secure/safexcel_hash.c 	req->hmac = true;
req              1558 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req              1560 drivers/crypto/inside-secure/safexcel_hash.c 	memset(req, 0, sizeof(*req));
req              1563 drivers/crypto/inside-secure/safexcel_hash.c 	memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE);
req              1565 drivers/crypto/inside-secure/safexcel_hash.c 	req->len	= SHA512_BLOCK_SIZE;
req              1566 drivers/crypto/inside-secure/safexcel_hash.c 	req->processed	= SHA512_BLOCK_SIZE;
req              1569 drivers/crypto/inside-secure/safexcel_hash.c 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req              1570 drivers/crypto/inside-secure/safexcel_hash.c 	req->state_sz = SHA512_DIGEST_SIZE;
req              1571 drivers/crypto/inside-secure/safexcel_hash.c 	req->block_sz = SHA512_BLOCK_SIZE;
req              1572 drivers/crypto/inside-secure/safexcel_hash.c 	req->hmac = true;
req              1621 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req              1623 drivers/crypto/inside-secure/safexcel_hash.c 	memset(req, 0, sizeof(*req));
req              1626 drivers/crypto/inside-secure/safexcel_hash.c 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req              1627 drivers/crypto/inside-secure/safexcel_hash.c 	req->state_sz = MD5_DIGEST_SIZE;
req              1628 drivers/crypto/inside-secure/safexcel_hash.c 	req->block_sz = MD5_HMAC_BLOCK_SIZE;
req              1676 drivers/crypto/inside-secure/safexcel_hash.c 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
req              1678 drivers/crypto/inside-secure/safexcel_hash.c 	memset(req, 0, sizeof(*req));
req              1681 drivers/crypto/inside-secure/safexcel_hash.c 	memcpy(req->state, ctx->ipad, MD5_DIGEST_SIZE);
req              1683 drivers/crypto/inside-secure/safexcel_hash.c 	req->len	= MD5_HMAC_BLOCK_SIZE;
req              1684 drivers/crypto/inside-secure/safexcel_hash.c 	req->processed	= MD5_HMAC_BLOCK_SIZE;
req              1687 drivers/crypto/inside-secure/safexcel_hash.c 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req              1688 drivers/crypto/inside-secure/safexcel_hash.c 	req->state_sz = MD5_DIGEST_SIZE;
req              1689 drivers/crypto/inside-secure/safexcel_hash.c 	req->block_sz = MD5_HMAC_BLOCK_SIZE;
req              1690 drivers/crypto/inside-secure/safexcel_hash.c 	req->len_is_le = true; /* MD5 is little endian! ... */
req              1691 drivers/crypto/inside-secure/safexcel_hash.c 	req->hmac = true;
req               343 drivers/crypto/ixp4xx_crypto.c 	struct aead_request *req = crypt->data.aead_req;
req               344 drivers/crypto/ixp4xx_crypto.c 	struct aead_ctx *req_ctx = aead_request_ctx(req);
req               345 drivers/crypto/ixp4xx_crypto.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               347 drivers/crypto/ixp4xx_crypto.c 	int decryptlen = req->assoclen + req->cryptlen - authsize;
req               351 drivers/crypto/ixp4xx_crypto.c 			req->dst, decryptlen, authsize, 1);
req               369 drivers/crypto/ixp4xx_crypto.c 		struct aead_request *req = crypt->data.aead_req;
req               370 drivers/crypto/ixp4xx_crypto.c 		struct aead_ctx *req_ctx = aead_request_ctx(req);
req               377 drivers/crypto/ixp4xx_crypto.c 		req->base.complete(&req->base, failed);
req               381 drivers/crypto/ixp4xx_crypto.c 		struct ablkcipher_request *req = crypt->data.ablk_req;
req               382 drivers/crypto/ixp4xx_crypto.c 		struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
req               388 drivers/crypto/ixp4xx_crypto.c 		req->base.complete(&req->base, failed);
req               871 drivers/crypto/ixp4xx_crypto.c static int ablk_perform(struct ablkcipher_request *req, int encrypt)
req               873 drivers/crypto/ixp4xx_crypto.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req               878 drivers/crypto/ixp4xx_crypto.c 	unsigned int nbytes = req->nbytes;
req               880 drivers/crypto/ixp4xx_crypto.c 	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
req               883 drivers/crypto/ixp4xx_crypto.c 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
req               897 drivers/crypto/ixp4xx_crypto.c 	crypt->data.ablk_req = req;
req               905 drivers/crypto/ixp4xx_crypto.c 	BUG_ON(ivsize && !req->info);
req               906 drivers/crypto/ixp4xx_crypto.c 	memcpy(crypt->iv, req->info, ivsize);
req               907 drivers/crypto/ixp4xx_crypto.c 	if (req->src != req->dst) {
req               913 drivers/crypto/ixp4xx_crypto.c 		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
req               923 drivers/crypto/ixp4xx_crypto.c 	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
req               937 drivers/crypto/ixp4xx_crypto.c 	if (req->src != req->dst) {
req               944 drivers/crypto/ixp4xx_crypto.c static int ablk_encrypt(struct ablkcipher_request *req)
req               946 drivers/crypto/ixp4xx_crypto.c 	return ablk_perform(req, 1);
req               949 drivers/crypto/ixp4xx_crypto.c static int ablk_decrypt(struct ablkcipher_request *req)
req               951 drivers/crypto/ixp4xx_crypto.c 	return ablk_perform(req, 0);
req               954 drivers/crypto/ixp4xx_crypto.c static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
req               956 drivers/crypto/ixp4xx_crypto.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req               959 drivers/crypto/ixp4xx_crypto.c 	u8 *info = req->info;
req               970 drivers/crypto/ixp4xx_crypto.c 	req->info = iv;
req               971 drivers/crypto/ixp4xx_crypto.c 	ret = ablk_perform(req, 1);
req               972 drivers/crypto/ixp4xx_crypto.c 	req->info = info;
req               976 drivers/crypto/ixp4xx_crypto.c static int aead_perform(struct aead_request *req, int encrypt,
req               979 drivers/crypto/ixp4xx_crypto.c 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
req               987 drivers/crypto/ixp4xx_crypto.c 	struct aead_ctx *req_ctx = aead_request_ctx(req);
req               989 drivers/crypto/ixp4xx_crypto.c 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
req              1001 drivers/crypto/ixp4xx_crypto.c 		cryptlen = req->cryptlen;
req              1005 drivers/crypto/ixp4xx_crypto.c 		cryptlen = req->cryptlen -authsize;
req              1012 drivers/crypto/ixp4xx_crypto.c 	crypt->data.aead_req = req;
req              1021 drivers/crypto/ixp4xx_crypto.c 	crypt->auth_len = req->assoclen + cryptlen;
req              1022 drivers/crypto/ixp4xx_crypto.c 	BUG_ON(ivsize && !req->iv);
req              1023 drivers/crypto/ixp4xx_crypto.c 	memcpy(crypt->iv, req->iv, ivsize);
req              1025 drivers/crypto/ixp4xx_crypto.c 	buf = chainup_buffers(dev, req->src, crypt->auth_len,
req              1039 drivers/crypto/ixp4xx_crypto.c 	if (req->src != req->dst) {
req              1045 drivers/crypto/ixp4xx_crypto.c 		buf = chainup_buffers(dev, req->dst, crypt->auth_len,
req              1070 drivers/crypto/ixp4xx_crypto.c 				req->src, cryptlen, authsize, 0);
req              1202 drivers/crypto/ixp4xx_crypto.c static int aead_encrypt(struct aead_request *req)
req              1204 drivers/crypto/ixp4xx_crypto.c 	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
req              1207 drivers/crypto/ixp4xx_crypto.c static int aead_decrypt(struct aead_request *req)
req              1209 drivers/crypto/ixp4xx_crypto.c 	return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
req                41 drivers/crypto/marvell/cesa.c 	struct crypto_async_request *req;
req                44 drivers/crypto/marvell/cesa.c 	req = crypto_dequeue_request(&engine->queue);
req                46 drivers/crypto/marvell/cesa.c 	if (!req)
req                49 drivers/crypto/marvell/cesa.c 	return req;
req                54 drivers/crypto/marvell/cesa.c 	struct crypto_async_request *req = NULL, *backlog = NULL;
req                59 drivers/crypto/marvell/cesa.c 	if (!engine->req) {
req                60 drivers/crypto/marvell/cesa.c 		req = mv_cesa_dequeue_req_locked(engine, &backlog);
req                61 drivers/crypto/marvell/cesa.c 		engine->req = req;
req                65 drivers/crypto/marvell/cesa.c 	if (!req)
req                71 drivers/crypto/marvell/cesa.c 	ctx = crypto_tfm_ctx(req->tfm);
req                72 drivers/crypto/marvell/cesa.c 	ctx->ops->step(req);
req                77 drivers/crypto/marvell/cesa.c 	struct crypto_async_request *req;
req                81 drivers/crypto/marvell/cesa.c 	req = engine->req;
req                82 drivers/crypto/marvell/cesa.c 	ctx = crypto_tfm_ctx(req->tfm);
req                83 drivers/crypto/marvell/cesa.c 	res = ctx->ops->process(req, status);
req                86 drivers/crypto/marvell/cesa.c 		ctx->ops->complete(req);
req                87 drivers/crypto/marvell/cesa.c 		mv_cesa_engine_enqueue_complete_request(engine, req);
req                89 drivers/crypto/marvell/cesa.c 		ctx->ops->step(req);
req               104 drivers/crypto/marvell/cesa.c mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
req               107 drivers/crypto/marvell/cesa.c 	ctx->ops->cleanup(req);
req               109 drivers/crypto/marvell/cesa.c 	req->complete(req, res);
req               116 drivers/crypto/marvell/cesa.c 	struct crypto_async_request *req;
req               142 drivers/crypto/marvell/cesa.c 		req = engine->req;
req               144 drivers/crypto/marvell/cesa.c 			engine->req = NULL;
req               147 drivers/crypto/marvell/cesa.c 		ctx = crypto_tfm_ctx(req->tfm);
req               150 drivers/crypto/marvell/cesa.c 			mv_cesa_complete_req(ctx, req, res);
req               157 drivers/crypto/marvell/cesa.c 			req = mv_cesa_engine_dequeue_complete_request(engine);
req               158 drivers/crypto/marvell/cesa.c 			if (!req)
req               161 drivers/crypto/marvell/cesa.c 			ctx = crypto_tfm_ctx(req->tfm);
req               162 drivers/crypto/marvell/cesa.c 			mv_cesa_complete_req(ctx, req, 0);
req               169 drivers/crypto/marvell/cesa.c int mv_cesa_queue_req(struct crypto_async_request *req,
req               176 drivers/crypto/marvell/cesa.c 	ret = crypto_enqueue_request(&engine->queue, req);
req               450 drivers/crypto/marvell/cesa.h 	struct crypto_async_request *req;
req               473 drivers/crypto/marvell/cesa.h 	int (*process)(struct crypto_async_request *req, u32 status);
req               474 drivers/crypto/marvell/cesa.h 	void (*step)(struct crypto_async_request *req);
req               475 drivers/crypto/marvell/cesa.h 	void (*cleanup)(struct crypto_async_request *req);
req               476 drivers/crypto/marvell/cesa.h 	void (*complete)(struct crypto_async_request *req);
req               605 drivers/crypto/marvell/cesa.h 	} req;
req               623 drivers/crypto/marvell/cesa.h 					struct crypto_async_request *req)
req               625 drivers/crypto/marvell/cesa.h 	list_add_tail(&req->list, &engine->complete_queue);
req               631 drivers/crypto/marvell/cesa.h 	struct crypto_async_request *req;
req               633 drivers/crypto/marvell/cesa.h 	req = list_first_entry_or_null(&engine->complete_queue,
req               636 drivers/crypto/marvell/cesa.h 	if (req)
req               637 drivers/crypto/marvell/cesa.h 		list_del(&req->list);
req               639 drivers/crypto/marvell/cesa.h 	return req;
req               644 drivers/crypto/marvell/cesa.h mv_cesa_req_get_type(struct mv_cesa_req *req)
req               646 drivers/crypto/marvell/cesa.h 	return req->chain.first ? CESA_DMA_REQ : CESA_STD_REQ;
req               721 drivers/crypto/marvell/cesa.h int mv_cesa_queue_req(struct crypto_async_request *req,
req               752 drivers/crypto/marvell/cesa.h static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
req                40 drivers/crypto/marvell/cipher.c 			       struct skcipher_request *req)
req                42 drivers/crypto/marvell/cipher.c 	mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
req                43 drivers/crypto/marvell/cipher.c 	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
req                44 drivers/crypto/marvell/cipher.c 	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
req                57 drivers/crypto/marvell/cipher.c mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
req                59 drivers/crypto/marvell/cipher.c 	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
req                61 drivers/crypto/marvell/cipher.c 	if (req->dst != req->src) {
req                62 drivers/crypto/marvell/cipher.c 		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
req                64 drivers/crypto/marvell/cipher.c 		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
req                67 drivers/crypto/marvell/cipher.c 		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
req                73 drivers/crypto/marvell/cipher.c static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
req                75 drivers/crypto/marvell/cipher.c 	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
req                78 drivers/crypto/marvell/cipher.c 		mv_cesa_skcipher_dma_cleanup(req);
req                81 drivers/crypto/marvell/cipher.c static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
req                83 drivers/crypto/marvell/cipher.c 	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
req                86 drivers/crypto/marvell/cipher.c 	size_t  len = min_t(size_t, req->cryptlen - sreq->offset,
req                92 drivers/crypto/marvell/cipher.c 	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
req               114 drivers/crypto/marvell/cipher.c static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
req               117 drivers/crypto/marvell/cipher.c 	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
req               122 drivers/crypto/marvell/cipher.c 	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
req               127 drivers/crypto/marvell/cipher.c 	if (sreq->offset < req->cryptlen)
req               133 drivers/crypto/marvell/cipher.c static int mv_cesa_skcipher_process(struct crypto_async_request *req,
req               136 drivers/crypto/marvell/cipher.c 	struct skcipher_request *skreq = skcipher_request_cast(req);
req               146 drivers/crypto/marvell/cipher.c static void mv_cesa_skcipher_step(struct crypto_async_request *req)
req               148 drivers/crypto/marvell/cipher.c 	struct skcipher_request *skreq = skcipher_request_cast(req);
req               158 drivers/crypto/marvell/cipher.c mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
req               160 drivers/crypto/marvell/cipher.c 	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
req               167 drivers/crypto/marvell/cipher.c mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
req               169 drivers/crypto/marvell/cipher.c 	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
req               176 drivers/crypto/marvell/cipher.c static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
req               179 drivers/crypto/marvell/cipher.c 	struct skcipher_request *skreq = skcipher_request_cast(req);
req               190 drivers/crypto/marvell/cipher.c mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
req               192 drivers/crypto/marvell/cipher.c 	struct skcipher_request *skreq = skcipher_request_cast(req);
req               198 drivers/crypto/marvell/cipher.c mv_cesa_skcipher_complete(struct crypto_async_request *req)
req               200 drivers/crypto/marvell/cipher.c 	struct skcipher_request *skreq = skcipher_request_cast(req);
req               302 drivers/crypto/marvell/cipher.c static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
req               305 drivers/crypto/marvell/cipher.c 	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
req               306 drivers/crypto/marvell/cipher.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req               316 drivers/crypto/marvell/cipher.c 	if (req->src != req->dst) {
req               317 drivers/crypto/marvell/cipher.c 		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
req               322 drivers/crypto/marvell/cipher.c 		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
req               329 drivers/crypto/marvell/cipher.c 		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
req               336 drivers/crypto/marvell/cipher.c 	mv_cesa_skcipher_req_iter_init(&iter, req);
req               383 drivers/crypto/marvell/cipher.c 	if (req->dst != req->src)
req               384 drivers/crypto/marvell/cipher.c 		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
req               388 drivers/crypto/marvell/cipher.c 	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
req               389 drivers/crypto/marvell/cipher.c 		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
req               395 drivers/crypto/marvell/cipher.c mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
req               398 drivers/crypto/marvell/cipher.c 	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
req               410 drivers/crypto/marvell/cipher.c static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
req               413 drivers/crypto/marvell/cipher.c 	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
req               414 drivers/crypto/marvell/cipher.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               418 drivers/crypto/marvell/cipher.c 	if (!IS_ALIGNED(req->cryptlen, blksize))
req               421 drivers/crypto/marvell/cipher.c 	creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
req               426 drivers/crypto/marvell/cipher.c 	creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
req               436 drivers/crypto/marvell/cipher.c 		ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
req               438 drivers/crypto/marvell/cipher.c 		ret = mv_cesa_skcipher_std_req_init(req, tmpl);
req               443 drivers/crypto/marvell/cipher.c static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
req               447 drivers/crypto/marvell/cipher.c 	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
req               450 drivers/crypto/marvell/cipher.c 	ret = mv_cesa_skcipher_req_init(req, tmpl);
req               454 drivers/crypto/marvell/cipher.c 	engine = mv_cesa_select_engine(req->cryptlen);
req               455 drivers/crypto/marvell/cipher.c 	mv_cesa_skcipher_prepare(&req->base, engine);
req               457 drivers/crypto/marvell/cipher.c 	ret = mv_cesa_queue_req(&req->base, &creq->base);
req               459 drivers/crypto/marvell/cipher.c 	if (mv_cesa_req_needs_cleanup(&req->base, ret))
req               460 drivers/crypto/marvell/cipher.c 		mv_cesa_skcipher_cleanup(req);
req               465 drivers/crypto/marvell/cipher.c static int mv_cesa_des_op(struct skcipher_request *req,
req               468 drivers/crypto/marvell/cipher.c 	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
req               475 drivers/crypto/marvell/cipher.c 	return mv_cesa_skcipher_queue_req(req, tmpl);
req               478 drivers/crypto/marvell/cipher.c static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
req               486 drivers/crypto/marvell/cipher.c 	return mv_cesa_des_op(req, &tmpl);
req               489 drivers/crypto/marvell/cipher.c static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
req               497 drivers/crypto/marvell/cipher.c 	return mv_cesa_des_op(req, &tmpl);
req               520 drivers/crypto/marvell/cipher.c static int mv_cesa_cbc_des_op(struct skcipher_request *req,
req               526 drivers/crypto/marvell/cipher.c 	memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES_BLOCK_SIZE);
req               528 drivers/crypto/marvell/cipher.c 	return mv_cesa_des_op(req, tmpl);
req               531 drivers/crypto/marvell/cipher.c static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
req               537 drivers/crypto/marvell/cipher.c 	return mv_cesa_cbc_des_op(req, &tmpl);
req               540 drivers/crypto/marvell/cipher.c static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
req               546 drivers/crypto/marvell/cipher.c 	return mv_cesa_cbc_des_op(req, &tmpl);
req               570 drivers/crypto/marvell/cipher.c static int mv_cesa_des3_op(struct skcipher_request *req,
req               573 drivers/crypto/marvell/cipher.c 	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
req               580 drivers/crypto/marvell/cipher.c 	return mv_cesa_skcipher_queue_req(req, tmpl);
req               583 drivers/crypto/marvell/cipher.c static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
req               592 drivers/crypto/marvell/cipher.c 	return mv_cesa_des3_op(req, &tmpl);
req               595 drivers/crypto/marvell/cipher.c static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
req               604 drivers/crypto/marvell/cipher.c 	return mv_cesa_des3_op(req, &tmpl);
req               628 drivers/crypto/marvell/cipher.c static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
req               631 drivers/crypto/marvell/cipher.c 	memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);
req               633 drivers/crypto/marvell/cipher.c 	return mv_cesa_des3_op(req, tmpl);
req               636 drivers/crypto/marvell/cipher.c static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
req               645 drivers/crypto/marvell/cipher.c 	return mv_cesa_cbc_des3_op(req, &tmpl);
req               648 drivers/crypto/marvell/cipher.c static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
req               657 drivers/crypto/marvell/cipher.c 	return mv_cesa_cbc_des3_op(req, &tmpl);
req               681 drivers/crypto/marvell/cipher.c static int mv_cesa_aes_op(struct skcipher_request *req,
req               684 drivers/crypto/marvell/cipher.c 	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
req               708 drivers/crypto/marvell/cipher.c 	return mv_cesa_skcipher_queue_req(req, tmpl);
req               711 drivers/crypto/marvell/cipher.c static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
req               719 drivers/crypto/marvell/cipher.c 	return mv_cesa_aes_op(req, &tmpl);
req               722 drivers/crypto/marvell/cipher.c static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
req               730 drivers/crypto/marvell/cipher.c 	return mv_cesa_aes_op(req, &tmpl);
req               753 drivers/crypto/marvell/cipher.c static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
req               758 drivers/crypto/marvell/cipher.c 	memcpy(tmpl->ctx.blkcipher.iv, req->iv, AES_BLOCK_SIZE);
req               760 drivers/crypto/marvell/cipher.c 	return mv_cesa_aes_op(req, tmpl);
req               763 drivers/crypto/marvell/cipher.c static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
req               769 drivers/crypto/marvell/cipher.c 	return mv_cesa_cbc_aes_op(req, &tmpl);
req               772 drivers/crypto/marvell/cipher.c static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
req               778 drivers/crypto/marvell/cipher.c 	return mv_cesa_cbc_aes_op(req, &tmpl);
req                25 drivers/crypto/marvell/hash.c 			    struct ahash_request *req)
req                27 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req                28 drivers/crypto/marvell/hash.c 	unsigned int len = req->nbytes + creq->cache_ptr;
req                34 drivers/crypto/marvell/hash.c 	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
req                47 drivers/crypto/marvell/hash.c mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
req                49 drivers/crypto/marvell/hash.c 	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
req                50 drivers/crypto/marvell/hash.c 				    &req->cache_dma);
req                51 drivers/crypto/marvell/hash.c 	if (!req->cache)
req                58 drivers/crypto/marvell/hash.c mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
req                60 drivers/crypto/marvell/hash.c 	if (!req->cache)
req                63 drivers/crypto/marvell/hash.c 	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
req                64 drivers/crypto/marvell/hash.c 		      req->cache_dma);
req                67 drivers/crypto/marvell/hash.c static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
req                70 drivers/crypto/marvell/hash.c 	if (req->padding)
req                73 drivers/crypto/marvell/hash.c 	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
req                74 drivers/crypto/marvell/hash.c 				      &req->padding_dma);
req                75 drivers/crypto/marvell/hash.c 	if (!req->padding)
req                81 drivers/crypto/marvell/hash.c static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
req                83 drivers/crypto/marvell/hash.c 	if (!req->padding)
req                86 drivers/crypto/marvell/hash.c 	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
req                87 drivers/crypto/marvell/hash.c 		      req->padding_dma);
req                88 drivers/crypto/marvell/hash.c 	req->padding = NULL;
req                91 drivers/crypto/marvell/hash.c static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
req                93 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req                95 drivers/crypto/marvell/hash.c 	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
req                98 drivers/crypto/marvell/hash.c static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
req               100 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req               102 drivers/crypto/marvell/hash.c 	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
req               103 drivers/crypto/marvell/hash.c 	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
req               107 drivers/crypto/marvell/hash.c static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
req               109 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req               112 drivers/crypto/marvell/hash.c 		mv_cesa_ahash_dma_cleanup(req);
req               115 drivers/crypto/marvell/hash.c static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
req               117 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req               120 drivers/crypto/marvell/hash.c 		mv_cesa_ahash_dma_last_cleanup(req);
req               153 drivers/crypto/marvell/hash.c static void mv_cesa_ahash_std_step(struct ahash_request *req)
req               155 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req               156 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
req               169 drivers/crypto/marvell/hash.c 		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
req               178 drivers/crypto/marvell/hash.c 	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
req               187 drivers/crypto/marvell/hash.c 		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
req               198 drivers/crypto/marvell/hash.c 	if (creq->last_req && sreq->offset == req->nbytes &&
req               253 drivers/crypto/marvell/hash.c static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
req               255 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req               256 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
req               258 drivers/crypto/marvell/hash.c 	if (sreq->offset < (req->nbytes - creq->cache_ptr))
req               264 drivers/crypto/marvell/hash.c static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
req               266 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req               272 drivers/crypto/marvell/hash.c static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
req               274 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req               275 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
req               280 drivers/crypto/marvell/hash.c static void mv_cesa_ahash_dma_step(struct ahash_request *req)
req               282 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req               299 drivers/crypto/marvell/hash.c static void mv_cesa_ahash_step(struct crypto_async_request *req)
req               301 drivers/crypto/marvell/hash.c 	struct ahash_request *ahashreq = ahash_request_cast(req);
req               310 drivers/crypto/marvell/hash.c static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
req               312 drivers/crypto/marvell/hash.c 	struct ahash_request *ahashreq = ahash_request_cast(req);
req               321 drivers/crypto/marvell/hash.c static void mv_cesa_ahash_complete(struct crypto_async_request *req)
req               323 drivers/crypto/marvell/hash.c 	struct ahash_request *ahashreq = ahash_request_cast(req);
req               370 drivers/crypto/marvell/hash.c static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
req               373 drivers/crypto/marvell/hash.c 	struct ahash_request *ahashreq = ahash_request_cast(req);
req               384 drivers/crypto/marvell/hash.c static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
req               386 drivers/crypto/marvell/hash.c 	struct ahash_request *ahashreq = ahash_request_cast(req);
req               408 drivers/crypto/marvell/hash.c static void mv_cesa_ahash_init(struct ahash_request *req,
req               411 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req               437 drivers/crypto/marvell/hash.c static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
req               439 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req               442 drivers/crypto/marvell/hash.c 	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE && !creq->last_req) {
req               445 drivers/crypto/marvell/hash.c 		if (!req->nbytes)
req               448 drivers/crypto/marvell/hash.c 		sg_pcopy_to_buffer(req->src, creq->src_nents,
req               450 drivers/crypto/marvell/hash.c 				   req->nbytes, 0);
req               452 drivers/crypto/marvell/hash.c 		creq->cache_ptr += req->nbytes;
req               491 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
req               517 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
req               594 drivers/crypto/marvell/hash.c static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
req               596 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req               597 drivers/crypto/marvell/hash.c 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req               614 drivers/crypto/marvell/hash.c 		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
req               623 drivers/crypto/marvell/hash.c 	mv_cesa_ahash_req_iter_init(&iter, req);
req               696 drivers/crypto/marvell/hash.c 		creq->cache_ptr = req->nbytes + creq->cache_ptr -
req               719 drivers/crypto/marvell/hash.c 	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
req               722 drivers/crypto/marvell/hash.c 	mv_cesa_ahash_last_cleanup(req);
req               727 drivers/crypto/marvell/hash.c static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
req               729 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req               731 drivers/crypto/marvell/hash.c 	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
req               737 drivers/crypto/marvell/hash.c 	*cached = mv_cesa_ahash_cache_req(req);
req               743 drivers/crypto/marvell/hash.c 		return mv_cesa_ahash_dma_req_init(req);
req               748 drivers/crypto/marvell/hash.c static int mv_cesa_ahash_queue_req(struct ahash_request *req)
req               750 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req               755 drivers/crypto/marvell/hash.c 	ret = mv_cesa_ahash_req_init(req, &cached);
req               762 drivers/crypto/marvell/hash.c 	engine = mv_cesa_select_engine(req->nbytes);
req               763 drivers/crypto/marvell/hash.c 	mv_cesa_ahash_prepare(&req->base, engine);
req               765 drivers/crypto/marvell/hash.c 	ret = mv_cesa_queue_req(&req->base, &creq->base);
req               767 drivers/crypto/marvell/hash.c 	if (mv_cesa_req_needs_cleanup(&req->base, ret))
req               768 drivers/crypto/marvell/hash.c 		mv_cesa_ahash_cleanup(req);
req               773 drivers/crypto/marvell/hash.c static int mv_cesa_ahash_update(struct ahash_request *req)
req               775 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req               777 drivers/crypto/marvell/hash.c 	creq->len += req->nbytes;
req               779 drivers/crypto/marvell/hash.c 	return mv_cesa_ahash_queue_req(req);
req               782 drivers/crypto/marvell/hash.c static int mv_cesa_ahash_final(struct ahash_request *req)
req               784 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req               789 drivers/crypto/marvell/hash.c 	req->nbytes = 0;
req               791 drivers/crypto/marvell/hash.c 	return mv_cesa_ahash_queue_req(req);
req               794 drivers/crypto/marvell/hash.c static int mv_cesa_ahash_finup(struct ahash_request *req)
req               796 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req               799 drivers/crypto/marvell/hash.c 	creq->len += req->nbytes;
req               803 drivers/crypto/marvell/hash.c 	return mv_cesa_ahash_queue_req(req);
req               806 drivers/crypto/marvell/hash.c static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
req               809 drivers/crypto/marvell/hash.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req               810 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req               824 drivers/crypto/marvell/hash.c static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
req               827 drivers/crypto/marvell/hash.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req               828 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req               834 drivers/crypto/marvell/hash.c 	ret = crypto_ahash_init(req);
req               858 drivers/crypto/marvell/hash.c static int mv_cesa_md5_init(struct ahash_request *req)
req               860 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req               865 drivers/crypto/marvell/hash.c 	mv_cesa_ahash_init(req, &tmpl, true);
req               875 drivers/crypto/marvell/hash.c static int mv_cesa_md5_export(struct ahash_request *req, void *out)
req               879 drivers/crypto/marvell/hash.c 	return mv_cesa_ahash_export(req, out_state->hash,
req               883 drivers/crypto/marvell/hash.c static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
req               887 drivers/crypto/marvell/hash.c 	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
req               891 drivers/crypto/marvell/hash.c static int mv_cesa_md5_digest(struct ahash_request *req)
req               895 drivers/crypto/marvell/hash.c 	ret = mv_cesa_md5_init(req);
req               899 drivers/crypto/marvell/hash.c 	return mv_cesa_ahash_finup(req);
req               927 drivers/crypto/marvell/hash.c static int mv_cesa_sha1_init(struct ahash_request *req)
req               929 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req               934 drivers/crypto/marvell/hash.c 	mv_cesa_ahash_init(req, &tmpl, false);
req               945 drivers/crypto/marvell/hash.c static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
req               949 drivers/crypto/marvell/hash.c 	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
req               953 drivers/crypto/marvell/hash.c static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
req               957 drivers/crypto/marvell/hash.c 	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
req               961 drivers/crypto/marvell/hash.c static int mv_cesa_sha1_digest(struct ahash_request *req)
req               965 drivers/crypto/marvell/hash.c 	ret = mv_cesa_sha1_init(req);
req               969 drivers/crypto/marvell/hash.c 	return mv_cesa_ahash_finup(req);
req               997 drivers/crypto/marvell/hash.c static int mv_cesa_sha256_init(struct ahash_request *req)
req               999 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
req              1004 drivers/crypto/marvell/hash.c 	mv_cesa_ahash_init(req, &tmpl, false);
req              1018 drivers/crypto/marvell/hash.c static int mv_cesa_sha256_digest(struct ahash_request *req)
req              1022 drivers/crypto/marvell/hash.c 	ret = mv_cesa_sha256_init(req);
req              1026 drivers/crypto/marvell/hash.c 	return mv_cesa_ahash_finup(req);
req              1029 drivers/crypto/marvell/hash.c static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
req              1033 drivers/crypto/marvell/hash.c 	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
req              1037 drivers/crypto/marvell/hash.c static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
req              1041 drivers/crypto/marvell/hash.c 	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
req              1075 drivers/crypto/marvell/hash.c static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
req              1078 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_result *result = req->data;
req              1087 drivers/crypto/marvell/hash.c static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
req              1094 drivers/crypto/marvell/hash.c 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req              1097 drivers/crypto/marvell/hash.c 	ahash_request_set_crypt(req, &sg, pad, blocksize);
req              1100 drivers/crypto/marvell/hash.c 	ret = crypto_ahash_init(req);
req              1104 drivers/crypto/marvell/hash.c 	ret = crypto_ahash_update(req);
req              1112 drivers/crypto/marvell/hash.c 	ret = crypto_ahash_export(req, state);
req              1119 drivers/crypto/marvell/hash.c static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
req              1137 drivers/crypto/marvell/hash.c 		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req              1141 drivers/crypto/marvell/hash.c 		ahash_request_set_crypt(req, &sg, ipad, keylen);
req              1144 drivers/crypto/marvell/hash.c 		ret = crypto_ahash_digest(req);
req              1156 drivers/crypto/marvell/hash.c 		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
req              1174 drivers/crypto/marvell/hash.c 	struct ahash_request *req;
req              1185 drivers/crypto/marvell/hash.c 	req = ahash_request_alloc(tfm, GFP_KERNEL);
req              1186 drivers/crypto/marvell/hash.c 	if (!req) {
req              1203 drivers/crypto/marvell/hash.c 	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
req              1207 drivers/crypto/marvell/hash.c 	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
req              1211 drivers/crypto/marvell/hash.c 	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);
req              1216 drivers/crypto/marvell/hash.c 	ahash_request_free(req);
req              1234 drivers/crypto/marvell/hash.c static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
req              1236 drivers/crypto/marvell/hash.c 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
req              1242 drivers/crypto/marvell/hash.c 	mv_cesa_ahash_init(req, &tmpl, true);
req              1267 drivers/crypto/marvell/hash.c static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
req              1271 drivers/crypto/marvell/hash.c 	ret = mv_cesa_ahmac_md5_init(req);
req              1275 drivers/crypto/marvell/hash.c 	return mv_cesa_ahash_finup(req);
req              1304 drivers/crypto/marvell/hash.c static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
req              1306 drivers/crypto/marvell/hash.c 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
req              1312 drivers/crypto/marvell/hash.c 	mv_cesa_ahash_init(req, &tmpl, false);
req              1337 drivers/crypto/marvell/hash.c static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
req              1341 drivers/crypto/marvell/hash.c 	ret = mv_cesa_ahmac_sha1_init(req);
req              1345 drivers/crypto/marvell/hash.c 	return mv_cesa_ahash_finup(req);
req              1394 drivers/crypto/marvell/hash.c static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
req              1396 drivers/crypto/marvell/hash.c 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
req              1402 drivers/crypto/marvell/hash.c 	mv_cesa_ahash_init(req, &tmpl, false);
req              1407 drivers/crypto/marvell/hash.c static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
req              1411 drivers/crypto/marvell/hash.c 	ret = mv_cesa_ahmac_sha256_init(req);
req              1415 drivers/crypto/marvell/hash.c 	return mv_cesa_ahash_finup(req);
req               123 drivers/crypto/marvell/tdma.c 	struct crypto_async_request *req = NULL;
req               145 drivers/crypto/marvell/tdma.c 			if (!req)
req               146 drivers/crypto/marvell/tdma.c 				req = engine->req;
req               148 drivers/crypto/marvell/tdma.c 				req = mv_cesa_dequeue_req_locked(engine,
req               160 drivers/crypto/marvell/tdma.c 			ctx = crypto_tfm_ctx(req->tfm);
req               163 drivers/crypto/marvell/tdma.c 			res = ctx->ops->process(req, current_status);
req               164 drivers/crypto/marvell/tdma.c 			ctx->ops->complete(req);
req               168 drivers/crypto/marvell/tdma.c 									req);
req               182 drivers/crypto/marvell/tdma.c 		engine->req = req;
req               417 drivers/crypto/mediatek/mtk-aes.c 	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
req               453 drivers/crypto/mediatek/mtk-aes.c 	mtk_aes_write_state_le(info->state + ctx->keylen, req->info,
req               555 drivers/crypto/mediatek/mtk-aes.c 	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
req               556 drivers/crypto/mediatek/mtk-aes.c 	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
req               561 drivers/crypto/mediatek/mtk-aes.c 	return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes);
req               574 drivers/crypto/mediatek/mtk-aes.c 	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
req               582 drivers/crypto/mediatek/mtk-aes.c 	if (cctx->offset >= req->nbytes)
req               586 drivers/crypto/mediatek/mtk-aes.c 	datalen = req->nbytes - cctx->offset;
req               600 drivers/crypto/mediatek/mtk-aes.c 	src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
req               601 drivers/crypto/mediatek/mtk-aes.c 	dst = ((req->src == req->dst) ? src :
req               602 drivers/crypto/mediatek/mtk-aes.c 	       scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));
req               623 drivers/crypto/mediatek/mtk-aes.c 	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
req               624 drivers/crypto/mediatek/mtk-aes.c 	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
req               628 drivers/crypto/mediatek/mtk-aes.c 	memcpy(cctx->iv, req->info, AES_BLOCK_SIZE);
req               664 drivers/crypto/mediatek/mtk-aes.c static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
req               666 drivers/crypto/mediatek/mtk-aes.c 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
req               675 drivers/crypto/mediatek/mtk-aes.c 	rctx = ablkcipher_request_ctx(req);
req               679 drivers/crypto/mediatek/mtk-aes.c 				    &req->base);
req               682 drivers/crypto/mediatek/mtk-aes.c static int mtk_aes_ecb_encrypt(struct ablkcipher_request *req)
req               684 drivers/crypto/mediatek/mtk-aes.c 	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
req               687 drivers/crypto/mediatek/mtk-aes.c static int mtk_aes_ecb_decrypt(struct ablkcipher_request *req)
req               689 drivers/crypto/mediatek/mtk-aes.c 	return mtk_aes_crypt(req, AES_FLAGS_ECB);
req               692 drivers/crypto/mediatek/mtk-aes.c static int mtk_aes_cbc_encrypt(struct ablkcipher_request *req)
req               694 drivers/crypto/mediatek/mtk-aes.c 	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
req               697 drivers/crypto/mediatek/mtk-aes.c static int mtk_aes_cbc_decrypt(struct ablkcipher_request *req)
req               699 drivers/crypto/mediatek/mtk-aes.c 	return mtk_aes_crypt(req, AES_FLAGS_CBC);
req               702 drivers/crypto/mediatek/mtk-aes.c static int mtk_aes_ctr_encrypt(struct ablkcipher_request *req)
req               704 drivers/crypto/mediatek/mtk-aes.c 	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
req               707 drivers/crypto/mediatek/mtk-aes.c static int mtk_aes_ctr_decrypt(struct ablkcipher_request *req)
req               709 drivers/crypto/mediatek/mtk-aes.c 	return mtk_aes_crypt(req, AES_FLAGS_CTR);
req               712 drivers/crypto/mediatek/mtk-aes.c static int mtk_aes_ofb_encrypt(struct ablkcipher_request *req)
req               714 drivers/crypto/mediatek/mtk-aes.c 	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
req               717 drivers/crypto/mediatek/mtk-aes.c static int mtk_aes_ofb_decrypt(struct ablkcipher_request *req)
req               719 drivers/crypto/mediatek/mtk-aes.c 	return mtk_aes_crypt(req, AES_FLAGS_OFB);
req               722 drivers/crypto/mediatek/mtk-aes.c static int mtk_aes_cfb_encrypt(struct ablkcipher_request *req)
req               724 drivers/crypto/mediatek/mtk-aes.c 	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB128);
req               727 drivers/crypto/mediatek/mtk-aes.c static int mtk_aes_cfb_decrypt(struct ablkcipher_request *req)
req               729 drivers/crypto/mediatek/mtk-aes.c 	return mtk_aes_crypt(req, AES_FLAGS_CFB128);
req               881 drivers/crypto/mediatek/mtk-aes.c 	struct aead_request *req = aead_request_cast(aes->areq);
req               885 drivers/crypto/mediatek/mtk-aes.c 	u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
req               890 drivers/crypto/mediatek/mtk-aes.c 	info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
req               891 drivers/crypto/mediatek/mtk-aes.c 	info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
req               912 drivers/crypto/mediatek/mtk-aes.c 			       AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
req               961 drivers/crypto/mediatek/mtk-aes.c 	struct aead_request *req = aead_request_cast(aes->areq);
req               962 drivers/crypto/mediatek/mtk-aes.c 	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
req               963 drivers/crypto/mediatek/mtk-aes.c 	u32 len = req->assoclen + req->cryptlen;
req               974 drivers/crypto/mediatek/mtk-aes.c 		scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
req               980 drivers/crypto/mediatek/mtk-aes.c 	return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
req               983 drivers/crypto/mediatek/mtk-aes.c static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
req               985 drivers/crypto/mediatek/mtk-aes.c 	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
req               987 drivers/crypto/mediatek/mtk-aes.c 	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
req               996 drivers/crypto/mediatek/mtk-aes.c 	gctx->textlen = req->cryptlen - (enc ? 0 : gctx->authsize);
req               999 drivers/crypto/mediatek/mtk-aes.c 	if (!gctx->textlen && !req->assoclen)
req              1004 drivers/crypto/mediatek/mtk-aes.c 	return mtk_aes_handle_queue(cryp, enc, &req->base);
req              1025 drivers/crypto/mediatek/mtk-aes.c 		struct skcipher_request req;
req              1064 drivers/crypto/mediatek/mtk-aes.c 	skcipher_request_set_tfm(&data->req, ctr);
req              1065 drivers/crypto/mediatek/mtk-aes.c 	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
req              1068 drivers/crypto/mediatek/mtk-aes.c 	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
req              1071 drivers/crypto/mediatek/mtk-aes.c 	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
req              1104 drivers/crypto/mediatek/mtk-aes.c static int mtk_aes_gcm_encrypt(struct aead_request *req)
req              1106 drivers/crypto/mediatek/mtk-aes.c 	return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
req              1109 drivers/crypto/mediatek/mtk-aes.c static int mtk_aes_gcm_decrypt(struct aead_request *req)
req              1111 drivers/crypto/mediatek/mtk-aes.c 	return mtk_aes_gcm_crypt(req, 0);
req               185 drivers/crypto/mediatek/mtk-platform.h 	struct ahash_request *req;
req               125 drivers/crypto/mediatek/mtk-sha.c 				struct ahash_request *req);
req               324 drivers/crypto/mediatek/mtk-sha.c 	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
req               356 drivers/crypto/mediatek/mtk-sha.c static int mtk_sha_finish_hmac(struct ahash_request *req)
req               358 drivers/crypto/mediatek/mtk-sha.c 	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
req               360 drivers/crypto/mediatek/mtk-sha.c 	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
req               368 drivers/crypto/mediatek/mtk-sha.c 	       crypto_shash_finup(shash, req->result, ctx->ds, req->result);
req               372 drivers/crypto/mediatek/mtk-sha.c static int mtk_sha_init(struct ahash_request *req)
req               374 drivers/crypto/mediatek/mtk-sha.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               376 drivers/crypto/mediatek/mtk-sha.c 	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
req               425 drivers/crypto/mediatek/mtk-sha.c 	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
req               488 drivers/crypto/mediatek/mtk-sha.c 	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
req               515 drivers/crypto/mediatek/mtk-sha.c 	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
req               605 drivers/crypto/mediatek/mtk-sha.c 	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
req               618 drivers/crypto/mediatek/mtk-sha.c static int mtk_sha_finish(struct ahash_request *req)
req               620 drivers/crypto/mediatek/mtk-sha.c 	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
req               622 drivers/crypto/mediatek/mtk-sha.c 	u32 *result = (u32 *)req->result;
req               630 drivers/crypto/mediatek/mtk-sha.c 		return mtk_sha_finish_hmac(req);
req               640 drivers/crypto/mediatek/mtk-sha.c 		err = mtk_sha_finish(sha->req);
req               644 drivers/crypto/mediatek/mtk-sha.c 	sha->req->base.complete(&sha->req->base, err);
req               651 drivers/crypto/mediatek/mtk-sha.c 				struct ahash_request *req)
req               660 drivers/crypto/mediatek/mtk-sha.c 	if (req)
req               661 drivers/crypto/mediatek/mtk-sha.c 		ret = ahash_enqueue_request(&sha->queue, req);
req               680 drivers/crypto/mediatek/mtk-sha.c 	req = ahash_request_cast(async_req);
req               681 drivers/crypto/mediatek/mtk-sha.c 	ctx = ahash_request_ctx(req);
req               683 drivers/crypto/mediatek/mtk-sha.c 	sha->req = req;
req               703 drivers/crypto/mediatek/mtk-sha.c static int mtk_sha_enqueue(struct ahash_request *req, u32 op)
req               705 drivers/crypto/mediatek/mtk-sha.c 	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
req               706 drivers/crypto/mediatek/mtk-sha.c 	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
req               710 drivers/crypto/mediatek/mtk-sha.c 	return mtk_sha_handle_queue(tctx->cryp, tctx->id, req);
req               715 drivers/crypto/mediatek/mtk-sha.c 	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
req               746 drivers/crypto/mediatek/mtk-sha.c static int mtk_sha_update(struct ahash_request *req)
req               748 drivers/crypto/mediatek/mtk-sha.c 	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
req               750 drivers/crypto/mediatek/mtk-sha.c 	ctx->total = req->nbytes;
req               751 drivers/crypto/mediatek/mtk-sha.c 	ctx->sg = req->src;
req               758 drivers/crypto/mediatek/mtk-sha.c 	return mtk_sha_enqueue(req, SHA_OP_UPDATE);
req               761 drivers/crypto/mediatek/mtk-sha.c static int mtk_sha_final(struct ahash_request *req)
req               763 drivers/crypto/mediatek/mtk-sha.c 	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
req               768 drivers/crypto/mediatek/mtk-sha.c 		return mtk_sha_finish(req);
req               770 drivers/crypto/mediatek/mtk-sha.c 	return mtk_sha_enqueue(req, SHA_OP_FINAL);
req               773 drivers/crypto/mediatek/mtk-sha.c static int mtk_sha_finup(struct ahash_request *req)
req               775 drivers/crypto/mediatek/mtk-sha.c 	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
req               780 drivers/crypto/mediatek/mtk-sha.c 	err1 = mtk_sha_update(req);
req               782 drivers/crypto/mediatek/mtk-sha.c 	    (err1 == -EBUSY && (ahash_request_flags(req) &
req               789 drivers/crypto/mediatek/mtk-sha.c 	err2 = mtk_sha_final(req);
req               794 drivers/crypto/mediatek/mtk-sha.c static int mtk_sha_digest(struct ahash_request *req)
req               796 drivers/crypto/mediatek/mtk-sha.c 	return mtk_sha_init(req) ?: mtk_sha_finup(req);
req               832 drivers/crypto/mediatek/mtk-sha.c static int mtk_sha_export(struct ahash_request *req, void *out)
req               834 drivers/crypto/mediatek/mtk-sha.c 	const struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
req               840 drivers/crypto/mediatek/mtk-sha.c static int mtk_sha_import(struct ahash_request *req, const void *in)
req               842 drivers/crypto/mediatek/mtk-sha.c 	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
req               215 drivers/crypto/mxs-dcp.c 			   struct ablkcipher_request *req, int init)
req               219 drivers/crypto/mxs-dcp.c 	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
req               278 drivers/crypto/mxs-dcp.c 	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
req               280 drivers/crypto/mxs-dcp.c 	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
req               282 drivers/crypto/mxs-dcp.c 	struct scatterlist *dst = req->dst;
req               283 drivers/crypto/mxs-dcp.c 	struct scatterlist *src = req->src;
req               284 drivers/crypto/mxs-dcp.c 	const int nents = sg_nents(req->src);
req               309 drivers/crypto/mxs-dcp.c 		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
req               316 drivers/crypto/mxs-dcp.c 	for_each_sg(req->src, src, nents, i) {
req               320 drivers/crypto/mxs-dcp.c 		limit_hit = tlen > req->nbytes;
req               323 drivers/crypto/mxs-dcp.c 			len = req->nbytes - (tlen - len);
req               342 drivers/crypto/mxs-dcp.c 				ret = mxs_dcp_run_aes(actx, req, init);
req               379 drivers/crypto/mxs-dcp.c 			memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
req               382 drivers/crypto/mxs-dcp.c 			memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
req               426 drivers/crypto/mxs-dcp.c static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
req               428 drivers/crypto/mxs-dcp.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req               434 drivers/crypto/mxs-dcp.c 	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
req               435 drivers/crypto/mxs-dcp.c 	skcipher_request_set_crypt(subreq, req->src, req->dst,
req               436 drivers/crypto/mxs-dcp.c 				   req->nbytes, req->info);
req               448 drivers/crypto/mxs-dcp.c static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
req               451 drivers/crypto/mxs-dcp.c 	struct crypto_async_request *arq = &req->base;
req               453 drivers/crypto/mxs-dcp.c 	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
req               457 drivers/crypto/mxs-dcp.c 		return mxs_dcp_block_fallback(req, enc);
req               464 drivers/crypto/mxs-dcp.c 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
req               472 drivers/crypto/mxs-dcp.c static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
req               474 drivers/crypto/mxs-dcp.c 	return mxs_dcp_aes_enqueue(req, 0, 1);
req               477 drivers/crypto/mxs-dcp.c static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
req               479 drivers/crypto/mxs-dcp.c 	return mxs_dcp_aes_enqueue(req, 1, 1);
req               482 drivers/crypto/mxs-dcp.c static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
req               484 drivers/crypto/mxs-dcp.c 	return mxs_dcp_aes_enqueue(req, 0, 0);
req               487 drivers/crypto/mxs-dcp.c static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
req               489 drivers/crypto/mxs-dcp.c 	return mxs_dcp_aes_enqueue(req, 1, 0);
req               554 drivers/crypto/mxs-dcp.c static int mxs_dcp_run_sha(struct ahash_request *req)
req               559 drivers/crypto/mxs-dcp.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               561 drivers/crypto/mxs-dcp.c 	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
req               620 drivers/crypto/mxs-dcp.c 	struct ahash_request *req = ahash_request_cast(arq);
req               621 drivers/crypto/mxs-dcp.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               623 drivers/crypto/mxs-dcp.c 	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
req               638 drivers/crypto/mxs-dcp.c 	src = req->src;
req               639 drivers/crypto/mxs-dcp.c 	len = req->nbytes;
req               659 drivers/crypto/mxs-dcp.c 			ret = mxs_dcp_run_sha(req);
req               671 drivers/crypto/mxs-dcp.c 		if (!req->result)
req               674 drivers/crypto/mxs-dcp.c 		ret = mxs_dcp_run_sha(req);
req               682 drivers/crypto/mxs-dcp.c 			req->result[i] = out_buf[halg->digestsize - i - 1];
req               724 drivers/crypto/mxs-dcp.c static int dcp_sha_init(struct ahash_request *req)
req               726 drivers/crypto/mxs-dcp.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               751 drivers/crypto/mxs-dcp.c static int dcp_sha_update_fx(struct ahash_request *req, int fini)
req               755 drivers/crypto/mxs-dcp.c 	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
req               756 drivers/crypto/mxs-dcp.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               765 drivers/crypto/mxs-dcp.c 	if (!req->nbytes && !fini)
req               778 drivers/crypto/mxs-dcp.c 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
req               787 drivers/crypto/mxs-dcp.c static int dcp_sha_update(struct ahash_request *req)
req               789 drivers/crypto/mxs-dcp.c 	return dcp_sha_update_fx(req, 0);
req               792 drivers/crypto/mxs-dcp.c static int dcp_sha_final(struct ahash_request *req)
req               794 drivers/crypto/mxs-dcp.c 	ahash_request_set_crypt(req, NULL, req->result, 0);
req               795 drivers/crypto/mxs-dcp.c 	req->nbytes = 0;
req               796 drivers/crypto/mxs-dcp.c 	return dcp_sha_update_fx(req, 1);
req               799 drivers/crypto/mxs-dcp.c static int dcp_sha_finup(struct ahash_request *req)
req               801 drivers/crypto/mxs-dcp.c 	return dcp_sha_update_fx(req, 1);
req               804 drivers/crypto/mxs-dcp.c static int dcp_sha_digest(struct ahash_request *req)
req               808 drivers/crypto/mxs-dcp.c 	ret = dcp_sha_init(req);
req               812 drivers/crypto/mxs-dcp.c 	return dcp_sha_finup(req);
req               815 drivers/crypto/mxs-dcp.c static int dcp_sha_import(struct ahash_request *req, const void *in)
req               817 drivers/crypto/mxs-dcp.c 	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
req               818 drivers/crypto/mxs-dcp.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               830 drivers/crypto/mxs-dcp.c static int dcp_sha_export(struct ahash_request *req, void *out)
req               832 drivers/crypto/mxs-dcp.c 	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
req               833 drivers/crypto/mxs-dcp.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               309 drivers/crypto/n2_core.c static int n2_hash_async_init(struct ahash_request *req)
req               311 drivers/crypto/n2_core.c 	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
req               312 drivers/crypto/n2_core.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               316 drivers/crypto/n2_core.c 	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
req               321 drivers/crypto/n2_core.c static int n2_hash_async_update(struct ahash_request *req)
req               323 drivers/crypto/n2_core.c 	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
req               324 drivers/crypto/n2_core.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               328 drivers/crypto/n2_core.c 	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
req               329 drivers/crypto/n2_core.c 	rctx->fallback_req.nbytes = req->nbytes;
req               330 drivers/crypto/n2_core.c 	rctx->fallback_req.src = req->src;
req               335 drivers/crypto/n2_core.c static int n2_hash_async_final(struct ahash_request *req)
req               337 drivers/crypto/n2_core.c 	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
req               338 drivers/crypto/n2_core.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               342 drivers/crypto/n2_core.c 	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
req               343 drivers/crypto/n2_core.c 	rctx->fallback_req.result = req->result;
req               348 drivers/crypto/n2_core.c static int n2_hash_async_finup(struct ahash_request *req)
req               350 drivers/crypto/n2_core.c 	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
req               351 drivers/crypto/n2_core.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               355 drivers/crypto/n2_core.c 	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
req               356 drivers/crypto/n2_core.c 	rctx->fallback_req.nbytes = req->nbytes;
req               357 drivers/crypto/n2_core.c 	rctx->fallback_req.src = req->src;
req               358 drivers/crypto/n2_core.c 	rctx->fallback_req.result = req->result;
req               363 drivers/crypto/n2_core.c static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
req               368 drivers/crypto/n2_core.c static int n2_hash_async_noexport(struct ahash_request *req, void *out)
req               520 drivers/crypto/n2_core.c static int n2_do_async_digest(struct ahash_request *req,
req               525 drivers/crypto/n2_core.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               536 drivers/crypto/n2_core.c 	if (unlikely(req->nbytes > (1 << 16))) {
req               537 drivers/crypto/n2_core.c 		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
req               542 drivers/crypto/n2_core.c 			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
req               543 drivers/crypto/n2_core.c 		rctx->fallback_req.nbytes = req->nbytes;
req               544 drivers/crypto/n2_core.c 		rctx->fallback_req.src = req->src;
req               545 drivers/crypto/n2_core.c 		rctx->fallback_req.result = req->result;
req               550 drivers/crypto/n2_core.c 	nbytes = crypto_hash_walk_first(req, &walk);
req               602 drivers/crypto/n2_core.c 		memcpy(req->result, hash_loc, result_size);
req               609 drivers/crypto/n2_core.c static int n2_hash_async_digest(struct ahash_request *req)
req               611 drivers/crypto/n2_core.c 	struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
req               612 drivers/crypto/n2_core.c 	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
req               616 drivers/crypto/n2_core.c 	if (unlikely(req->nbytes == 0)) {
req               617 drivers/crypto/n2_core.c 		memcpy(req->result, n2alg->hash_zero, ds);
req               622 drivers/crypto/n2_core.c 	return n2_do_async_digest(req, n2alg->auth_type,
req               627 drivers/crypto/n2_core.c static int n2_hmac_async_digest(struct ahash_request *req)
req               629 drivers/crypto/n2_core.c 	struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
req               630 drivers/crypto/n2_core.c 	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
req               631 drivers/crypto/n2_core.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               636 drivers/crypto/n2_core.c 	if (unlikely(req->nbytes == 0) ||
req               638 drivers/crypto/n2_core.c 		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
req               643 drivers/crypto/n2_core.c 			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
req               644 drivers/crypto/n2_core.c 		rctx->fallback_req.nbytes = req->nbytes;
req               645 drivers/crypto/n2_core.c 		rctx->fallback_req.src = req->src;
req               646 drivers/crypto/n2_core.c 		rctx->fallback_req.result = req->result;
req               653 drivers/crypto/n2_core.c 	return n2_do_async_digest(req, n2alg->derived.hmac_type,
req               880 drivers/crypto/n2_core.c static int n2_compute_chunks(struct ablkcipher_request *req)
req               882 drivers/crypto/n2_core.c 	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
req               890 drivers/crypto/n2_core.c 	ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
req               891 drivers/crypto/n2_core.c 	err = ablkcipher_walk_phys(req, walk);
req               949 drivers/crypto/n2_core.c 		err = ablkcipher_walk_done(req, walk, nbytes - this_len);
req               961 drivers/crypto/n2_core.c static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
req               963 drivers/crypto/n2_core.c 	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
req               978 drivers/crypto/n2_core.c static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
req               980 drivers/crypto/n2_core.c 	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
req               981 drivers/crypto/n2_core.c 	struct crypto_tfm *tfm = req->base.tfm;
req               982 drivers/crypto/n2_core.c 	int err = n2_compute_chunks(req);
req              1016 drivers/crypto/n2_core.c 	n2_chunk_complete(req, NULL);
req              1020 drivers/crypto/n2_core.c static int n2_encrypt_ecb(struct ablkcipher_request *req)
req              1022 drivers/crypto/n2_core.c 	return n2_do_ecb(req, true);
req              1025 drivers/crypto/n2_core.c static int n2_decrypt_ecb(struct ablkcipher_request *req)
req              1027 drivers/crypto/n2_core.c 	return n2_do_ecb(req, false);
req              1030 drivers/crypto/n2_core.c static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
req              1032 drivers/crypto/n2_core.c 	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
req              1033 drivers/crypto/n2_core.c 	struct crypto_tfm *tfm = req->base.tfm;
req              1035 drivers/crypto/n2_core.c 	int err = n2_compute_chunks(req);
req              1106 drivers/crypto/n2_core.c 	n2_chunk_complete(req, err ? NULL : final_iv_addr);
req              1110 drivers/crypto/n2_core.c static int n2_encrypt_chaining(struct ablkcipher_request *req)
req              1112 drivers/crypto/n2_core.c 	return n2_do_chaining(req, true);
req              1115 drivers/crypto/n2_core.c static int n2_decrypt_chaining(struct ablkcipher_request *req)
req              1117 drivers/crypto/n2_core.c 	return n2_do_chaining(req, false);
req               157 drivers/crypto/nx/nx-aes-ccm.c 			struct aead_request  *req,
req               223 drivers/crypto/nx/nx-aes-ccm.c 			scatterwalk_map_and_copy(b1 + 2, req->src, 0,
req               228 drivers/crypto/nx/nx-aes-ccm.c 			scatterwalk_map_and_copy(b1 + 6, req->src, 0,
req               263 drivers/crypto/nx/nx-aes-ccm.c 				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
req               287 drivers/crypto/nx/nx-aes-ccm.c 						    req->src, processed,
req               305 drivers/crypto/nx/nx-aes-ccm.c 				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
req               329 drivers/crypto/nx/nx-aes-ccm.c static int ccm_nx_decrypt(struct aead_request   *req,
req               333 drivers/crypto/nx/nx-aes-ccm.c 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
req               335 drivers/crypto/nx/nx-aes-ccm.c 	unsigned int nbytes = req->cryptlen;
req               336 drivers/crypto/nx/nx-aes-ccm.c 	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
req               348 drivers/crypto/nx/nx-aes-ccm.c 				 req->src, nbytes + req->assoclen, authsize,
req               351 drivers/crypto/nx/nx-aes-ccm.c 	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
req               370 drivers/crypto/nx/nx-aes-ccm.c 		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
req               371 drivers/crypto/nx/nx-aes-ccm.c 				       &to_process, processed + req->assoclen,
req               377 drivers/crypto/nx/nx-aes-ccm.c 			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
req               407 drivers/crypto/nx/nx-aes-ccm.c static int ccm_nx_encrypt(struct aead_request   *req,
req               411 drivers/crypto/nx/nx-aes-ccm.c 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
req               413 drivers/crypto/nx/nx-aes-ccm.c 	unsigned int nbytes = req->cryptlen;
req               414 drivers/crypto/nx/nx-aes-ccm.c 	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
req               421 drivers/crypto/nx/nx-aes-ccm.c 	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
req               439 drivers/crypto/nx/nx-aes-ccm.c 		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
req               440 drivers/crypto/nx/nx-aes-ccm.c 				       &to_process, processed + req->assoclen,
req               446 drivers/crypto/nx/nx-aes-ccm.c 				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
req               472 drivers/crypto/nx/nx-aes-ccm.c 				 req->dst, nbytes + req->assoclen, authsize,
req               480 drivers/crypto/nx/nx-aes-ccm.c static int ccm4309_aes_nx_encrypt(struct aead_request *req)
req               482 drivers/crypto/nx/nx-aes-ccm.c 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
req               483 drivers/crypto/nx/nx-aes-ccm.c 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
req               489 drivers/crypto/nx/nx-aes-ccm.c 	memcpy(iv + 4, req->iv, 8);
req               493 drivers/crypto/nx/nx-aes-ccm.c 	return ccm_nx_encrypt(req, &desc, req->assoclen - 8);
req               496 drivers/crypto/nx/nx-aes-ccm.c static int ccm_aes_nx_encrypt(struct aead_request *req)
req               501 drivers/crypto/nx/nx-aes-ccm.c 	desc.info = req->iv;
req               507 drivers/crypto/nx/nx-aes-ccm.c 	return ccm_nx_encrypt(req, &desc, req->assoclen);
req               510 drivers/crypto/nx/nx-aes-ccm.c static int ccm4309_aes_nx_decrypt(struct aead_request *req)
req               512 drivers/crypto/nx/nx-aes-ccm.c 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
req               513 drivers/crypto/nx/nx-aes-ccm.c 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
req               519 drivers/crypto/nx/nx-aes-ccm.c 	memcpy(iv + 4, req->iv, 8);
req               523 drivers/crypto/nx/nx-aes-ccm.c 	return ccm_nx_decrypt(req, &desc, req->assoclen - 8);
req               526 drivers/crypto/nx/nx-aes-ccm.c static int ccm_aes_nx_decrypt(struct aead_request *req)
req               531 drivers/crypto/nx/nx-aes-ccm.c 	desc.info = req->iv;
req               537 drivers/crypto/nx/nx-aes-ccm.c 	return ccm_nx_decrypt(req, &desc, req->assoclen);
req               100 drivers/crypto/nx/nx-aes-gcm.c 		  struct aead_request   *req,
req               113 drivers/crypto/nx/nx-aes-gcm.c 		scatterwalk_start(&walk, req->src);
req               138 drivers/crypto/nx/nx-aes-gcm.c 					  req->src, processed, &to_process);
req               149 drivers/crypto/nx/nx-aes-gcm.c 				req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
req               169 drivers/crypto/nx/nx-aes-gcm.c static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
req               174 drivers/crypto/nx/nx-aes-gcm.c 		crypto_aead_ctx(crypto_aead_reqtfm(req));
req               206 drivers/crypto/nx/nx-aes-gcm.c 					  req->src, processed, &to_process);
req               220 drivers/crypto/nx/nx-aes-gcm.c 				req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
req               243 drivers/crypto/nx/nx-aes-gcm.c static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
req               248 drivers/crypto/nx/nx-aes-gcm.c 		crypto_aead_ctx(crypto_aead_reqtfm(req));
req               295 drivers/crypto/nx/nx-aes-gcm.c 			crypto_aead_authsize(crypto_aead_reqtfm(req)));
req               309 drivers/crypto/nx/nx-aes-gcm.c static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
req               313 drivers/crypto/nx/nx-aes-gcm.c 		crypto_aead_ctx(crypto_aead_reqtfm(req));
req               314 drivers/crypto/nx/nx-aes-gcm.c 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
req               317 drivers/crypto/nx/nx-aes-gcm.c 	unsigned int nbytes = req->cryptlen;
req               330 drivers/crypto/nx/nx-aes-gcm.c 			rc = gcm_empty(req, &desc, enc);
req               332 drivers/crypto/nx/nx-aes-gcm.c 			rc = gmac(req, &desc, assoclen);
req               342 drivers/crypto/nx/nx-aes-gcm.c 		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
req               354 drivers/crypto/nx/nx-aes-gcm.c 		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
req               361 drivers/crypto/nx/nx-aes-gcm.c 		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
req               362 drivers/crypto/nx/nx-aes-gcm.c 				       req->src, &to_process,
req               363 drivers/crypto/nx/nx-aes-gcm.c 				       processed + req->assoclen,
req               376 drivers/crypto/nx/nx-aes-gcm.c 				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
req               400 drivers/crypto/nx/nx-aes-gcm.c 			req->dst, req->assoclen + nbytes,
req               401 drivers/crypto/nx/nx-aes-gcm.c 			crypto_aead_authsize(crypto_aead_reqtfm(req)),
req               408 drivers/crypto/nx/nx-aes-gcm.c 			itag, req->src, req->assoclen + nbytes,
req               409 drivers/crypto/nx/nx-aes-gcm.c 			crypto_aead_authsize(crypto_aead_reqtfm(req)),
req               412 drivers/crypto/nx/nx-aes-gcm.c 			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
req               420 drivers/crypto/nx/nx-aes-gcm.c static int gcm_aes_nx_encrypt(struct aead_request *req)
req               422 drivers/crypto/nx/nx-aes-gcm.c 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
req               425 drivers/crypto/nx/nx-aes-gcm.c 	memcpy(iv, req->iv, GCM_AES_IV_SIZE);
req               427 drivers/crypto/nx/nx-aes-gcm.c 	return gcm_aes_nx_crypt(req, 1, req->assoclen);
req               430 drivers/crypto/nx/nx-aes-gcm.c static int gcm_aes_nx_decrypt(struct aead_request *req)
req               432 drivers/crypto/nx/nx-aes-gcm.c 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
req               435 drivers/crypto/nx/nx-aes-gcm.c 	memcpy(iv, req->iv, GCM_AES_IV_SIZE);
req               437 drivers/crypto/nx/nx-aes-gcm.c 	return gcm_aes_nx_crypt(req, 0, req->assoclen);
req               440 drivers/crypto/nx/nx-aes-gcm.c static int gcm4106_aes_nx_encrypt(struct aead_request *req)
req               443 drivers/crypto/nx/nx-aes-gcm.c 		crypto_aead_ctx(crypto_aead_reqtfm(req));
req               444 drivers/crypto/nx/nx-aes-gcm.c 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
req               449 drivers/crypto/nx/nx-aes-gcm.c 	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
req               451 drivers/crypto/nx/nx-aes-gcm.c 	if (req->assoclen < 8)
req               454 drivers/crypto/nx/nx-aes-gcm.c 	return gcm_aes_nx_crypt(req, 1, req->assoclen - 8);
req               457 drivers/crypto/nx/nx-aes-gcm.c static int gcm4106_aes_nx_decrypt(struct aead_request *req)
req               460 drivers/crypto/nx/nx-aes-gcm.c 		crypto_aead_ctx(crypto_aead_reqtfm(req));
req               461 drivers/crypto/nx/nx-aes-gcm.c 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
req               466 drivers/crypto/nx/nx-aes-gcm.c 	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
req               468 drivers/crypto/nx/nx-aes-gcm.c 	if (req->assoclen < 8)
req               471 drivers/crypto/nx/nx-aes-gcm.c 	return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
req                26 drivers/crypto/omap-aes-gcm.c 				     struct aead_request *req);
req                30 drivers/crypto/omap-aes-gcm.c 	struct aead_request *req = dd->aead_req;
req                36 drivers/crypto/omap-aes-gcm.c 	req->base.complete(&req->base, ret);
req                88 drivers/crypto/omap-aes-gcm.c 				     struct aead_request *req)
req                91 drivers/crypto/omap-aes-gcm.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req                97 drivers/crypto/omap-aes-gcm.c 	assoclen = req->assoclen;
req                98 drivers/crypto/omap-aes-gcm.c 	cryptlen = req->cryptlen;
req               115 drivers/crypto/omap-aes-gcm.c 		tmp = req->src;
req               126 drivers/crypto/omap-aes-gcm.c 		tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);
req               142 drivers/crypto/omap-aes-gcm.c 	dd->out_sg = req->dst;
req               143 drivers/crypto/omap-aes-gcm.c 	dd->orig_out = req->dst;
req               145 drivers/crypto/omap-aes-gcm.c 	dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen);
req               148 drivers/crypto/omap-aes-gcm.c 	if (req->src == req->dst || dd->out_sg == sg_arr)
req               164 drivers/crypto/omap-aes-gcm.c static void omap_aes_gcm_complete(struct crypto_async_request *req, int err)
req               166 drivers/crypto/omap-aes-gcm.c 	struct omap_aes_gcm_result *res = req->data;
req               175 drivers/crypto/omap-aes-gcm.c static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
req               180 drivers/crypto/omap-aes-gcm.c 	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
req               247 drivers/crypto/omap-aes-gcm.c 				     struct aead_request *req)
req               256 drivers/crypto/omap-aes-gcm.c 	if (req)
req               257 drivers/crypto/omap-aes-gcm.c 		ret = aead_enqueue_request(&dd->aead_queue, req);
req               264 drivers/crypto/omap-aes-gcm.c 	req = aead_dequeue_request(&dd->aead_queue);
req               265 drivers/crypto/omap-aes-gcm.c 	if (req)
req               269 drivers/crypto/omap-aes-gcm.c 	if (!req)
req               275 drivers/crypto/omap-aes-gcm.c 	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
req               276 drivers/crypto/omap-aes-gcm.c 	rctx = aead_request_ctx(req);
req               280 drivers/crypto/omap-aes-gcm.c 	dd->aead_req = req;
req               285 drivers/crypto/omap-aes-gcm.c 	err = omap_aes_gcm_copy_buffers(dd, req);
req               301 drivers/crypto/omap-aes-gcm.c static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
req               303 drivers/crypto/omap-aes-gcm.c 	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
req               304 drivers/crypto/omap-aes-gcm.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               313 drivers/crypto/omap-aes-gcm.c 	err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
req               318 drivers/crypto/omap-aes-gcm.c 		assoclen = req->assoclen - 8;
req               320 drivers/crypto/omap-aes-gcm.c 		assoclen = req->assoclen;
req               321 drivers/crypto/omap-aes-gcm.c 	if (assoclen + req->cryptlen == 0) {
req               322 drivers/crypto/omap-aes-gcm.c 		scatterwalk_map_and_copy(rctx->auth_tag, req->dst, 0, authlen,
req               332 drivers/crypto/omap-aes-gcm.c 	return omap_aes_gcm_handle_queue(dd, req);
req               335 drivers/crypto/omap-aes-gcm.c int omap_aes_gcm_encrypt(struct aead_request *req)
req               337 drivers/crypto/omap-aes-gcm.c 	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
req               339 drivers/crypto/omap-aes-gcm.c 	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
req               340 drivers/crypto/omap-aes-gcm.c 	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
req               343 drivers/crypto/omap-aes-gcm.c int omap_aes_gcm_decrypt(struct aead_request *req)
req               345 drivers/crypto/omap-aes-gcm.c 	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
req               347 drivers/crypto/omap-aes-gcm.c 	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
req               348 drivers/crypto/omap-aes-gcm.c 	return omap_aes_gcm_crypt(req, FLAGS_GCM);
req               351 drivers/crypto/omap-aes-gcm.c int omap_aes_4106gcm_encrypt(struct aead_request *req)
req               353 drivers/crypto/omap-aes-gcm.c 	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
req               354 drivers/crypto/omap-aes-gcm.c 	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
req               357 drivers/crypto/omap-aes-gcm.c 	memcpy(rctx->iv + 4, req->iv, 8);
req               358 drivers/crypto/omap-aes-gcm.c 	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
req               362 drivers/crypto/omap-aes-gcm.c int omap_aes_4106gcm_decrypt(struct aead_request *req)
req               364 drivers/crypto/omap-aes-gcm.c 	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
req               365 drivers/crypto/omap-aes-gcm.c 	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
req               368 drivers/crypto/omap-aes-gcm.c 	memcpy(rctx->iv + 4, req->iv, 8);
req               369 drivers/crypto/omap-aes-gcm.c 	return omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
req               145 drivers/crypto/omap-aes.c 	if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
req               146 drivers/crypto/omap-aes.c 		omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);
req               385 drivers/crypto/omap-aes.c 	struct ablkcipher_request *req = dd->req;
req               389 drivers/crypto/omap-aes.c 	crypto_finalize_ablkcipher_request(dd->engine, req, err);
req               406 drivers/crypto/omap-aes.c 				 struct ablkcipher_request *req)
req               408 drivers/crypto/omap-aes.c 	if (req)
req               409 drivers/crypto/omap-aes.c 		return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
req               417 drivers/crypto/omap-aes.c 	struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
req               419 drivers/crypto/omap-aes.c 			crypto_ablkcipher_reqtfm(req));
req               420 drivers/crypto/omap-aes.c 	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
req               429 drivers/crypto/omap-aes.c 	dd->req = req;
req               430 drivers/crypto/omap-aes.c 	dd->total = req->nbytes;
req               431 drivers/crypto/omap-aes.c 	dd->total_save = req->nbytes;
req               432 drivers/crypto/omap-aes.c 	dd->in_sg = req->src;
req               433 drivers/crypto/omap-aes.c 	dd->out_sg = req->dst;
req               434 drivers/crypto/omap-aes.c 	dd->orig_out = req->dst;
req               437 drivers/crypto/omap-aes.c 	if (req->src == req->dst)
req               472 drivers/crypto/omap-aes.c 	struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
req               473 drivers/crypto/omap-aes.c 	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
req               508 drivers/crypto/omap-aes.c static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
req               511 drivers/crypto/omap-aes.c 			crypto_ablkcipher_reqtfm(req));
req               512 drivers/crypto/omap-aes.c 	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
req               516 drivers/crypto/omap-aes.c 	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
req               520 drivers/crypto/omap-aes.c 	if (req->nbytes < aes_fallback_sz) {
req               524 drivers/crypto/omap-aes.c 		skcipher_request_set_callback(subreq, req->base.flags, NULL,
req               526 drivers/crypto/omap-aes.c 		skcipher_request_set_crypt(subreq, req->src, req->dst,
req               527 drivers/crypto/omap-aes.c 					   req->nbytes, req->info);
req               543 drivers/crypto/omap-aes.c 	return omap_aes_handle_queue(dd, req);
req               574 drivers/crypto/omap-aes.c static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
req               576 drivers/crypto/omap-aes.c 	return omap_aes_crypt(req, FLAGS_ENCRYPT);
req               579 drivers/crypto/omap-aes.c static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
req               581 drivers/crypto/omap-aes.c 	return omap_aes_crypt(req, 0);
req               584 drivers/crypto/omap-aes.c static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
req               586 drivers/crypto/omap-aes.c 	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
req               589 drivers/crypto/omap-aes.c static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
req               591 drivers/crypto/omap-aes.c 	return omap_aes_crypt(req, FLAGS_CBC);
req               594 drivers/crypto/omap-aes.c static int omap_aes_ctr_encrypt(struct ablkcipher_request *req)
req               596 drivers/crypto/omap-aes.c 	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
req               599 drivers/crypto/omap-aes.c static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
req               601 drivers/crypto/omap-aes.c 	return omap_aes_crypt(req, FLAGS_CTR);
req               605 drivers/crypto/omap-aes.c 				void *req);
req               607 drivers/crypto/omap-aes.c 			      void *req);
req               165 drivers/crypto/omap-aes.h 	struct ablkcipher_request	*req;
req               203 drivers/crypto/omap-aes.h int omap_aes_gcm_encrypt(struct aead_request *req);
req               204 drivers/crypto/omap-aes.h int omap_aes_gcm_decrypt(struct aead_request *req);
req               205 drivers/crypto/omap-aes.h int omap_aes_4106gcm_encrypt(struct aead_request *req);
req               206 drivers/crypto/omap-aes.h int omap_aes_4106gcm_decrypt(struct aead_request *req);
req               142 drivers/crypto/omap-des.c 	struct ablkcipher_request	*req;
req               264 drivers/crypto/omap-des.c 	if ((dd->flags & FLAGS_CBC) && dd->req->info)
req               265 drivers/crypto/omap-des.c 		omap_des_write_n(dd, DES_REG_IV(dd, 0), dd->req->info, 2);
req               460 drivers/crypto/omap-des.c 					crypto_ablkcipher_reqtfm(dd->req));
req               494 drivers/crypto/omap-des.c 	struct ablkcipher_request *req = dd->req;
req               498 drivers/crypto/omap-des.c 	crypto_finalize_ablkcipher_request(dd->engine, req, err);
req               517 drivers/crypto/omap-des.c 				 struct ablkcipher_request *req)
req               519 drivers/crypto/omap-des.c 	if (req)
req               520 drivers/crypto/omap-des.c 		return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
req               528 drivers/crypto/omap-des.c 	struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
req               530 drivers/crypto/omap-des.c 			crypto_ablkcipher_reqtfm(req));
req               540 drivers/crypto/omap-des.c 	dd->req = req;
req               541 drivers/crypto/omap-des.c 	dd->total = req->nbytes;
req               542 drivers/crypto/omap-des.c 	dd->total_save = req->nbytes;
req               543 drivers/crypto/omap-des.c 	dd->in_sg = req->src;
req               544 drivers/crypto/omap-des.c 	dd->out_sg = req->dst;
req               545 drivers/crypto/omap-des.c 	dd->orig_out = req->dst;
req               548 drivers/crypto/omap-des.c 	if (req->src == req->dst)
req               571 drivers/crypto/omap-des.c 	rctx = ablkcipher_request_ctx(req);
req               572 drivers/crypto/omap-des.c 	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
req               585 drivers/crypto/omap-des.c 	struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
req               587 drivers/crypto/omap-des.c 			crypto_ablkcipher_reqtfm(req));
req               622 drivers/crypto/omap-des.c static int omap_des_crypt(struct ablkcipher_request *req, unsigned long mode)
req               625 drivers/crypto/omap-des.c 			crypto_ablkcipher_reqtfm(req));
req               626 drivers/crypto/omap-des.c 	struct omap_des_reqctx *rctx = ablkcipher_request_ctx(req);
req               629 drivers/crypto/omap-des.c 	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
req               633 drivers/crypto/omap-des.c 	if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) {
req               644 drivers/crypto/omap-des.c 	return omap_des_handle_queue(dd, req);
req               685 drivers/crypto/omap-des.c static int omap_des_ecb_encrypt(struct ablkcipher_request *req)
req               687 drivers/crypto/omap-des.c 	return omap_des_crypt(req, FLAGS_ENCRYPT);
req               690 drivers/crypto/omap-des.c static int omap_des_ecb_decrypt(struct ablkcipher_request *req)
req               692 drivers/crypto/omap-des.c 	return omap_des_crypt(req, 0);
req               695 drivers/crypto/omap-des.c static int omap_des_cbc_encrypt(struct ablkcipher_request *req)
req               697 drivers/crypto/omap-des.c 	return omap_des_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
req               700 drivers/crypto/omap-des.c static int omap_des_cbc_decrypt(struct ablkcipher_request *req)
req               702 drivers/crypto/omap-des.c 	return omap_des_crypt(req, FLAGS_CBC);
req               192 drivers/crypto/omap-sham.c 	void		(*copy_hash)(struct ahash_request *req, int out);
req               231 drivers/crypto/omap-sham.c 	struct ahash_request	*req;
req               281 drivers/crypto/omap-sham.c static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
req               283 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
req               296 drivers/crypto/omap-sham.c static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
req               298 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
req               303 drivers/crypto/omap-sham.c 		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
req               318 drivers/crypto/omap-sham.c 	omap_sham_copy_hash_omap2(req, out);
req               321 drivers/crypto/omap-sham.c static void omap_sham_copy_ready_hash(struct ahash_request *req)
req               323 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
req               325 drivers/crypto/omap-sham.c 	u32 *hash = (u32 *)req->result;
req               386 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
req               455 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
req               465 drivers/crypto/omap-sham.c 		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
req               518 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
req               585 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
req               808 drivers/crypto/omap-sham.c static int omap_sham_prepare_request(struct ahash_request *req, bool update)
req               810 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
req               820 drivers/crypto/omap-sham.c 		nbytes = req->nbytes;
req               834 drivers/crypto/omap-sham.c 		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
req               844 drivers/crypto/omap-sham.c 	ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx);
req               868 drivers/crypto/omap-sham.c 		sg_chain(rctx->sgl, 2, req->src);
req               886 drivers/crypto/omap-sham.c 		if (hash_later > req->nbytes) {
req               888 drivers/crypto/omap-sham.c 			       hash_later - req->nbytes);
req               889 drivers/crypto/omap-sham.c 			offset = hash_later - req->nbytes;
req               892 drivers/crypto/omap-sham.c 		if (req->nbytes) {
req               894 drivers/crypto/omap-sham.c 						 req->src,
req               895 drivers/crypto/omap-sham.c 						 offset + req->nbytes -
req               912 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
req               921 drivers/crypto/omap-sham.c static int omap_sham_init(struct ahash_request *req)
req               923 drivers/crypto/omap-sham.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               925 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
req               998 drivers/crypto/omap-sham.c 	struct ahash_request *req = dd->req;
req               999 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
req              1023 drivers/crypto/omap-sham.c 	struct ahash_request *req = dd->req;
req              1024 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
req              1046 drivers/crypto/omap-sham.c static int omap_sham_finish_hmac(struct ahash_request *req)
req              1048 drivers/crypto/omap-sham.c 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
req              1058 drivers/crypto/omap-sham.c 	       crypto_shash_finup(shash, req->result, ds, req->result);
req              1061 drivers/crypto/omap-sham.c static int omap_sham_finish(struct ahash_request *req)
req              1063 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
req              1068 drivers/crypto/omap-sham.c 		omap_sham_copy_ready_hash(req);
req              1071 drivers/crypto/omap-sham.c 			err = omap_sham_finish_hmac(req);
req              1079 drivers/crypto/omap-sham.c static void omap_sham_finish_req(struct ahash_request *req, int err)
req              1081 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
req              1096 drivers/crypto/omap-sham.c 		dd->pdata->copy_hash(req, 1);
req              1098 drivers/crypto/omap-sham.c 			err = omap_sham_finish(req);
req              1110 drivers/crypto/omap-sham.c 	if (req->base.complete)
req              1111 drivers/crypto/omap-sham.c 		req->base.complete(&req->base, err);
req              1115 drivers/crypto/omap-sham.c 				  struct ahash_request *req)
req              1124 drivers/crypto/omap-sham.c 	if (req)
req              1125 drivers/crypto/omap-sham.c 		ret = ahash_enqueue_request(&dd->queue, req);
req              1142 drivers/crypto/omap-sham.c 	req = ahash_request_cast(async_req);
req              1143 drivers/crypto/omap-sham.c 	dd->req = req;
req              1144 drivers/crypto/omap-sham.c 	ctx = ahash_request_ctx(req);
req              1146 drivers/crypto/omap-sham.c 	err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
req              1151 drivers/crypto/omap-sham.c 						ctx->op, req->nbytes);
req              1159 drivers/crypto/omap-sham.c 		dd->pdata->copy_hash(req, 0);
req              1174 drivers/crypto/omap-sham.c 		omap_sham_finish_req(req, err);
req              1175 drivers/crypto/omap-sham.c 		req = NULL;
req              1187 drivers/crypto/omap-sham.c static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
req              1189 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
req              1190 drivers/crypto/omap-sham.c 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
req              1195 drivers/crypto/omap-sham.c 	return omap_sham_handle_queue(dd, req);
req              1198 drivers/crypto/omap-sham.c static int omap_sham_update(struct ahash_request *req)
req              1200 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
req              1203 drivers/crypto/omap-sham.c 	if (!req->nbytes)
req              1206 drivers/crypto/omap-sham.c 	if (ctx->bufcnt + req->nbytes <= ctx->buflen) {
req              1207 drivers/crypto/omap-sham.c 		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
req              1208 drivers/crypto/omap-sham.c 					 0, req->nbytes, 0);
req              1209 drivers/crypto/omap-sham.c 		ctx->bufcnt += req->nbytes;
req              1216 drivers/crypto/omap-sham.c 	return omap_sham_enqueue(req, OP_UPDATE);
req              1229 drivers/crypto/omap-sham.c static int omap_sham_final_shash(struct ahash_request *req)
req              1231 drivers/crypto/omap-sham.c 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
req              1232 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
req              1244 drivers/crypto/omap-sham.c 	return omap_sham_shash_digest(tctx->fallback, req->base.flags,
req              1246 drivers/crypto/omap-sham.c 				      ctx->bufcnt - offset, req->result);
req              1249 drivers/crypto/omap-sham.c static int omap_sham_final(struct ahash_request *req)
req              1251 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
req              1266 drivers/crypto/omap-sham.c 		return omap_sham_final_shash(req);
req              1268 drivers/crypto/omap-sham.c 		return omap_sham_enqueue(req, OP_FINAL);
req              1271 drivers/crypto/omap-sham.c 	return omap_sham_finish(req);
req              1274 drivers/crypto/omap-sham.c static int omap_sham_finup(struct ahash_request *req)
req              1276 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
req              1281 drivers/crypto/omap-sham.c 	err1 = omap_sham_update(req);
req              1288 drivers/crypto/omap-sham.c 	err2 = omap_sham_final(req);
req              1293 drivers/crypto/omap-sham.c static int omap_sham_digest(struct ahash_request *req)
req              1295 drivers/crypto/omap-sham.c 	return omap_sham_init(req) ?: omap_sham_finup(req);
req              1431 drivers/crypto/omap-sham.c static int omap_sham_export(struct ahash_request *req, void *out)
req              1433 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
req              1440 drivers/crypto/omap-sham.c static int omap_sham_import(struct ahash_request *req, const void *in)
req              1442 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
req              1761 drivers/crypto/omap-sham.c 	omap_sham_finish_req(dd->req, err);
req                82 drivers/crypto/picoxcell_crypto.c 	struct crypto_async_request	*req;
req                88 drivers/crypto/picoxcell_crypto.c 	void				(*complete)(struct spacc_req *req);
req               174 drivers/crypto/picoxcell_crypto.c static int spacc_ablk_submit(struct spacc_req *req);
req               313 drivers/crypto/picoxcell_crypto.c 	struct spacc_req *req = aead_request_ctx(areq);
req               314 drivers/crypto/picoxcell_crypto.c 	struct spacc_engine *engine = req->engine;
req               322 drivers/crypto/picoxcell_crypto.c 	if (req->is_encrypt)
req               344 drivers/crypto/picoxcell_crypto.c 	src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
req               348 drivers/crypto/picoxcell_crypto.c 	dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
req               352 drivers/crypto/picoxcell_crypto.c 	req->src_ddt = src_ddt;
req               353 drivers/crypto/picoxcell_crypto.c 	req->dst_ddt = dst_ddt;
req               385 drivers/crypto/picoxcell_crypto.c 	total = req->is_encrypt ? 0 : areq->assoclen;
req               403 drivers/crypto/picoxcell_crypto.c 	dma_pool_free(engine->req_pool, dst_ddt, req->dst_addr);
req               405 drivers/crypto/picoxcell_crypto.c 	dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
req               410 drivers/crypto/picoxcell_crypto.c static void spacc_aead_free_ddts(struct spacc_req *req)
req               412 drivers/crypto/picoxcell_crypto.c 	struct aead_request *areq = container_of(req->req, struct aead_request,
req               416 drivers/crypto/picoxcell_crypto.c 			 (req->is_encrypt ? crypto_aead_authsize(aead) : 0);
req               438 drivers/crypto/picoxcell_crypto.c 	dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
req               439 drivers/crypto/picoxcell_crypto.c 	dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
req               442 drivers/crypto/picoxcell_crypto.c static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
req               449 drivers/crypto/picoxcell_crypto.c 		dev_err(req->engine->dev, "Invalid numbers of SG.\n");
req               453 drivers/crypto/picoxcell_crypto.c 	dma_unmap_sg(req->engine->dev, payload, nents, dir);
req               454 drivers/crypto/picoxcell_crypto.c 	dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
req               531 drivers/crypto/picoxcell_crypto.c static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
req               534 drivers/crypto/picoxcell_crypto.c 	struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
req               536 drivers/crypto/picoxcell_crypto.c 	struct aead_request *subreq = aead_request_ctx(req);
req               539 drivers/crypto/picoxcell_crypto.c 	aead_request_set_callback(subreq, req->base.flags,
req               540 drivers/crypto/picoxcell_crypto.c 				  req->base.complete, req->base.data);
req               541 drivers/crypto/picoxcell_crypto.c 	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req               542 drivers/crypto/picoxcell_crypto.c 			       req->iv);
req               543 drivers/crypto/picoxcell_crypto.c 	aead_request_set_ad(subreq, req->assoclen);
req               549 drivers/crypto/picoxcell_crypto.c static void spacc_aead_complete(struct spacc_req *req)
req               551 drivers/crypto/picoxcell_crypto.c 	spacc_aead_free_ddts(req);
req               552 drivers/crypto/picoxcell_crypto.c 	req->req->complete(req->req, req->result);
req               555 drivers/crypto/picoxcell_crypto.c static int spacc_aead_submit(struct spacc_req *req)
req               558 drivers/crypto/picoxcell_crypto.c 		container_of(req->req, struct aead_request, base);
req               567 drivers/crypto/picoxcell_crypto.c 	req->result = -EINPROGRESS;
req               568 drivers/crypto/picoxcell_crypto.c 	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
req               573 drivers/crypto/picoxcell_crypto.c 	writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
req               574 drivers/crypto/picoxcell_crypto.c 	writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
req               584 drivers/crypto/picoxcell_crypto.c 	if (!req->is_encrypt)
req               593 drivers/crypto/picoxcell_crypto.c 	ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
req               595 drivers/crypto/picoxcell_crypto.c 	if (req->is_encrypt)
req               607 drivers/crypto/picoxcell_crypto.c static int spacc_req_submit(struct spacc_req *req);
req               611 drivers/crypto/picoxcell_crypto.c 	struct spacc_req *req;
req               617 drivers/crypto/picoxcell_crypto.c 		req = list_first_entry(&engine->pending, struct spacc_req,
req               619 drivers/crypto/picoxcell_crypto.c 		list_move_tail(&req->list, &engine->in_progress);
req               621 drivers/crypto/picoxcell_crypto.c 		req->result = spacc_req_submit(req);
req               629 drivers/crypto/picoxcell_crypto.c static int spacc_aead_setup(struct aead_request *req,
req               632 drivers/crypto/picoxcell_crypto.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               635 drivers/crypto/picoxcell_crypto.c 	struct spacc_req *dev_req = aead_request_ctx(req);
req               639 drivers/crypto/picoxcell_crypto.c 	dev_req->req		= &req->base;
req               645 drivers/crypto/picoxcell_crypto.c 	if (unlikely(spacc_aead_need_fallback(req) ||
req               646 drivers/crypto/picoxcell_crypto.c 		     ((err = spacc_aead_make_ddts(req)) == -E2BIG)))
req               647 drivers/crypto/picoxcell_crypto.c 		return spacc_aead_do_fallback(req, alg_type, is_encrypt);
req               656 drivers/crypto/picoxcell_crypto.c 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
req               676 drivers/crypto/picoxcell_crypto.c static int spacc_aead_encrypt(struct aead_request *req)
req               678 drivers/crypto/picoxcell_crypto.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               681 drivers/crypto/picoxcell_crypto.c 	return spacc_aead_setup(req, alg->type, 1);
req               684 drivers/crypto/picoxcell_crypto.c static int spacc_aead_decrypt(struct aead_request *req)
req               686 drivers/crypto/picoxcell_crypto.c 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
req               689 drivers/crypto/picoxcell_crypto.c 	return spacc_aead_setup(req, alg->type, 0);
req               845 drivers/crypto/picoxcell_crypto.c static int spacc_ablk_need_fallback(struct spacc_req *req)
req               848 drivers/crypto/picoxcell_crypto.c 	struct crypto_tfm *tfm = req->req->tfm;
req               849 drivers/crypto/picoxcell_crypto.c 	struct crypto_alg *alg = req->req->tfm->__crt_alg;
req               860 drivers/crypto/picoxcell_crypto.c static void spacc_ablk_complete(struct spacc_req *req)
req               862 drivers/crypto/picoxcell_crypto.c 	struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
req               865 drivers/crypto/picoxcell_crypto.c 		spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
req               867 drivers/crypto/picoxcell_crypto.c 		spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
req               870 drivers/crypto/picoxcell_crypto.c 		spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
req               873 drivers/crypto/picoxcell_crypto.c 	req->req->complete(req->req, req->result);
req               876 drivers/crypto/picoxcell_crypto.c static int spacc_ablk_submit(struct spacc_req *req)
req               878 drivers/crypto/picoxcell_crypto.c 	struct crypto_tfm *tfm = req->req->tfm;
req               880 drivers/crypto/picoxcell_crypto.c 	struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
req               881 drivers/crypto/picoxcell_crypto.c 	struct crypto_alg *alg = req->req->tfm->__crt_alg;
req               886 drivers/crypto/picoxcell_crypto.c 	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
req               890 drivers/crypto/picoxcell_crypto.c 	writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
req               891 drivers/crypto/picoxcell_crypto.c 	writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
req               899 drivers/crypto/picoxcell_crypto.c 	ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
req               900 drivers/crypto/picoxcell_crypto.c 		(req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) :
req               910 drivers/crypto/picoxcell_crypto.c static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
req               914 drivers/crypto/picoxcell_crypto.c 	    crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
req               925 drivers/crypto/picoxcell_crypto.c 	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
req               926 drivers/crypto/picoxcell_crypto.c 	skcipher_request_set_crypt(subreq, req->src, req->dst,
req               927 drivers/crypto/picoxcell_crypto.c 				   req->nbytes, req->info);
req               935 drivers/crypto/picoxcell_crypto.c static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
req               938 drivers/crypto/picoxcell_crypto.c 	struct crypto_alg *alg = req->base.tfm->__crt_alg;
req               940 drivers/crypto/picoxcell_crypto.c 	struct spacc_req *dev_req = ablkcipher_request_ctx(req);
req               944 drivers/crypto/picoxcell_crypto.c 	dev_req->req		= &req->base;
req               951 drivers/crypto/picoxcell_crypto.c 		return spacc_ablk_do_fallback(req, alg_type, is_encrypt);
req               957 drivers/crypto/picoxcell_crypto.c 	if (req->src != req->dst) {
req               958 drivers/crypto/picoxcell_crypto.c 		dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
req               959 drivers/crypto/picoxcell_crypto.c 			req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr);
req               963 drivers/crypto/picoxcell_crypto.c 		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
req               964 drivers/crypto/picoxcell_crypto.c 			req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr);
req               968 drivers/crypto/picoxcell_crypto.c 		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
req               969 drivers/crypto/picoxcell_crypto.c 			req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
req               986 drivers/crypto/picoxcell_crypto.c 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
req              1001 drivers/crypto/picoxcell_crypto.c 	spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
req              1002 drivers/crypto/picoxcell_crypto.c 		       req->nbytes, req->src == req->dst ?
req              1005 drivers/crypto/picoxcell_crypto.c 	if (req->src != req->dst)
req              1007 drivers/crypto/picoxcell_crypto.c 			       req->src, req->nbytes, DMA_TO_DEVICE);
req              1045 drivers/crypto/picoxcell_crypto.c static int spacc_ablk_encrypt(struct ablkcipher_request *req)
req              1047 drivers/crypto/picoxcell_crypto.c 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
req              1051 drivers/crypto/picoxcell_crypto.c 	return spacc_ablk_setup(req, alg->type, 1);
req              1054 drivers/crypto/picoxcell_crypto.c static int spacc_ablk_decrypt(struct ablkcipher_request *req)
req              1056 drivers/crypto/picoxcell_crypto.c 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
req              1060 drivers/crypto/picoxcell_crypto.c 	return spacc_ablk_setup(req, alg->type, 0);
req              1071 drivers/crypto/picoxcell_crypto.c 	struct spacc_req *req;
req              1077 drivers/crypto/picoxcell_crypto.c 		req = list_first_entry(&engine->in_progress, struct spacc_req,
req              1079 drivers/crypto/picoxcell_crypto.c 		list_move_tail(&req->list, &engine->completed);
req              1084 drivers/crypto/picoxcell_crypto.c 		req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &
req              1091 drivers/crypto/picoxcell_crypto.c 		if (unlikely(req->result)) {
req              1092 drivers/crypto/picoxcell_crypto.c 			switch (req->result) {
req              1094 drivers/crypto/picoxcell_crypto.c 				req->result = -EBADMSG;
req              1100 drivers/crypto/picoxcell_crypto.c 				req->result = -EFAULT;
req              1106 drivers/crypto/picoxcell_crypto.c 				req->result = -EIO;
req              1135 drivers/crypto/picoxcell_crypto.c static int spacc_req_submit(struct spacc_req *req)
req              1137 drivers/crypto/picoxcell_crypto.c 	struct crypto_alg *alg = req->req->tfm->__crt_alg;
req              1140 drivers/crypto/picoxcell_crypto.c 		return spacc_aead_submit(req);
req              1142 drivers/crypto/picoxcell_crypto.c 		return spacc_ablk_submit(req);
req              1148 drivers/crypto/picoxcell_crypto.c 	struct spacc_req *req, *tmp;
req              1161 drivers/crypto/picoxcell_crypto.c 	list_for_each_entry_safe(req, tmp, &completed, list) {
req              1162 drivers/crypto/picoxcell_crypto.c 		list_del(&req->list);
req              1163 drivers/crypto/picoxcell_crypto.c 		req->complete(req);
req               194 drivers/crypto/qat/qat_common/adf_admin.c 	struct icp_qat_fw_init_admin_req req;
req               198 drivers/crypto/qat/qat_common/adf_admin.c 	memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
req               199 drivers/crypto/qat/qat_common/adf_admin.c 	req.init_admin_cmd_id = cmd;
req               202 drivers/crypto/qat/qat_common/adf_admin.c 		req.init_cfg_sz = 1024;
req               203 drivers/crypto/qat/qat_common/adf_admin.c 		req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
req               207 drivers/crypto/qat/qat_common/adf_admin.c 		if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
req               467 drivers/crypto/qat/qat_common/qat_algs.c 					struct icp_qat_fw_la_bulk_req *req,
req               471 drivers/crypto/qat/qat_common/qat_algs.c 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
req               472 drivers/crypto/qat/qat_common/qat_algs.c 	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
req               473 drivers/crypto/qat/qat_common/qat_algs.c 	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
req               493 drivers/crypto/qat/qat_common/qat_algs.c 	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
req               494 drivers/crypto/qat/qat_common/qat_algs.c 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
req               496 drivers/crypto/qat/qat_common/qat_algs.c 	qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
req               506 drivers/crypto/qat/qat_common/qat_algs.c 	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
req               507 drivers/crypto/qat/qat_common/qat_algs.c 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
req               509 drivers/crypto/qat/qat_common/qat_algs.c 	qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
req               881 drivers/crypto/qat/qat_common/qat_algs.c 	msg = &qat_req->req;
req               886 drivers/crypto/qat/qat_common/qat_algs.c 	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
req               887 drivers/crypto/qat/qat_common/qat_algs.c 	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
req               888 drivers/crypto/qat/qat_common/qat_algs.c 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
req               889 drivers/crypto/qat/qat_common/qat_algs.c 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
req               923 drivers/crypto/qat/qat_common/qat_algs.c 	msg = &qat_req->req;
req               928 drivers/crypto/qat/qat_common/qat_algs.c 	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
req               929 drivers/crypto/qat/qat_common/qat_algs.c 	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
req               930 drivers/crypto/qat/qat_common/qat_algs.c 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
req               931 drivers/crypto/qat/qat_common/qat_algs.c 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
req              1048 drivers/crypto/qat/qat_common/qat_algs.c static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
req              1050 drivers/crypto/qat/qat_common/qat_algs.c 	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
req              1053 drivers/crypto/qat/qat_common/qat_algs.c 	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
req              1059 drivers/crypto/qat/qat_common/qat_algs.c 	if (req->nbytes == 0)
req              1067 drivers/crypto/qat/qat_common/qat_algs.c 	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
req              1074 drivers/crypto/qat/qat_common/qat_algs.c 	msg = &qat_req->req;
req              1077 drivers/crypto/qat/qat_common/qat_algs.c 	qat_req->ablkcipher_req = req;
req              1079 drivers/crypto/qat/qat_common/qat_algs.c 	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
req              1080 drivers/crypto/qat/qat_common/qat_algs.c 	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
req              1081 drivers/crypto/qat/qat_common/qat_algs.c 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
req              1082 drivers/crypto/qat/qat_common/qat_algs.c 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
req              1083 drivers/crypto/qat/qat_common/qat_algs.c 	cipher_param->cipher_length = req->nbytes;
req              1086 drivers/crypto/qat/qat_common/qat_algs.c 	memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
req              1100 drivers/crypto/qat/qat_common/qat_algs.c static int qat_alg_ablkcipher_blk_encrypt(struct ablkcipher_request *req)
req              1102 drivers/crypto/qat/qat_common/qat_algs.c 	if (req->nbytes % AES_BLOCK_SIZE != 0)
req              1105 drivers/crypto/qat/qat_common/qat_algs.c 	return qat_alg_ablkcipher_encrypt(req);
req              1108 drivers/crypto/qat/qat_common/qat_algs.c static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
req              1110 drivers/crypto/qat/qat_common/qat_algs.c 	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
req              1113 drivers/crypto/qat/qat_common/qat_algs.c 	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
req              1119 drivers/crypto/qat/qat_common/qat_algs.c 	if (req->nbytes == 0)
req              1127 drivers/crypto/qat/qat_common/qat_algs.c 	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
req              1134 drivers/crypto/qat/qat_common/qat_algs.c 	msg = &qat_req->req;
req              1137 drivers/crypto/qat/qat_common/qat_algs.c 	qat_req->ablkcipher_req = req;
req              1139 drivers/crypto/qat/qat_common/qat_algs.c 	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
req              1140 drivers/crypto/qat/qat_common/qat_algs.c 	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
req              1141 drivers/crypto/qat/qat_common/qat_algs.c 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
req              1142 drivers/crypto/qat/qat_common/qat_algs.c 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
req              1143 drivers/crypto/qat/qat_common/qat_algs.c 	cipher_param->cipher_length = req->nbytes;
req              1146 drivers/crypto/qat/qat_common/qat_algs.c 	memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
req              1160 drivers/crypto/qat/qat_common/qat_algs.c static int qat_alg_ablkcipher_blk_decrypt(struct ablkcipher_request *req)
req              1162 drivers/crypto/qat/qat_common/qat_algs.c 	if (req->nbytes % AES_BLOCK_SIZE != 0)
req              1165 drivers/crypto/qat/qat_common/qat_algs.c 	return qat_alg_ablkcipher_decrypt(req);
req               172 drivers/crypto/qat/qat_common/qat_asym_algs.c 	struct icp_qat_fw_pke_request req;
req               187 drivers/crypto/qat/qat_common/qat_asym_algs.c 	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
req               188 drivers/crypto/qat/qat_common/qat_asym_algs.c 	struct kpp_request *areq = req->areq.dh;
req               189 drivers/crypto/qat/qat_common/qat_asym_algs.c 	struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
req               196 drivers/crypto/qat/qat_common/qat_asym_algs.c 		if (req->src_align)
req               197 drivers/crypto/qat/qat_common/qat_asym_algs.c 			dma_free_coherent(dev, req->ctx.dh->p_size,
req               198 drivers/crypto/qat/qat_common/qat_asym_algs.c 					  req->src_align, req->in.dh.in.b);
req               200 drivers/crypto/qat/qat_common/qat_asym_algs.c 			dma_unmap_single(dev, req->in.dh.in.b,
req               201 drivers/crypto/qat/qat_common/qat_asym_algs.c 					 req->ctx.dh->p_size, DMA_TO_DEVICE);
req               204 drivers/crypto/qat/qat_common/qat_asym_algs.c 	areq->dst_len = req->ctx.dh->p_size;
req               205 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (req->dst_align) {
req               206 drivers/crypto/qat/qat_common/qat_asym_algs.c 		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
req               209 drivers/crypto/qat/qat_common/qat_asym_algs.c 		dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
req               210 drivers/crypto/qat/qat_common/qat_asym_algs.c 				  req->out.dh.r);
req               212 drivers/crypto/qat/qat_common/qat_asym_algs.c 		dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
req               216 drivers/crypto/qat/qat_common/qat_asym_algs.c 	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
req               218 drivers/crypto/qat/qat_common/qat_asym_algs.c 	dma_unmap_single(dev, req->phy_out,
req               257 drivers/crypto/qat/qat_common/qat_asym_algs.c static int qat_dh_compute_value(struct kpp_request *req)
req               259 drivers/crypto/qat/qat_common/qat_asym_algs.c 	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
req               264 drivers/crypto/qat/qat_common/qat_asym_algs.c 			PTR_ALIGN(kpp_request_ctx(req), 64);
req               265 drivers/crypto/qat/qat_common/qat_asym_algs.c 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
req               272 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (req->dst_len < ctx->p_size) {
req               273 drivers/crypto/qat/qat_common/qat_asym_algs.c 		req->dst_len = ctx->p_size;
req               281 drivers/crypto/qat/qat_common/qat_asym_algs.c 						    !req->src && ctx->g2);
req               287 drivers/crypto/qat/qat_common/qat_asym_algs.c 	qat_req->areq.dh = req;
req               296 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (req->src) {
req               314 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (req->src) {
req               322 drivers/crypto/qat/qat_common/qat_asym_algs.c 		if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
req               325 drivers/crypto/qat/qat_common/qat_asym_algs.c 							     sg_virt(req->src),
req               326 drivers/crypto/qat/qat_common/qat_asym_algs.c 							     req->src_len,
req               333 drivers/crypto/qat/qat_common/qat_asym_algs.c 			int shift = ctx->p_size - req->src_len;
req               343 drivers/crypto/qat/qat_common/qat_asym_algs.c 						 req->src, 0, req->src_len, 0);
req               353 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
req               355 drivers/crypto/qat/qat_common/qat_asym_algs.c 		qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
req               356 drivers/crypto/qat/qat_common/qat_asym_algs.c 						   req->dst_len,
req               416 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (req->src) {
req               555 drivers/crypto/qat/qat_common/qat_asym_algs.c 	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
req               556 drivers/crypto/qat/qat_common/qat_asym_algs.c 	struct akcipher_request *areq = req->areq.rsa;
req               557 drivers/crypto/qat/qat_common/qat_asym_algs.c 	struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
req               563 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (req->src_align)
req               564 drivers/crypto/qat/qat_common/qat_asym_algs.c 		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
req               565 drivers/crypto/qat/qat_common/qat_asym_algs.c 				  req->in.rsa.enc.m);
req               567 drivers/crypto/qat/qat_common/qat_asym_algs.c 		dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
req               570 drivers/crypto/qat/qat_common/qat_asym_algs.c 	areq->dst_len = req->ctx.rsa->key_sz;
req               571 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (req->dst_align) {
req               572 drivers/crypto/qat/qat_common/qat_asym_algs.c 		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
req               575 drivers/crypto/qat/qat_common/qat_asym_algs.c 		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
req               576 drivers/crypto/qat/qat_common/qat_asym_algs.c 				  req->out.rsa.enc.c);
req               578 drivers/crypto/qat/qat_common/qat_asym_algs.c 		dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
req               582 drivers/crypto/qat/qat_common/qat_asym_algs.c 	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
req               584 drivers/crypto/qat/qat_common/qat_asym_algs.c 	dma_unmap_single(dev, req->phy_out,
req               686 drivers/crypto/qat/qat_common/qat_asym_algs.c static int qat_rsa_enc(struct akcipher_request *req)
req               688 drivers/crypto/qat/qat_common/qat_asym_algs.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               693 drivers/crypto/qat/qat_common/qat_asym_algs.c 			PTR_ALIGN(akcipher_request_ctx(req), 64);
req               694 drivers/crypto/qat/qat_common/qat_asym_algs.c 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
req               700 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (req->dst_len < ctx->key_sz) {
req               701 drivers/crypto/qat/qat_common/qat_asym_algs.c 		req->dst_len = ctx->key_sz;
req               713 drivers/crypto/qat/qat_common/qat_asym_algs.c 	qat_req->areq.rsa = req;
req               730 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
req               732 drivers/crypto/qat/qat_common/qat_asym_algs.c 		qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
req               733 drivers/crypto/qat/qat_common/qat_asym_algs.c 						   req->src_len, DMA_TO_DEVICE);
req               738 drivers/crypto/qat/qat_common/qat_asym_algs.c 		int shift = ctx->key_sz - req->src_len;
req               746 drivers/crypto/qat/qat_common/qat_asym_algs.c 		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
req               747 drivers/crypto/qat/qat_common/qat_asym_algs.c 					 0, req->src_len, 0);
req               749 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
req               751 drivers/crypto/qat/qat_common/qat_asym_algs.c 		qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
req               752 drivers/crypto/qat/qat_common/qat_asym_algs.c 							req->dst_len,
req               820 drivers/crypto/qat/qat_common/qat_asym_algs.c static int qat_rsa_dec(struct akcipher_request *req)
req               822 drivers/crypto/qat/qat_common/qat_asym_algs.c 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               827 drivers/crypto/qat/qat_common/qat_asym_algs.c 			PTR_ALIGN(akcipher_request_ctx(req), 64);
req               828 drivers/crypto/qat/qat_common/qat_asym_algs.c 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
req               834 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (req->dst_len < ctx->key_sz) {
req               835 drivers/crypto/qat/qat_common/qat_asym_algs.c 		req->dst_len = ctx->key_sz;
req               849 drivers/crypto/qat/qat_common/qat_asym_algs.c 	qat_req->areq.rsa = req;
req               874 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
req               876 drivers/crypto/qat/qat_common/qat_asym_algs.c 		qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
req               877 drivers/crypto/qat/qat_common/qat_asym_algs.c 						   req->dst_len, DMA_TO_DEVICE);
req               882 drivers/crypto/qat/qat_common/qat_asym_algs.c 		int shift = ctx->key_sz - req->src_len;
req               890 drivers/crypto/qat/qat_common/qat_asym_algs.c 		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
req               891 drivers/crypto/qat/qat_common/qat_asym_algs.c 					 0, req->src_len, 0);
req               893 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
req               895 drivers/crypto/qat/qat_common/qat_asym_algs.c 		qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
req               896 drivers/crypto/qat/qat_common/qat_asym_algs.c 						    req->dst_len,
req                79 drivers/crypto/qat/qat_common/qat_crypto.h 	struct icp_qat_fw_la_bulk_req req;
req                90 drivers/crypto/qat/qat_common/qat_crypto.h 		   struct qat_crypto_request *req);
req                20 drivers/crypto/qce/ablkcipher.c 	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
req                21 drivers/crypto/qce/ablkcipher.c 	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
req                29 drivers/crypto/qce/ablkcipher.c 	diff_dst = (req->src != req->dst) ? true : false;
req                54 drivers/crypto/qce/ablkcipher.c 	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
req                55 drivers/crypto/qce/ablkcipher.c 	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
req                56 drivers/crypto/qce/ablkcipher.c 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
req                65 drivers/crypto/qce/ablkcipher.c 	rctx->iv = req->info;
req                67 drivers/crypto/qce/ablkcipher.c 	rctx->cryptlen = req->nbytes;
req                69 drivers/crypto/qce/ablkcipher.c 	diff_dst = (req->src != req->dst) ? true : false;
req                73 drivers/crypto/qce/ablkcipher.c 	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
req                75 drivers/crypto/qce/ablkcipher.c 		rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
req                89 drivers/crypto/qce/ablkcipher.c 	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
req                98 drivers/crypto/qce/ablkcipher.c 	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
req               118 drivers/crypto/qce/ablkcipher.c 		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
req               121 drivers/crypto/qce/ablkcipher.c 		rctx->src_sg = req->src;
req               134 drivers/crypto/qce/ablkcipher.c 	ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
req               144 drivers/crypto/qce/ablkcipher.c 		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
req               210 drivers/crypto/qce/ablkcipher.c static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
req               213 drivers/crypto/qce/ablkcipher.c 			crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
req               215 drivers/crypto/qce/ablkcipher.c 	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
req               227 drivers/crypto/qce/ablkcipher.c 		skcipher_request_set_callback(subreq, req->base.flags,
req               229 drivers/crypto/qce/ablkcipher.c 		skcipher_request_set_crypt(subreq, req->src, req->dst,
req               230 drivers/crypto/qce/ablkcipher.c 					   req->nbytes, req->info);
req               237 drivers/crypto/qce/ablkcipher.c 	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
req               240 drivers/crypto/qce/ablkcipher.c static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
req               242 drivers/crypto/qce/ablkcipher.c 	return qce_ablkcipher_crypt(req, 1);
req               245 drivers/crypto/qce/ablkcipher.c static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
req               247 drivers/crypto/qce/ablkcipher.c 	return qce_ablkcipher_crypt(req, 0);
req               225 drivers/crypto/qce/common.c 	struct ahash_request *req = ahash_request_cast(async_req);
req               227 drivers/crypto/qce/common.c 	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
req               238 drivers/crypto/qce/common.c 	if (!rctx->last_blk && req->nbytes % blocksize)
req               293 drivers/crypto/qce/common.c 	qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes);
req               296 drivers/crypto/qce/common.c 	qce_write(qce, REG_SEG_SIZE, req->nbytes);
req               310 drivers/crypto/qce/common.c 	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
req               311 drivers/crypto/qce/common.c 	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
req                73 drivers/crypto/qce/core.c 			    struct crypto_async_request *req)
req                81 drivers/crypto/qce/core.c 	if (req)
req                82 drivers/crypto/qce/core.c 		ret = crypto_enqueue_request(&qce->queue, req);
req                85 drivers/crypto/qce/core.c 	if (qce->req) {
req                93 drivers/crypto/qce/core.c 		qce->req = async_req;
req               118 drivers/crypto/qce/core.c 	struct crypto_async_request *req;
req               122 drivers/crypto/qce/core.c 	req = qce->req;
req               123 drivers/crypto/qce/core.c 	qce->req = NULL;
req               126 drivers/crypto/qce/core.c 	if (req)
req               127 drivers/crypto/qce/core.c 		req->complete(req, qce->result);
req               133 drivers/crypto/qce/core.c 				     struct crypto_async_request *req)
req               135 drivers/crypto/qce/core.c 	return qce_handle_queue(qce, req);
req                33 drivers/crypto/qce/core.h 	struct crypto_async_request *req;
req                42 drivers/crypto/qce/core.h 				 struct crypto_async_request *req);
req                32 drivers/crypto/qce/sha.c 	struct ahash_request *req = ahash_request_cast(async_req);
req                33 drivers/crypto/qce/sha.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req                34 drivers/crypto/qce/sha.c 	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
req                46 drivers/crypto/qce/sha.c 	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
req                50 drivers/crypto/qce/sha.c 	if (req->result)
req                51 drivers/crypto/qce/sha.c 		memcpy(req->result, result->auth_iv, digestsize);
req                60 drivers/crypto/qce/sha.c 	req->src = rctx->src_orig;
req                61 drivers/crypto/qce/sha.c 	req->nbytes = rctx->nbytes_orig;
req                70 drivers/crypto/qce/sha.c 	struct ahash_request *req = ahash_request_cast(async_req);
req                71 drivers/crypto/qce/sha.c 	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
req                86 drivers/crypto/qce/sha.c 	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
req                92 drivers/crypto/qce/sha.c 	ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
req               102 drivers/crypto/qce/sha.c 	ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
req               120 drivers/crypto/qce/sha.c 	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
req               124 drivers/crypto/qce/sha.c static int qce_ahash_init(struct ahash_request *req)
req               126 drivers/crypto/qce/sha.c 	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
req               127 drivers/crypto/qce/sha.c 	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
req               139 drivers/crypto/qce/sha.c static int qce_ahash_export(struct ahash_request *req, void *out)
req               141 drivers/crypto/qce/sha.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req               142 drivers/crypto/qce/sha.c 	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
req               169 drivers/crypto/qce/sha.c static int qce_import_common(struct ahash_request *req, u64 in_count,
req               172 drivers/crypto/qce/sha.c 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
req               173 drivers/crypto/qce/sha.c 	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
req               204 drivers/crypto/qce/sha.c static int qce_ahash_import(struct ahash_request *req, const void *in)
req               206 drivers/crypto/qce/sha.c 	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
req               214 drivers/crypto/qce/sha.c 		ret = qce_import_common(req, state->count, state->state,
req               219 drivers/crypto/qce/sha.c 		ret = qce_import_common(req, state->count, state->state,
req               226 drivers/crypto/qce/sha.c static int qce_ahash_update(struct ahash_request *req)
req               228 drivers/crypto/qce/sha.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               229 drivers/crypto/qce/sha.c 	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
req               230 drivers/crypto/qce/sha.c 	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
req               239 drivers/crypto/qce/sha.c 	rctx->count += req->nbytes;
req               242 drivers/crypto/qce/sha.c 	total = req->nbytes + rctx->buflen;
req               245 drivers/crypto/qce/sha.c 		scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src,
req               246 drivers/crypto/qce/sha.c 					 0, req->nbytes, 0);
req               247 drivers/crypto/qce/sha.c 		rctx->buflen += req->nbytes;
req               252 drivers/crypto/qce/sha.c 	rctx->src_orig = req->src;
req               253 drivers/crypto/qce/sha.c 	rctx->nbytes_orig = req->nbytes;
req               265 drivers/crypto/qce/sha.c 		unsigned int src_offset = req->nbytes - hash_later;
req               266 drivers/crypto/qce/sha.c 		scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
req               274 drivers/crypto/qce/sha.c 	sg = sg_last = req->src;
req               292 drivers/crypto/qce/sha.c 		sg_chain(rctx->sg, 2, req->src);
req               293 drivers/crypto/qce/sha.c 		req->src = rctx->sg;
req               296 drivers/crypto/qce/sha.c 	req->nbytes = nbytes;
req               299 drivers/crypto/qce/sha.c 	return qce->async_req_enqueue(tmpl->qce, &req->base);
req               302 drivers/crypto/qce/sha.c static int qce_ahash_final(struct ahash_request *req)
req               304 drivers/crypto/qce/sha.c 	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
req               305 drivers/crypto/qce/sha.c 	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
req               313 drivers/crypto/qce/sha.c 	rctx->src_orig = req->src;
req               314 drivers/crypto/qce/sha.c 	rctx->nbytes_orig = req->nbytes;
req               319 drivers/crypto/qce/sha.c 	req->src = rctx->sg;
req               320 drivers/crypto/qce/sha.c 	req->nbytes = rctx->buflen;
req               322 drivers/crypto/qce/sha.c 	return qce->async_req_enqueue(tmpl->qce, &req->base);
req               325 drivers/crypto/qce/sha.c static int qce_ahash_digest(struct ahash_request *req)
req               327 drivers/crypto/qce/sha.c 	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
req               328 drivers/crypto/qce/sha.c 	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
req               332 drivers/crypto/qce/sha.c 	ret = qce_ahash_init(req);
req               336 drivers/crypto/qce/sha.c 	rctx->src_orig = req->src;
req               337 drivers/crypto/qce/sha.c 	rctx->nbytes_orig = req->nbytes;
req               341 drivers/crypto/qce/sha.c 	return qce->async_req_enqueue(tmpl->qce, &req->base);
req               350 drivers/crypto/qce/sha.c 	struct ahash_request *req;
req               377 drivers/crypto/qce/sha.c 	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
req               378 drivers/crypto/qce/sha.c 	if (!req) {
req               384 drivers/crypto/qce/sha.c 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req               396 drivers/crypto/qce/sha.c 	ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);
req               398 drivers/crypto/qce/sha.c 	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
req               404 drivers/crypto/qce/sha.c 	ahash_request_free(req);
req                22 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 			 struct ablkcipher_request *req)
req                24 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	if (!IS_ALIGNED(req->nbytes, dev->align_size))
req                27 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 		return dev->enqueue(dev, &req->base);
req                76 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c static int rk_aes_ecb_encrypt(struct ablkcipher_request *req)
req                78 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req                83 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	return rk_handle_req(dev, req);
req                86 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c static int rk_aes_ecb_decrypt(struct ablkcipher_request *req)
req                88 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req                93 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	return rk_handle_req(dev, req);
req                96 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c static int rk_aes_cbc_encrypt(struct ablkcipher_request *req)
req                98 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req               103 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	return rk_handle_req(dev, req);
req               106 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c static int rk_aes_cbc_decrypt(struct ablkcipher_request *req)
req               108 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req               113 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	return rk_handle_req(dev, req);
req               116 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c static int rk_des_ecb_encrypt(struct ablkcipher_request *req)
req               118 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req               123 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	return rk_handle_req(dev, req);
req               126 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c static int rk_des_ecb_decrypt(struct ablkcipher_request *req)
req               128 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req               133 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	return rk_handle_req(dev, req);
req               136 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c static int rk_des_cbc_encrypt(struct ablkcipher_request *req)
req               138 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req               143 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	return rk_handle_req(dev, req);
req               146 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c static int rk_des_cbc_decrypt(struct ablkcipher_request *req)
req               148 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req               153 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	return rk_handle_req(dev, req);
req               156 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c static int rk_des3_ede_ecb_encrypt(struct ablkcipher_request *req)
req               158 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req               163 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	return rk_handle_req(dev, req);
req               166 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c static int rk_des3_ede_ecb_decrypt(struct ablkcipher_request *req)
req               168 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req               173 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	return rk_handle_req(dev, req);
req               176 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c static int rk_des3_ede_cbc_encrypt(struct ablkcipher_request *req)
req               178 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req               183 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	return rk_handle_req(dev, req);
req               186 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c static int rk_des3_ede_cbc_decrypt(struct ablkcipher_request *req)
req               188 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req               194 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	return rk_handle_req(dev, req);
req               199 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct ablkcipher_request *req =
req               201 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
req               214 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 		memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->info, ivsize);
req               226 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 		memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->info, ivsize);
req               247 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct ablkcipher_request *req =
req               249 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req               260 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 		sg_pcopy_to_buffer(dev->first, dev->src_nents, req->info,
req               272 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct ablkcipher_request *req =
req               277 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	dev->left_bytes = req->nbytes;
req               278 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	dev->total = req->nbytes;
req               279 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	dev->sg_src = req->src;
req               280 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	dev->first = req->src;
req               281 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	dev->src_nents = sg_nents(req->src);
req               282 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	dev->sg_dst = req->dst;
req               283 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	dev->dst_nents = sg_nents(req->dst);
req               295 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct ablkcipher_request *req =
req               297 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req               304 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 			memcpy(req->info, sg_virt(dev->sg_dst) +
req               307 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 			memcpy(req->info, dev->addr_vir +
req               315 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct ablkcipher_request *req =
req               317 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req               342 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	struct ablkcipher_request *req =
req               347 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 		if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
req                18 drivers/crypto/rockchip/rk3288_crypto_ahash.c static int zero_message_process(struct ahash_request *req)
req                20 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req                25 drivers/crypto/rockchip/rk3288_crypto_ahash.c 		memcpy(req->result, sha1_zero_message_hash, rk_digest_size);
req                28 drivers/crypto/rockchip/rk3288_crypto_ahash.c 		memcpy(req->result, sha256_zero_message_hash, rk_digest_size);
req                31 drivers/crypto/rockchip/rk3288_crypto_ahash.c 		memcpy(req->result, md5_zero_message_hash, rk_digest_size);
req                48 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	struct ahash_request *req = ahash_request_cast(dev->async_req);
req                49 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
req                79 drivers/crypto/rockchip/rk3288_crypto_ahash.c static int rk_ahash_init(struct ahash_request *req)
req                81 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
req                82 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req                86 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	rctx->fallback_req.base.flags = req->base.flags &
req                92 drivers/crypto/rockchip/rk3288_crypto_ahash.c static int rk_ahash_update(struct ahash_request *req)
req                94 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
req                95 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req                99 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	rctx->fallback_req.base.flags = req->base.flags &
req               101 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	rctx->fallback_req.nbytes = req->nbytes;
req               102 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	rctx->fallback_req.src = req->src;
req               107 drivers/crypto/rockchip/rk3288_crypto_ahash.c static int rk_ahash_final(struct ahash_request *req)
req               109 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
req               110 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               114 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	rctx->fallback_req.base.flags = req->base.flags &
req               116 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	rctx->fallback_req.result = req->result;
req               121 drivers/crypto/rockchip/rk3288_crypto_ahash.c static int rk_ahash_finup(struct ahash_request *req)
req               123 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
req               124 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               128 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	rctx->fallback_req.base.flags = req->base.flags &
req               131 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	rctx->fallback_req.nbytes = req->nbytes;
req               132 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	rctx->fallback_req.src = req->src;
req               133 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	rctx->fallback_req.result = req->result;
req               138 drivers/crypto/rockchip/rk3288_crypto_ahash.c static int rk_ahash_import(struct ahash_request *req, const void *in)
req               140 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
req               141 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               145 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	rctx->fallback_req.base.flags = req->base.flags &
req               151 drivers/crypto/rockchip/rk3288_crypto_ahash.c static int rk_ahash_export(struct ahash_request *req, void *out)
req               153 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
req               154 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               158 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	rctx->fallback_req.base.flags = req->base.flags &
req               164 drivers/crypto/rockchip/rk3288_crypto_ahash.c static int rk_ahash_digest(struct ahash_request *req)
req               166 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
req               169 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	if (!req->nbytes)
req               170 drivers/crypto/rockchip/rk3288_crypto_ahash.c 		return zero_message_process(req);
req               172 drivers/crypto/rockchip/rk3288_crypto_ahash.c 		return dev->enqueue(dev, &req->base);
req               195 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	struct ahash_request *req = ahash_request_cast(dev->async_req);
req               199 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	dev->total = req->nbytes;
req               200 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	dev->left_bytes = req->nbytes;
req               204 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	dev->sg_src = req->src;
req               205 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	dev->first = req->src;
req               206 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	dev->src_nents = sg_nents(req->src);
req               207 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	rctx = ahash_request_ctx(req);
req               210 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	tfm = crypto_ahash_reqtfm(req);
req               232 drivers/crypto/rockchip/rk3288_crypto_ahash.c 	struct ahash_request *req = ahash_request_cast(dev->async_req);
req               261 drivers/crypto/rockchip/rk3288_crypto_ahash.c 		tfm = crypto_ahash_reqtfm(req);
req               262 drivers/crypto/rockchip/rk3288_crypto_ahash.c 		memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
req               306 drivers/crypto/s5p-sss.c 	struct ablkcipher_request	*req;
req               459 drivers/crypto/s5p-sss.c 	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
req               481 drivers/crypto/s5p-sss.c 	struct ablkcipher_request *req = dev->req;
req               482 drivers/crypto/s5p-sss.c 	struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
req               487 drivers/crypto/s5p-sss.c 			dev->req->nbytes);
req               488 drivers/crypto/s5p-sss.c 		s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
req               489 drivers/crypto/s5p-sss.c 				dev->req->nbytes, 1);
req               494 drivers/crypto/s5p-sss.c 		memcpy_fromio(req->info, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), AES_BLOCK_SIZE);
req               497 drivers/crypto/s5p-sss.c 		memcpy_fromio(req->info, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), AES_BLOCK_SIZE);
req               501 drivers/crypto/s5p-sss.c static void s5p_aes_complete(struct ablkcipher_request *req, int err)
req               503 drivers/crypto/s5p-sss.c 	req->base.complete(&req->base, err);
req               526 drivers/crypto/s5p-sss.c 	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
req               534 drivers/crypto/s5p-sss.c 	s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);
req               663 drivers/crypto/s5p-sss.c 	struct ablkcipher_request *req;
req               735 drivers/crypto/s5p-sss.c 		s5p_aes_complete(dev->req, 0);
req               760 drivers/crypto/s5p-sss.c 	req = dev->req;
req               765 drivers/crypto/s5p-sss.c 	s5p_aes_complete(req, err);
req               786 drivers/crypto/s5p-sss.c static void s5p_hash_read_msg(struct ahash_request *req)
req               788 drivers/crypto/s5p-sss.c 	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
req               816 drivers/crypto/s5p-sss.c static void s5p_hash_write_iv(struct ahash_request *req)
req               818 drivers/crypto/s5p-sss.c 	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
req               827 drivers/crypto/s5p-sss.c static void s5p_hash_copy_result(struct ahash_request *req)
req               829 drivers/crypto/s5p-sss.c 	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
req               831 drivers/crypto/s5p-sss.c 	if (!req->result)
req               834 drivers/crypto/s5p-sss.c 	memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
req              1214 drivers/crypto/s5p-sss.c static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
req              1216 drivers/crypto/s5p-sss.c 	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
req              1222 drivers/crypto/s5p-sss.c 		nbytes = req->nbytes;
req              1237 drivers/crypto/s5p-sss.c 		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
req              1261 drivers/crypto/s5p-sss.c 		scatterwalk_map_and_copy(ctx->buffer, req->src,
req              1262 drivers/crypto/s5p-sss.c 					 req->nbytes - hash_later,
req              1267 drivers/crypto/s5p-sss.c 		ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
req              1275 drivers/crypto/s5p-sss.c 			scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
req              1311 drivers/crypto/s5p-sss.c static void s5p_hash_finish(struct ahash_request *req)
req              1313 drivers/crypto/s5p-sss.c 	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
req              1317 drivers/crypto/s5p-sss.c 		s5p_hash_copy_result(req);
req              1327 drivers/crypto/s5p-sss.c static void s5p_hash_finish_req(struct ahash_request *req, int err)
req              1329 drivers/crypto/s5p-sss.c 	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
req              1345 drivers/crypto/s5p-sss.c 		s5p_hash_read_msg(req);
req              1347 drivers/crypto/s5p-sss.c 			s5p_hash_finish(req);
req              1358 drivers/crypto/s5p-sss.c 	if (req->base.complete)
req              1359 drivers/crypto/s5p-sss.c 		req->base.complete(&req->base, err);
req              1373 drivers/crypto/s5p-sss.c 				 struct ahash_request *req)
req              1382 drivers/crypto/s5p-sss.c 	if (req)
req              1383 drivers/crypto/s5p-sss.c 		ret = ahash_enqueue_request(&dd->hash_queue, req);
req              1403 drivers/crypto/s5p-sss.c 	req = ahash_request_cast(async_req);
req              1404 drivers/crypto/s5p-sss.c 	dd->hash_req = req;
req              1405 drivers/crypto/s5p-sss.c 	ctx = ahash_request_ctx(req);
req              1407 drivers/crypto/s5p-sss.c 	err = s5p_hash_prepare_request(req, ctx->op_update);
req              1412 drivers/crypto/s5p-sss.c 		ctx->op_update, req->nbytes);
req              1416 drivers/crypto/s5p-sss.c 		s5p_hash_write_iv(req); /* restore hash IV */
req              1429 drivers/crypto/s5p-sss.c 		s5p_hash_finish_req(req, err);
req              1430 drivers/crypto/s5p-sss.c 		req = NULL;
req              1487 drivers/crypto/s5p-sss.c static int s5p_hash_enqueue(struct ahash_request *req, bool op)
req              1489 drivers/crypto/s5p-sss.c 	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
req              1490 drivers/crypto/s5p-sss.c 	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
req              1494 drivers/crypto/s5p-sss.c 	return s5p_hash_handle_queue(tctx->dd, req);
req              1506 drivers/crypto/s5p-sss.c static int s5p_hash_update(struct ahash_request *req)
req              1508 drivers/crypto/s5p-sss.c 	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
req              1510 drivers/crypto/s5p-sss.c 	if (!req->nbytes)
req              1513 drivers/crypto/s5p-sss.c 	if (ctx->bufcnt + req->nbytes <= BUFLEN) {
req              1514 drivers/crypto/s5p-sss.c 		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
req              1515 drivers/crypto/s5p-sss.c 					 0, req->nbytes, 0);
req              1516 drivers/crypto/s5p-sss.c 		ctx->bufcnt += req->nbytes;
req              1520 drivers/crypto/s5p-sss.c 	return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */
req              1545 drivers/crypto/s5p-sss.c static int s5p_hash_final_shash(struct ahash_request *req)
req              1547 drivers/crypto/s5p-sss.c 	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
req              1548 drivers/crypto/s5p-sss.c 	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
req              1550 drivers/crypto/s5p-sss.c 	return s5p_hash_shash_digest(tctx->fallback, req->base.flags,
req              1551 drivers/crypto/s5p-sss.c 				     ctx->buffer, ctx->bufcnt, req->result);
req              1577 drivers/crypto/s5p-sss.c static int s5p_hash_final(struct ahash_request *req)
req              1579 drivers/crypto/s5p-sss.c 	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
req              1586 drivers/crypto/s5p-sss.c 		return s5p_hash_final_shash(req);
req              1588 drivers/crypto/s5p-sss.c 	return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */
req              1597 drivers/crypto/s5p-sss.c static int s5p_hash_finup(struct ahash_request *req)
req              1599 drivers/crypto/s5p-sss.c 	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
req              1604 drivers/crypto/s5p-sss.c 	err1 = s5p_hash_update(req);
req              1613 drivers/crypto/s5p-sss.c 	err2 = s5p_hash_final(req);
req              1624 drivers/crypto/s5p-sss.c static int s5p_hash_init(struct ahash_request *req)
req              1626 drivers/crypto/s5p-sss.c 	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
req              1627 drivers/crypto/s5p-sss.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1668 drivers/crypto/s5p-sss.c static int s5p_hash_digest(struct ahash_request *req)
req              1670 drivers/crypto/s5p-sss.c 	return s5p_hash_init(req) ?: s5p_hash_finup(req);
req              1725 drivers/crypto/s5p-sss.c static int s5p_hash_export(struct ahash_request *req, void *out)
req              1727 drivers/crypto/s5p-sss.c 	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
req              1739 drivers/crypto/s5p-sss.c static int s5p_hash_import(struct ahash_request *req, const void *in)
req              1741 drivers/crypto/s5p-sss.c 	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
req              1742 drivers/crypto/s5p-sss.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1873 drivers/crypto/s5p-sss.c 				struct ablkcipher_request *req)
req              1879 drivers/crypto/s5p-sss.c 	sg = req->src;
req              1900 drivers/crypto/s5p-sss.c 				 struct ablkcipher_request *req)
req              1906 drivers/crypto/s5p-sss.c 	sg = req->dst;
req              1928 drivers/crypto/s5p-sss.c 	struct ablkcipher_request *req = dev->req;
req              1941 drivers/crypto/s5p-sss.c 		iv = req->info;
req              1946 drivers/crypto/s5p-sss.c 		ctr = req->info;
req              1972 drivers/crypto/s5p-sss.c 	err = s5p_set_indata_start(dev, req);
req              1976 drivers/crypto/s5p-sss.c 	err = s5p_set_outdata_start(dev, req);
req              2000 drivers/crypto/s5p-sss.c 	s5p_aes_complete(req, err);
req              2024 drivers/crypto/s5p-sss.c 	dev->req = ablkcipher_request_cast(async_req);
req              2025 drivers/crypto/s5p-sss.c 	dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
req              2026 drivers/crypto/s5p-sss.c 	reqctx   = ablkcipher_request_ctx(dev->req);
req              2032 drivers/crypto/s5p-sss.c 			      struct ablkcipher_request *req)
req              2038 drivers/crypto/s5p-sss.c 	err = ablkcipher_enqueue_request(&dev->queue, req);
req              2052 drivers/crypto/s5p-sss.c static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
req              2054 drivers/crypto/s5p-sss.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req              2055 drivers/crypto/s5p-sss.c 	struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
req              2059 drivers/crypto/s5p-sss.c 	if (!req->nbytes)
req              2062 drivers/crypto/s5p-sss.c 	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE) &&
req              2070 drivers/crypto/s5p-sss.c 	return s5p_aes_handle_req(dev, req);
req              2090 drivers/crypto/s5p-sss.c static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
req              2092 drivers/crypto/s5p-sss.c 	return s5p_aes_crypt(req, 0);
req              2095 drivers/crypto/s5p-sss.c static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
req              2097 drivers/crypto/s5p-sss.c 	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
req              2100 drivers/crypto/s5p-sss.c static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
req              2102 drivers/crypto/s5p-sss.c 	return s5p_aes_crypt(req, FLAGS_AES_CBC);
req              2105 drivers/crypto/s5p-sss.c static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
req              2107 drivers/crypto/s5p-sss.c 	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
req              2110 drivers/crypto/s5p-sss.c static int s5p_aes_ctr_crypt(struct ablkcipher_request *req)
req              2112 drivers/crypto/s5p-sss.c 	return s5p_aes_crypt(req, FLAGS_AES_CTR);
req               550 drivers/crypto/sahara.c static int sahara_aes_process(struct ablkcipher_request *req)
req               561 drivers/crypto/sahara.c 		req->nbytes, req->src, req->dst);
req               564 drivers/crypto/sahara.c 	dev->total = req->nbytes;
req               565 drivers/crypto/sahara.c 	dev->in_sg = req->src;
req               566 drivers/crypto/sahara.c 	dev->out_sg = req->dst;
req               568 drivers/crypto/sahara.c 	rctx = ablkcipher_request_ctx(req);
req               569 drivers/crypto/sahara.c 	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
req               573 drivers/crypto/sahara.c 	if ((dev->flags & FLAGS_CBC) && req->info)
req               574 drivers/crypto/sahara.c 		memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);
req               633 drivers/crypto/sahara.c static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
req               635 drivers/crypto/sahara.c 	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
req               640 drivers/crypto/sahara.c 		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
req               642 drivers/crypto/sahara.c 	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
req               651 drivers/crypto/sahara.c 	err = ablkcipher_enqueue_request(&dev->queue, req);
req               659 drivers/crypto/sahara.c static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
req               662 drivers/crypto/sahara.c 		crypto_ablkcipher_reqtfm(req));
req               669 drivers/crypto/sahara.c 		skcipher_request_set_callback(subreq, req->base.flags,
req               671 drivers/crypto/sahara.c 		skcipher_request_set_crypt(subreq, req->src, req->dst,
req               672 drivers/crypto/sahara.c 					   req->nbytes, req->info);
req               678 drivers/crypto/sahara.c 	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
req               681 drivers/crypto/sahara.c static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
req               684 drivers/crypto/sahara.c 		crypto_ablkcipher_reqtfm(req));
req               691 drivers/crypto/sahara.c 		skcipher_request_set_callback(subreq, req->base.flags,
req               693 drivers/crypto/sahara.c 		skcipher_request_set_crypt(subreq, req->src, req->dst,
req               694 drivers/crypto/sahara.c 					   req->nbytes, req->info);
req               700 drivers/crypto/sahara.c 	return sahara_aes_crypt(req, 0);
req               703 drivers/crypto/sahara.c static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
req               706 drivers/crypto/sahara.c 		crypto_ablkcipher_reqtfm(req));
req               713 drivers/crypto/sahara.c 		skcipher_request_set_callback(subreq, req->base.flags,
req               715 drivers/crypto/sahara.c 		skcipher_request_set_crypt(subreq, req->src, req->dst,
req               716 drivers/crypto/sahara.c 					   req->nbytes, req->info);
req               722 drivers/crypto/sahara.c 	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
req               725 drivers/crypto/sahara.c static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
req               728 drivers/crypto/sahara.c 		crypto_ablkcipher_reqtfm(req));
req               735 drivers/crypto/sahara.c 		skcipher_request_set_callback(subreq, req->base.flags,
req               737 drivers/crypto/sahara.c 		skcipher_request_set_crypt(subreq, req->src, req->dst,
req               738 drivers/crypto/sahara.c 					   req->nbytes, req->info);
req               744 drivers/crypto/sahara.c 	return sahara_aes_crypt(req, FLAGS_CBC);
req               836 drivers/crypto/sahara.c 						struct ahash_request *req,
req               889 drivers/crypto/sahara.c 						struct ahash_request *req,
req               924 drivers/crypto/sahara.c static int sahara_sha_prepare_request(struct ahash_request *req)
req               926 drivers/crypto/sahara.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               927 drivers/crypto/sahara.c 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
req               935 drivers/crypto/sahara.c 	len = rctx->buf_cnt + req->nbytes;
req               940 drivers/crypto/sahara.c 		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
req               941 drivers/crypto/sahara.c 					 0, req->nbytes, 0);
req               942 drivers/crypto/sahara.c 		rctx->buf_cnt += req->nbytes;
req               954 drivers/crypto/sahara.c 		unsigned int offset = req->nbytes - hash_later;
req               956 drivers/crypto/sahara.c 		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
req               961 drivers/crypto/sahara.c 	req->nbytes = req->nbytes - hash_later;
req               963 drivers/crypto/sahara.c 	sahara_walk_and_recalc(req->src, req->nbytes);
req               966 drivers/crypto/sahara.c 	if (rctx->buf_cnt && req->nbytes) {
req               970 drivers/crypto/sahara.c 		sg_chain(rctx->in_sg_chain, 2, req->src);
req               972 drivers/crypto/sahara.c 		rctx->total = req->nbytes + rctx->buf_cnt;
req               975 drivers/crypto/sahara.c 		req->src = rctx->in_sg_chain;
req               978 drivers/crypto/sahara.c 		if (req->src)
req               979 drivers/crypto/sahara.c 			rctx->in_sg = req->src;
req               987 drivers/crypto/sahara.c 		rctx->in_sg = req->src;
req               988 drivers/crypto/sahara.c 		rctx->total = req->nbytes;
req               989 drivers/crypto/sahara.c 		req->src = rctx->in_sg;
req               998 drivers/crypto/sahara.c static int sahara_sha_process(struct ahash_request *req)
req              1001 drivers/crypto/sahara.c 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
req              1005 drivers/crypto/sahara.c 	ret = sahara_sha_prepare_request(req);
req              1010 drivers/crypto/sahara.c 		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
req              1016 drivers/crypto/sahara.c 		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
req              1018 drivers/crypto/sahara.c 		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
req              1042 drivers/crypto/sahara.c 	if (req->result)
req              1043 drivers/crypto/sahara.c 		memcpy(req->result, rctx->context, rctx->digest_size);
req              1069 drivers/crypto/sahara.c 				struct ahash_request *req =
req              1072 drivers/crypto/sahara.c 				ret = sahara_sha_process(req);
req              1074 drivers/crypto/sahara.c 				struct ablkcipher_request *req =
req              1077 drivers/crypto/sahara.c 				ret = sahara_aes_process(req);
req              1091 drivers/crypto/sahara.c static int sahara_sha_enqueue(struct ahash_request *req, int last)
req              1093 drivers/crypto/sahara.c 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
req              1097 drivers/crypto/sahara.c 	if (!req->nbytes && !last)
req              1108 drivers/crypto/sahara.c 	ret = crypto_enqueue_request(&dev->queue, &req->base);
req              1116 drivers/crypto/sahara.c static int sahara_sha_init(struct ahash_request *req)
req              1118 drivers/crypto/sahara.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1119 drivers/crypto/sahara.c 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
req              1142 drivers/crypto/sahara.c static int sahara_sha_update(struct ahash_request *req)
req              1144 drivers/crypto/sahara.c 	return sahara_sha_enqueue(req, 0);
req              1147 drivers/crypto/sahara.c static int sahara_sha_final(struct ahash_request *req)
req              1149 drivers/crypto/sahara.c 	req->nbytes = 0;
req              1150 drivers/crypto/sahara.c 	return sahara_sha_enqueue(req, 1);
req              1153 drivers/crypto/sahara.c static int sahara_sha_finup(struct ahash_request *req)
req              1155 drivers/crypto/sahara.c 	return sahara_sha_enqueue(req, 1);
req              1158 drivers/crypto/sahara.c static int sahara_sha_digest(struct ahash_request *req)
req              1160 drivers/crypto/sahara.c 	sahara_sha_init(req);
req              1162 drivers/crypto/sahara.c 	return sahara_sha_finup(req);
req              1165 drivers/crypto/sahara.c static int sahara_sha_export(struct ahash_request *req, void *out)
req              1167 drivers/crypto/sahara.c 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
req              1174 drivers/crypto/sahara.c static int sahara_sha_import(struct ahash_request *req, const void *in)
req              1176 drivers/crypto/sahara.c 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
req               140 drivers/crypto/stm32/stm32-cryp.c 	struct ablkcipher_request *req;
req               398 drivers/crypto/stm32/stm32-cryp.c 	struct ablkcipher_request *req = cryp->req;
req               399 drivers/crypto/stm32/stm32-cryp.c 	u32 *tmp = req->info;
req               619 drivers/crypto/stm32/stm32-cryp.c 		stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->req->info);
req               670 drivers/crypto/stm32/stm32-cryp.c 		crypto_finalize_ablkcipher_request(cryp->engine, cryp->req,
req               717 drivers/crypto/stm32/stm32-cryp.c static int stm32_cryp_crypt(struct ablkcipher_request *req, unsigned long mode)
req               720 drivers/crypto/stm32/stm32-cryp.c 			crypto_ablkcipher_reqtfm(req));
req               721 drivers/crypto/stm32/stm32-cryp.c 	struct stm32_cryp_reqctx *rctx = ablkcipher_request_ctx(req);
req               729 drivers/crypto/stm32/stm32-cryp.c 	return crypto_transfer_ablkcipher_request_to_engine(cryp->engine, req);
req               732 drivers/crypto/stm32/stm32-cryp.c static int stm32_cryp_aead_crypt(struct aead_request *req, unsigned long mode)
req               734 drivers/crypto/stm32/stm32-cryp.c 	struct stm32_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
req               735 drivers/crypto/stm32/stm32-cryp.c 	struct stm32_cryp_reqctx *rctx = aead_request_ctx(req);
req               743 drivers/crypto/stm32/stm32-cryp.c 	return crypto_transfer_aead_request_to_engine(cryp->engine, req);
req               821 drivers/crypto/stm32/stm32-cryp.c static int stm32_cryp_aes_ecb_encrypt(struct ablkcipher_request *req)
req               823 drivers/crypto/stm32/stm32-cryp.c 	return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT);
req               826 drivers/crypto/stm32/stm32-cryp.c static int stm32_cryp_aes_ecb_decrypt(struct ablkcipher_request *req)
req               828 drivers/crypto/stm32/stm32-cryp.c 	return stm32_cryp_crypt(req, FLG_AES | FLG_ECB);
req               831 drivers/crypto/stm32/stm32-cryp.c static int stm32_cryp_aes_cbc_encrypt(struct ablkcipher_request *req)
req               833 drivers/crypto/stm32/stm32-cryp.c 	return stm32_cryp_crypt(req, FLG_AES | FLG_CBC | FLG_ENCRYPT);
req               836 drivers/crypto/stm32/stm32-cryp.c static int stm32_cryp_aes_cbc_decrypt(struct ablkcipher_request *req)
req               838 drivers/crypto/stm32/stm32-cryp.c 	return stm32_cryp_crypt(req, FLG_AES | FLG_CBC);
req               841 drivers/crypto/stm32/stm32-cryp.c static int stm32_cryp_aes_ctr_encrypt(struct ablkcipher_request *req)
req               843 drivers/crypto/stm32/stm32-cryp.c 	return stm32_cryp_crypt(req, FLG_AES | FLG_CTR | FLG_ENCRYPT);
req               846 drivers/crypto/stm32/stm32-cryp.c static int stm32_cryp_aes_ctr_decrypt(struct ablkcipher_request *req)
req               848 drivers/crypto/stm32/stm32-cryp.c 	return stm32_cryp_crypt(req, FLG_AES | FLG_CTR);
req               851 drivers/crypto/stm32/stm32-cryp.c static int stm32_cryp_aes_gcm_encrypt(struct aead_request *req)
req               853 drivers/crypto/stm32/stm32-cryp.c 	return stm32_cryp_aead_crypt(req, FLG_AES | FLG_GCM | FLG_ENCRYPT);
req               856 drivers/crypto/stm32/stm32-cryp.c static int stm32_cryp_aes_gcm_decrypt(struct aead_request *req)
req               858 drivers/crypto/stm32/stm32-cryp.c 	return stm32_cryp_aead_crypt(req, FLG_AES | FLG_GCM);
req               861 drivers/crypto/stm32/stm32-cryp.c static int stm32_cryp_aes_ccm_encrypt(struct aead_request *req)
req               863 drivers/crypto/stm32/stm32-cryp.c 	return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM | FLG_ENCRYPT);
req               866 drivers/crypto/stm32/stm32-cryp.c static int stm32_cryp_aes_ccm_decrypt(struct aead_request *req)
req               868 drivers/crypto/stm32/stm32-cryp.c 	return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM);
req               871 drivers/crypto/stm32/stm32-cryp.c static int stm32_cryp_des_ecb_encrypt(struct ablkcipher_request *req)
req               873 drivers/crypto/stm32/stm32-cryp.c 	return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT);
req               876 drivers/crypto/stm32/stm32-cryp.c static int stm32_cryp_des_ecb_decrypt(struct ablkcipher_request *req)
req               878 drivers/crypto/stm32/stm32-cryp.c 	return stm32_cryp_crypt(req, FLG_DES | FLG_ECB);
req               881 drivers/crypto/stm32/stm32-cryp.c static int stm32_cryp_des_cbc_encrypt(struct ablkcipher_request *req)
req               883 drivers/crypto/stm32/stm32-cryp.c 	return stm32_cryp_crypt(req, FLG_DES | FLG_CBC | FLG_ENCRYPT);
req               886 drivers/crypto/stm32/stm32-cryp.c static int stm32_cryp_des_cbc_decrypt(struct ablkcipher_request *req)
req               888 drivers/crypto/stm32/stm32-cryp.c 	return stm32_cryp_crypt(req, FLG_DES | FLG_CBC);
req               891 drivers/crypto/stm32/stm32-cryp.c static int stm32_cryp_tdes_ecb_encrypt(struct ablkcipher_request *req)
req               893 drivers/crypto/stm32/stm32-cryp.c 	return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB | FLG_ENCRYPT);
req               896 drivers/crypto/stm32/stm32-cryp.c static int stm32_cryp_tdes_ecb_decrypt(struct ablkcipher_request *req)
req               898 drivers/crypto/stm32/stm32-cryp.c 	return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB);
req               901 drivers/crypto/stm32/stm32-cryp.c static int stm32_cryp_tdes_cbc_encrypt(struct ablkcipher_request *req)
req               903 drivers/crypto/stm32/stm32-cryp.c 	return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC | FLG_ENCRYPT);
req               906 drivers/crypto/stm32/stm32-cryp.c static int stm32_cryp_tdes_cbc_decrypt(struct ablkcipher_request *req)
req               908 drivers/crypto/stm32/stm32-cryp.c 	return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC);
req               911 drivers/crypto/stm32/stm32-cryp.c static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
req               919 drivers/crypto/stm32/stm32-cryp.c 	if (!req && !areq)
req               922 drivers/crypto/stm32/stm32-cryp.c 	ctx = req ? crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)) :
req               930 drivers/crypto/stm32/stm32-cryp.c 	rctx = req ? ablkcipher_request_ctx(req) : aead_request_ctx(areq);
req               939 drivers/crypto/stm32/stm32-cryp.c 	if (req) {
req               940 drivers/crypto/stm32/stm32-cryp.c 		cryp->req = req;
req               942 drivers/crypto/stm32/stm32-cryp.c 		cryp->total_in = req->nbytes;
req               967 drivers/crypto/stm32/stm32-cryp.c 		cryp->req = NULL;
req               981 drivers/crypto/stm32/stm32-cryp.c 	cryp->in_sg = req ? req->src : areq->src;
req               982 drivers/crypto/stm32/stm32-cryp.c 	cryp->out_sg = req ? req->dst : areq->dst;
req              1019 drivers/crypto/stm32/stm32-cryp.c 	struct ablkcipher_request *req = container_of(areq,
req              1023 drivers/crypto/stm32/stm32-cryp.c 	return stm32_cryp_prepare_req(req, NULL);
req              1028 drivers/crypto/stm32/stm32-cryp.c 	struct ablkcipher_request *req = container_of(areq,
req              1032 drivers/crypto/stm32/stm32-cryp.c 			crypto_ablkcipher_reqtfm(req));
req              1043 drivers/crypto/stm32/stm32-cryp.c 	struct aead_request *req = container_of(areq, struct aead_request,
req              1046 drivers/crypto/stm32/stm32-cryp.c 	return stm32_cryp_prepare_req(NULL, req);
req              1051 drivers/crypto/stm32/stm32-cryp.c 	struct aead_request *req = container_of(areq, struct aead_request,
req              1053 drivers/crypto/stm32/stm32-cryp.c 	struct stm32_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
req               170 drivers/crypto/stm32/stm32-hash.c 	struct ahash_request	*req;
req               225 drivers/crypto/stm32/stm32-hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
req               252 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
req               253 drivers/crypto/stm32/stm32-hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
req               380 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
req               478 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
req               479 drivers/crypto/stm32/stm32-hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
req               542 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
req               548 drivers/crypto/stm32/stm32-hash.c 	rctx->sg = hdev->req->src;
req               549 drivers/crypto/stm32/stm32-hash.c 	rctx->total = hdev->req->nbytes;
req               653 drivers/crypto/stm32/stm32-hash.c static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
req               656 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
req               660 drivers/crypto/stm32/stm32-hash.c 	if (req->nbytes <= HASH_DMA_THRESHOLD)
req               663 drivers/crypto/stm32/stm32-hash.c 	if (sg_nents(req->src) > 1) {
req               666 drivers/crypto/stm32/stm32-hash.c 		for_each_sg(req->src, sg, sg_nents(req->src), i) {
req               673 drivers/crypto/stm32/stm32-hash.c 	if (req->src->offset % 4)
req               679 drivers/crypto/stm32/stm32-hash.c static int stm32_hash_init(struct ahash_request *req)
req               681 drivers/crypto/stm32/stm32-hash.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               683 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
req               731 drivers/crypto/stm32/stm32-hash.c 	struct ahash_request *req = hdev->req;
req               732 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
req               747 drivers/crypto/stm32/stm32-hash.c static void stm32_hash_copy_hash(struct ahash_request *req)
req               749 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
req               775 drivers/crypto/stm32/stm32-hash.c static int stm32_hash_finish(struct ahash_request *req)
req               777 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
req               779 drivers/crypto/stm32/stm32-hash.c 	if (!req->result)
req               782 drivers/crypto/stm32/stm32-hash.c 	memcpy(req->result, rctx->digest, rctx->digcnt);
req               787 drivers/crypto/stm32/stm32-hash.c static void stm32_hash_finish_req(struct ahash_request *req, int err)
req               789 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
req               793 drivers/crypto/stm32/stm32-hash.c 		stm32_hash_copy_hash(req);
req               794 drivers/crypto/stm32/stm32-hash.c 		err = stm32_hash_finish(req);
req               807 drivers/crypto/stm32/stm32-hash.c 	crypto_finalize_hash_request(hdev->engine, req, err);
req               830 drivers/crypto/stm32/stm32-hash.c 				   struct ahash_request *req)
req               832 drivers/crypto/stm32/stm32-hash.c 	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
req               837 drivers/crypto/stm32/stm32-hash.c 	struct ahash_request *req = container_of(areq, struct ahash_request,
req               839 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
req               846 drivers/crypto/stm32/stm32-hash.c 	hdev->req = req;
req               848 drivers/crypto/stm32/stm32-hash.c 	rctx = ahash_request_ctx(req);
req               851 drivers/crypto/stm32/stm32-hash.c 		rctx->op, req->nbytes);
req               858 drivers/crypto/stm32/stm32-hash.c 	struct ahash_request *req = container_of(areq, struct ahash_request,
req               860 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
req               868 drivers/crypto/stm32/stm32-hash.c 	hdev->req = req;
req               870 drivers/crypto/stm32/stm32-hash.c 	rctx = ahash_request_ctx(req);
req               879 drivers/crypto/stm32/stm32-hash.c 		stm32_hash_finish_req(req, err);
req               884 drivers/crypto/stm32/stm32-hash.c static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
req               886 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
req               887 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
req               892 drivers/crypto/stm32/stm32-hash.c 	return stm32_hash_handle_queue(hdev, req);
req               895 drivers/crypto/stm32/stm32-hash.c static int stm32_hash_update(struct ahash_request *req)
req               897 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
req               899 drivers/crypto/stm32/stm32-hash.c 	if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
req               902 drivers/crypto/stm32/stm32-hash.c 	rctx->total = req->nbytes;
req               903 drivers/crypto/stm32/stm32-hash.c 	rctx->sg = req->src;
req               911 drivers/crypto/stm32/stm32-hash.c 	return stm32_hash_enqueue(req, HASH_OP_UPDATE);
req               914 drivers/crypto/stm32/stm32-hash.c static int stm32_hash_final(struct ahash_request *req)
req               916 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
req               920 drivers/crypto/stm32/stm32-hash.c 	return stm32_hash_enqueue(req, HASH_OP_FINAL);
req               923 drivers/crypto/stm32/stm32-hash.c static int stm32_hash_finup(struct ahash_request *req)
req               925 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
req               926 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
req               932 drivers/crypto/stm32/stm32-hash.c 	if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
req               935 drivers/crypto/stm32/stm32-hash.c 	err1 = stm32_hash_update(req);
req               944 drivers/crypto/stm32/stm32-hash.c 	err2 = stm32_hash_final(req);
req               949 drivers/crypto/stm32/stm32-hash.c static int stm32_hash_digest(struct ahash_request *req)
req               951 drivers/crypto/stm32/stm32-hash.c 	return stm32_hash_init(req) ?: stm32_hash_finup(req);
req               954 drivers/crypto/stm32/stm32-hash.c static int stm32_hash_export(struct ahash_request *req, void *out)
req               956 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
req               957 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
req               987 drivers/crypto/stm32/stm32-hash.c static int stm32_hash_import(struct ahash_request *req, const void *in)
req               989 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
req               990 drivers/crypto/stm32/stm32-hash.c 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
req              1098 drivers/crypto/stm32/stm32-hash.c 	stm32_hash_finish_req(hdev->req, 0);
req              1050 drivers/crypto/talitos.c 	struct aead_request *req = context;
req              1051 drivers/crypto/talitos.c 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
req              1058 drivers/crypto/talitos.c 	ipsec_esp_unmap(dev, edesc, req, false);
req              1070 drivers/crypto/talitos.c 	aead_request_complete(req, err);
req              1077 drivers/crypto/talitos.c 	struct aead_request *req = context;
req              1082 drivers/crypto/talitos.c 	ipsec_esp_unmap(dev, edesc, req, false);
req              1091 drivers/crypto/talitos.c 	aead_request_complete(req, err);
req              1434 drivers/crypto/talitos.c static int aead_encrypt(struct aead_request *req)
req              1436 drivers/crypto/talitos.c 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
req              1441 drivers/crypto/talitos.c 	edesc = aead_edesc_alloc(req, req->iv, 0, true);
req              1448 drivers/crypto/talitos.c 	return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
req              1451 drivers/crypto/talitos.c static int aead_decrypt(struct aead_request *req)
req              1453 drivers/crypto/talitos.c 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
req              1461 drivers/crypto/talitos.c 	edesc = aead_edesc_alloc(req, req->iv, 1, false);
req              1477 drivers/crypto/talitos.c 		return ipsec_esp(edesc, req, false,
req              1487 drivers/crypto/talitos.c 	sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
req              1488 drivers/crypto/talitos.c 			   req->assoclen + req->cryptlen - authsize);
req              1490 drivers/crypto/talitos.c 	return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
req              2190 drivers/crypto/talitos.c 	struct ahash_request *req;
req              2196 drivers/crypto/talitos.c 	req = ahash_request_alloc(tfm, GFP_KERNEL);
req              2197 drivers/crypto/talitos.c 	if (!req)
req              2202 drivers/crypto/talitos.c 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req              2207 drivers/crypto/talitos.c 	ahash_request_set_crypt(req, sg, hash, keylen);
req              2208 drivers/crypto/talitos.c 	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
req              2210 drivers/crypto/talitos.c 	ahash_request_free(req);
req               390 drivers/crypto/ux500/hash/hash_alg.h int hash_hw_update(struct ahash_request *req);
req               549 drivers/crypto/ux500/hash/hash_core.c static int hash_init(struct ahash_request *req)
req               551 drivers/crypto/ux500/hash/hash_core.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               553 drivers/crypto/ux500/hash/hash_core.c 	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
req               561 drivers/crypto/ux500/hash/hash_core.c 		if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
req               567 drivers/crypto/ux500/hash/hash_core.c 			if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
req               568 drivers/crypto/ux500/hash/hash_core.c 			    hash_dma_valid_data(req->src, req->nbytes)) {
req               853 drivers/crypto/ux500/hash/hash_core.c static int hash_dma_final(struct ahash_request *req)
req               856 drivers/crypto/ux500/hash/hash_core.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               858 drivers/crypto/ux500/hash/hash_core.c 	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
req               906 drivers/crypto/ux500/hash/hash_core.c 		HASH_SET_NBLW((req->nbytes * 8) % 32);
req               911 drivers/crypto/ux500/hash/hash_core.c 	ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
req               919 drivers/crypto/ux500/hash/hash_core.c 	bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
req               920 drivers/crypto/ux500/hash/hash_core.c 	if (bytes_written != req->nbytes) {
req               943 drivers/crypto/ux500/hash/hash_core.c 	memcpy(req->result, digest, ctx->digestsize);
req               960 drivers/crypto/ux500/hash/hash_core.c static int hash_hw_final(struct ahash_request *req)
req               963 drivers/crypto/ux500/hash/hash_core.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               965 drivers/crypto/ux500/hash/hash_core.c 	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
req               984 drivers/crypto/ux500/hash/hash_core.c 	} else if (req->nbytes == 0 && ctx->keylen == 0) {
req               996 drivers/crypto/ux500/hash/hash_core.c 			memcpy(req->result, &zero_hash[0], ctx->digestsize);
req              1011 drivers/crypto/ux500/hash/hash_core.c 	} else if (req->nbytes == 0 && ctx->keylen > 0) {
req              1045 drivers/crypto/ux500/hash/hash_core.c 	memcpy(req->result, digest, ctx->digestsize);
req              1064 drivers/crypto/ux500/hash/hash_core.c int hash_hw_update(struct ahash_request *req)
req              1071 drivers/crypto/ux500/hash/hash_core.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1073 drivers/crypto/ux500/hash/hash_core.c 	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
req              1075 drivers/crypto/ux500/hash/hash_core.c 	int msg_length = crypto_hash_walk_first(req, &walk);
req              1294 drivers/crypto/ux500/hash/hash_core.c static int ahash_update(struct ahash_request *req)
req              1297 drivers/crypto/ux500/hash/hash_core.c 	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
req              1300 drivers/crypto/ux500/hash/hash_core.c 		ret = hash_hw_update(req);
req              1314 drivers/crypto/ux500/hash/hash_core.c static int ahash_final(struct ahash_request *req)
req              1317 drivers/crypto/ux500/hash/hash_core.c 	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
req              1319 drivers/crypto/ux500/hash/hash_core.c 	pr_debug("%s: data size: %d\n", __func__, req->nbytes);
req              1322 drivers/crypto/ux500/hash/hash_core.c 		ret = hash_dma_final(req);
req              1324 drivers/crypto/ux500/hash/hash_core.c 		ret = hash_hw_final(req);
req              1353 drivers/crypto/ux500/hash/hash_core.c static int ahash_sha1_init(struct ahash_request *req)
req              1355 drivers/crypto/ux500/hash/hash_core.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1363 drivers/crypto/ux500/hash/hash_core.c 	return hash_init(req);
req              1366 drivers/crypto/ux500/hash/hash_core.c static int ahash_sha256_init(struct ahash_request *req)
req              1368 drivers/crypto/ux500/hash/hash_core.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1376 drivers/crypto/ux500/hash/hash_core.c 	return hash_init(req);
req              1379 drivers/crypto/ux500/hash/hash_core.c static int ahash_sha1_digest(struct ahash_request *req)
req              1383 drivers/crypto/ux500/hash/hash_core.c 	ret1 = ahash_sha1_init(req);
req              1387 drivers/crypto/ux500/hash/hash_core.c 	ret1 = ahash_update(req);
req              1388 drivers/crypto/ux500/hash/hash_core.c 	ret2 = ahash_final(req);
req              1394 drivers/crypto/ux500/hash/hash_core.c static int ahash_sha256_digest(struct ahash_request *req)
req              1398 drivers/crypto/ux500/hash/hash_core.c 	ret1 = ahash_sha256_init(req);
req              1402 drivers/crypto/ux500/hash/hash_core.c 	ret1 = ahash_update(req);
req              1403 drivers/crypto/ux500/hash/hash_core.c 	ret2 = ahash_final(req);
req              1409 drivers/crypto/ux500/hash/hash_core.c static int ahash_noimport(struct ahash_request *req, const void *in)
req              1414 drivers/crypto/ux500/hash/hash_core.c static int ahash_noexport(struct ahash_request *req, void *out)
req              1419 drivers/crypto/ux500/hash/hash_core.c static int hmac_sha1_init(struct ahash_request *req)
req              1421 drivers/crypto/ux500/hash/hash_core.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1429 drivers/crypto/ux500/hash/hash_core.c 	return hash_init(req);
req              1432 drivers/crypto/ux500/hash/hash_core.c static int hmac_sha256_init(struct ahash_request *req)
req              1434 drivers/crypto/ux500/hash/hash_core.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req              1442 drivers/crypto/ux500/hash/hash_core.c 	return hash_init(req);
req              1445 drivers/crypto/ux500/hash/hash_core.c static int hmac_sha1_digest(struct ahash_request *req)
req              1449 drivers/crypto/ux500/hash/hash_core.c 	ret1 = hmac_sha1_init(req);
req              1453 drivers/crypto/ux500/hash/hash_core.c 	ret1 = ahash_update(req);
req              1454 drivers/crypto/ux500/hash/hash_core.c 	ret2 = ahash_final(req);
req              1460 drivers/crypto/ux500/hash/hash_core.c static int hmac_sha256_digest(struct ahash_request *req)
req              1464 drivers/crypto/ux500/hash/hash_core.c 	ret1 = hmac_sha256_init(req);
req              1468 drivers/crypto/ux500/hash/hash_core.c 	ret1 = ahash_update(req);
req              1469 drivers/crypto/ux500/hash/hash_core.c 	ret2 = ahash_final(req);
req                54 drivers/crypto/virtio/virtio_crypto_algs.c 	struct ablkcipher_request *req,
req               343 drivers/crypto/virtio/virtio_crypto_algs.c 		struct ablkcipher_request *req,
req               346 drivers/crypto/virtio/virtio_crypto_algs.c 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
req               362 drivers/crypto/virtio/virtio_crypto_algs.c 	src_nents = sg_nents_for_len(req->src, req->nbytes);
req               368 drivers/crypto/virtio/virtio_crypto_algs.c 	dst_nents = sg_nents(req->dst);
req               404 drivers/crypto/virtio/virtio_crypto_algs.c 			cpu_to_le32(req->nbytes);
req               406 drivers/crypto/virtio/virtio_crypto_algs.c 	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
req               413 drivers/crypto/virtio/virtio_crypto_algs.c 	dst_len = min_t(unsigned int, req->nbytes, dst_len);
req               415 drivers/crypto/virtio/virtio_crypto_algs.c 			req->nbytes, dst_len);
req               417 drivers/crypto/virtio/virtio_crypto_algs.c 	if (unlikely(req->nbytes + dst_len + ivsize +
req               443 drivers/crypto/virtio/virtio_crypto_algs.c 	memcpy(iv, req->info, ivsize);
req               445 drivers/crypto/virtio/virtio_crypto_algs.c 		scatterwalk_map_and_copy(req->info, req->src,
req               446 drivers/crypto/virtio/virtio_crypto_algs.c 					 req->nbytes - AES_BLOCK_SIZE,
req               454 drivers/crypto/virtio/virtio_crypto_algs.c 	for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
req               458 drivers/crypto/virtio/virtio_crypto_algs.c 	for (sg = req->dst; sg; sg = sg_next(sg))
req               485 drivers/crypto/virtio/virtio_crypto_algs.c static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
req               487 drivers/crypto/virtio/virtio_crypto_algs.c 	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
req               490 drivers/crypto/virtio/virtio_crypto_algs.c 				ablkcipher_request_ctx(req);
req               496 drivers/crypto/virtio/virtio_crypto_algs.c 	if (!req->nbytes)
req               498 drivers/crypto/virtio/virtio_crypto_algs.c 	if (req->nbytes % AES_BLOCK_SIZE)
req               504 drivers/crypto/virtio/virtio_crypto_algs.c 	vc_sym_req->ablkcipher_req = req;
req               507 drivers/crypto/virtio/virtio_crypto_algs.c 	return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
req               510 drivers/crypto/virtio/virtio_crypto_algs.c static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
req               512 drivers/crypto/virtio/virtio_crypto_algs.c 	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
req               515 drivers/crypto/virtio/virtio_crypto_algs.c 				ablkcipher_request_ctx(req);
req               521 drivers/crypto/virtio/virtio_crypto_algs.c 	if (!req->nbytes)
req               523 drivers/crypto/virtio/virtio_crypto_algs.c 	if (req->nbytes % AES_BLOCK_SIZE)
req               529 drivers/crypto/virtio/virtio_crypto_algs.c 	vc_sym_req->ablkcipher_req = req;
req               532 drivers/crypto/virtio/virtio_crypto_algs.c 	return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
req               564 drivers/crypto/virtio/virtio_crypto_algs.c 	struct ablkcipher_request *req = container_of(vreq, struct ablkcipher_request, base);
req               566 drivers/crypto/virtio/virtio_crypto_algs.c 				ablkcipher_request_ctx(req);
req               571 drivers/crypto/virtio/virtio_crypto_algs.c 	ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
req               582 drivers/crypto/virtio/virtio_crypto_algs.c 	struct ablkcipher_request *req,
req               586 drivers/crypto/virtio/virtio_crypto_algs.c 		scatterwalk_map_and_copy(req->info, req->dst,
req               587 drivers/crypto/virtio/virtio_crypto_algs.c 					 req->nbytes - AES_BLOCK_SIZE,
req               593 drivers/crypto/virtio/virtio_crypto_algs.c 					   req, err);
req                71 drivers/crypto/vmx/aes_cbc.c static int p8_aes_cbc_crypt(struct skcipher_request *req, int enc)
req                73 drivers/crypto/vmx/aes_cbc.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req                80 drivers/crypto/vmx/aes_cbc.c 		struct skcipher_request *subreq = skcipher_request_ctx(req);
req                82 drivers/crypto/vmx/aes_cbc.c 		*subreq = *req;
req                88 drivers/crypto/vmx/aes_cbc.c 	ret = skcipher_walk_virt(&walk, req, false);
req               107 drivers/crypto/vmx/aes_cbc.c static int p8_aes_cbc_encrypt(struct skcipher_request *req)
req               109 drivers/crypto/vmx/aes_cbc.c 	return p8_aes_cbc_crypt(req, 1);
req               112 drivers/crypto/vmx/aes_cbc.c static int p8_aes_cbc_decrypt(struct skcipher_request *req)
req               114 drivers/crypto/vmx/aes_cbc.c 	return p8_aes_cbc_crypt(req, 0);
req                90 drivers/crypto/vmx/aes_ctr.c static int p8_aes_ctr_crypt(struct skcipher_request *req)
req                92 drivers/crypto/vmx/aes_ctr.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req                99 drivers/crypto/vmx/aes_ctr.c 		struct skcipher_request *subreq = skcipher_request_ctx(req);
req               101 drivers/crypto/vmx/aes_ctr.c 		*subreq = *req;
req               106 drivers/crypto/vmx/aes_ctr.c 	ret = skcipher_walk_virt(&walk, req, false);
req                78 drivers/crypto/vmx/aes_xts.c static int p8_aes_xts_crypt(struct skcipher_request *req, int enc)
req                80 drivers/crypto/vmx/aes_xts.c 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req                87 drivers/crypto/vmx/aes_xts.c 	if (req->cryptlen < AES_BLOCK_SIZE)
req                90 drivers/crypto/vmx/aes_xts.c 	if (!crypto_simd_usable() || (req->cryptlen % XTS_BLOCK_SIZE) != 0) {
req                91 drivers/crypto/vmx/aes_xts.c 		struct skcipher_request *subreq = skcipher_request_ctx(req);
req                93 drivers/crypto/vmx/aes_xts.c 		*subreq = *req;
req                99 drivers/crypto/vmx/aes_xts.c 	ret = skcipher_walk_virt(&walk, req, false);
req               136 drivers/crypto/vmx/aes_xts.c static int p8_aes_xts_encrypt(struct skcipher_request *req)
req               138 drivers/crypto/vmx/aes_xts.c 	return p8_aes_xts_crypt(req, 1);
req               141 drivers/crypto/vmx/aes_xts.c static int p8_aes_xts_decrypt(struct skcipher_request *req)
req               143 drivers/crypto/vmx/aes_xts.c 	return p8_aes_xts_crypt(req, 0);
req               207 drivers/dma/bcm-sba-raid.c 	struct sba_request *req = NULL;
req               210 drivers/dma/bcm-sba-raid.c 	list_for_each_entry(req, &sba->reqs_free_list, node) {
req               211 drivers/dma/bcm-sba-raid.c 		if (async_tx_test_ack(&req->tx)) {
req               212 drivers/dma/bcm-sba-raid.c 			list_move_tail(&req->node, &sba->reqs_alloc_list);
req               230 drivers/dma/bcm-sba-raid.c 	req->flags = SBA_REQUEST_STATE_ALLOCED;
req               231 drivers/dma/bcm-sba-raid.c 	req->first = req;
req               232 drivers/dma/bcm-sba-raid.c 	INIT_LIST_HEAD(&req->next);
req               233 drivers/dma/bcm-sba-raid.c 	atomic_set(&req->next_pending_count, 1);
req               235 drivers/dma/bcm-sba-raid.c 	dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
req               236 drivers/dma/bcm-sba-raid.c 	async_tx_ack(&req->tx);
req               238 drivers/dma/bcm-sba-raid.c 	return req;
req               243 drivers/dma/bcm-sba-raid.c 				 struct sba_request *req)
req               246 drivers/dma/bcm-sba-raid.c 	req->flags &= ~SBA_REQUEST_STATE_MASK;
req               247 drivers/dma/bcm-sba-raid.c 	req->flags |= SBA_REQUEST_STATE_PENDING;
req               248 drivers/dma/bcm-sba-raid.c 	list_move_tail(&req->node, &sba->reqs_pending_list);
req               255 drivers/dma/bcm-sba-raid.c 				struct sba_request *req)
req               262 drivers/dma/bcm-sba-raid.c 	req->flags &= ~SBA_REQUEST_STATE_MASK;
req               263 drivers/dma/bcm-sba-raid.c 	req->flags |= SBA_REQUEST_STATE_ACTIVE;
req               264 drivers/dma/bcm-sba-raid.c 	list_move_tail(&req->node, &sba->reqs_active_list);
req               265 drivers/dma/bcm-sba-raid.c 	if (req->flags & SBA_REQUEST_FENCE)
req               272 drivers/dma/bcm-sba-raid.c 			       struct sba_request *req)
req               275 drivers/dma/bcm-sba-raid.c 	req->flags &= ~SBA_REQUEST_STATE_MASK;
req               276 drivers/dma/bcm-sba-raid.c 	req->flags |= SBA_REQUEST_STATE_ABORTED;
req               277 drivers/dma/bcm-sba-raid.c 	list_move_tail(&req->node, &sba->reqs_aborted_list);
req               284 drivers/dma/bcm-sba-raid.c 			      struct sba_request *req)
req               287 drivers/dma/bcm-sba-raid.c 	req->flags &= ~SBA_REQUEST_STATE_MASK;
req               288 drivers/dma/bcm-sba-raid.c 	req->flags |= SBA_REQUEST_STATE_FREE;
req               289 drivers/dma/bcm-sba-raid.c 	list_move_tail(&req->node, &sba->reqs_free_list);
req               294 drivers/dma/bcm-sba-raid.c static void sba_free_chained_requests(struct sba_request *req)
req               298 drivers/dma/bcm-sba-raid.c 	struct sba_device *sba = req->sba;
req               302 drivers/dma/bcm-sba-raid.c 	_sba_free_request(sba, req);
req               303 drivers/dma/bcm-sba-raid.c 	list_for_each_entry(nreq, &req->next, next)
req               310 drivers/dma/bcm-sba-raid.c 			      struct sba_request *req)
req               313 drivers/dma/bcm-sba-raid.c 	struct sba_device *sba = req->sba;
req               317 drivers/dma/bcm-sba-raid.c 	list_add_tail(&req->next, &first->next);
req               318 drivers/dma/bcm-sba-raid.c 	req->first = first;
req               327 drivers/dma/bcm-sba-raid.c 	struct sba_request *req, *req1;
req               332 drivers/dma/bcm-sba-raid.c 	list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
req               333 drivers/dma/bcm-sba-raid.c 		_sba_free_request(sba, req);
req               336 drivers/dma/bcm-sba-raid.c 	list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
req               337 drivers/dma/bcm-sba-raid.c 		_sba_abort_request(sba, req);
req               350 drivers/dma/bcm-sba-raid.c 	struct sba_request *req, *req1;
req               355 drivers/dma/bcm-sba-raid.c 	list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
req               356 drivers/dma/bcm-sba-raid.c 		_sba_free_request(sba, req);
req               362 drivers/dma/bcm-sba-raid.c 				 struct sba_request *req)
req               367 drivers/dma/bcm-sba-raid.c 	req->msg.error = 0;
req               368 drivers/dma/bcm-sba-raid.c 	ret = mbox_send_message(sba->mchan, &req->msg);
req               375 drivers/dma/bcm-sba-raid.c 	ret = req->msg.error;
req               391 drivers/dma/bcm-sba-raid.c 	struct sba_request *req;
req               397 drivers/dma/bcm-sba-raid.c 		req = list_first_entry(&sba->reqs_pending_list,
req               401 drivers/dma/bcm-sba-raid.c 		if (!_sba_active_request(sba, req))
req               405 drivers/dma/bcm-sba-raid.c 		ret = sba_send_mbox_request(sba, req);
req               407 drivers/dma/bcm-sba-raid.c 			_sba_pending_request(sba, req);
req               416 drivers/dma/bcm-sba-raid.c 					 struct sba_request *req)
req               420 drivers/dma/bcm-sba-raid.c 	struct sba_request *nreq, *first = req->first;
req               460 drivers/dma/bcm-sba-raid.c 	struct sba_request *req;
req               466 drivers/dma/bcm-sba-raid.c 	list_for_each_entry(req, &sba->reqs_free_list, node)
req               467 drivers/dma/bcm-sba-raid.c 		if (async_tx_test_ack(&req->tx))
req               470 drivers/dma/bcm-sba-raid.c 	list_for_each_entry(req, &sba->reqs_alloc_list, node)
req               473 drivers/dma/bcm-sba-raid.c 	list_for_each_entry(req, &sba->reqs_pending_list, node)
req               476 drivers/dma/bcm-sba-raid.c 	list_for_each_entry(req, &sba->reqs_active_list, node)
req               479 drivers/dma/bcm-sba-raid.c 	list_for_each_entry(req, &sba->reqs_aborted_list, node)
req               528 drivers/dma/bcm-sba-raid.c 	struct sba_request *req, *nreq;
req               534 drivers/dma/bcm-sba-raid.c 	req = to_sba_request(tx);
req               539 drivers/dma/bcm-sba-raid.c 	_sba_pending_request(sba, req);
req               540 drivers/dma/bcm-sba-raid.c 	list_for_each_entry(nreq, &req->next, next)
req               563 drivers/dma/bcm-sba-raid.c static void sba_fillup_interrupt_msg(struct sba_request *req,
req               569 drivers/dma/bcm-sba-raid.c 	dma_addr_t resp_dma = req->tx.phys;
req               575 drivers/dma/bcm-sba-raid.c 	cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
req               586 drivers/dma/bcm-sba-raid.c 	cmdsp->data_len = req->sba->hw_resp_size;
req               592 drivers/dma/bcm-sba-raid.c 	cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
req               604 drivers/dma/bcm-sba-raid.c 	if (req->sba->hw_resp_size) {
req               607 drivers/dma/bcm-sba-raid.c 		cmdsp->resp_len = req->sba->hw_resp_size;
req               611 drivers/dma/bcm-sba-raid.c 	cmdsp->data_len = req->sba->hw_resp_size;
req               618 drivers/dma/bcm-sba-raid.c 	msg->ctx = req;
req               625 drivers/dma/bcm-sba-raid.c 	struct sba_request *req = NULL;
req               629 drivers/dma/bcm-sba-raid.c 	req = sba_alloc_request(sba);
req               630 drivers/dma/bcm-sba-raid.c 	if (!req)
req               637 drivers/dma/bcm-sba-raid.c 	req->flags |= SBA_REQUEST_FENCE;
req               640 drivers/dma/bcm-sba-raid.c 	sba_fillup_interrupt_msg(req, req->cmds, &req->msg);
req               643 drivers/dma/bcm-sba-raid.c 	req->tx.flags = flags;
req               644 drivers/dma/bcm-sba-raid.c 	req->tx.cookie = -EBUSY;
req               646 drivers/dma/bcm-sba-raid.c 	return &req->tx;
req               649 drivers/dma/bcm-sba-raid.c static void sba_fillup_memcpy_msg(struct sba_request *req,
req               657 drivers/dma/bcm-sba-raid.c 	dma_addr_t resp_dma = req->tx.phys;
req               692 drivers/dma/bcm-sba-raid.c 	if (req->sba->hw_resp_size) {
req               695 drivers/dma/bcm-sba-raid.c 		cmdsp->resp_len = req->sba->hw_resp_size;
req               706 drivers/dma/bcm-sba-raid.c 	msg->ctx = req;
req               715 drivers/dma/bcm-sba-raid.c 	struct sba_request *req = NULL;
req               718 drivers/dma/bcm-sba-raid.c 	req = sba_alloc_request(sba);
req               719 drivers/dma/bcm-sba-raid.c 	if (!req)
req               722 drivers/dma/bcm-sba-raid.c 		req->flags |= SBA_REQUEST_FENCE;
req               725 drivers/dma/bcm-sba-raid.c 	sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
req               729 drivers/dma/bcm-sba-raid.c 	req->tx.flags = flags;
req               730 drivers/dma/bcm-sba-raid.c 	req->tx.cookie = -EBUSY;
req               732 drivers/dma/bcm-sba-raid.c 	return req;
req               742 drivers/dma/bcm-sba-raid.c 	struct sba_request *first = NULL, *req;
req               748 drivers/dma/bcm-sba-raid.c 		req = sba_prep_dma_memcpy_req(sba, off, dst, src,
req               750 drivers/dma/bcm-sba-raid.c 		if (!req) {
req               757 drivers/dma/bcm-sba-raid.c 			sba_chain_request(first, req);
req               759 drivers/dma/bcm-sba-raid.c 			first = req;
req               768 drivers/dma/bcm-sba-raid.c static void sba_fillup_xor_msg(struct sba_request *req,
req               777 drivers/dma/bcm-sba-raid.c 	dma_addr_t resp_dma = req->tx.phys;
req               831 drivers/dma/bcm-sba-raid.c 	if (req->sba->hw_resp_size) {
req               834 drivers/dma/bcm-sba-raid.c 		cmdsp->resp_len = req->sba->hw_resp_size;
req               845 drivers/dma/bcm-sba-raid.c 	msg->ctx = req;
req               854 drivers/dma/bcm-sba-raid.c 	struct sba_request *req = NULL;
req               857 drivers/dma/bcm-sba-raid.c 	req = sba_alloc_request(sba);
req               858 drivers/dma/bcm-sba-raid.c 	if (!req)
req               861 drivers/dma/bcm-sba-raid.c 		req->flags |= SBA_REQUEST_FENCE;
req               864 drivers/dma/bcm-sba-raid.c 	sba_fillup_xor_msg(req, req->cmds, &req->msg,
req               868 drivers/dma/bcm-sba-raid.c 	req->tx.flags = flags;
req               869 drivers/dma/bcm-sba-raid.c 	req->tx.cookie = -EBUSY;
req               871 drivers/dma/bcm-sba-raid.c 	return req;
req               881 drivers/dma/bcm-sba-raid.c 	struct sba_request *first = NULL, *req;
req               891 drivers/dma/bcm-sba-raid.c 		req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
req               893 drivers/dma/bcm-sba-raid.c 		if (!req) {
req               900 drivers/dma/bcm-sba-raid.c 			sba_chain_request(first, req);
req               902 drivers/dma/bcm-sba-raid.c 			first = req;
req               911 drivers/dma/bcm-sba-raid.c static void sba_fillup_pq_msg(struct sba_request *req,
req               922 drivers/dma/bcm-sba-raid.c 	dma_addr_t resp_dma = req->tx.phys;
req              1014 drivers/dma/bcm-sba-raid.c 		if (req->sba->hw_resp_size) {
req              1017 drivers/dma/bcm-sba-raid.c 			cmdsp->resp_len = req->sba->hw_resp_size;
req              1041 drivers/dma/bcm-sba-raid.c 		if (req->sba->hw_resp_size) {
req              1044 drivers/dma/bcm-sba-raid.c 			cmdsp->resp_len = req->sba->hw_resp_size;
req              1056 drivers/dma/bcm-sba-raid.c 	msg->ctx = req;
req              1065 drivers/dma/bcm-sba-raid.c 	struct sba_request *req = NULL;
req              1068 drivers/dma/bcm-sba-raid.c 	req = sba_alloc_request(sba);
req              1069 drivers/dma/bcm-sba-raid.c 	if (!req)
req              1072 drivers/dma/bcm-sba-raid.c 		req->flags |= SBA_REQUEST_FENCE;
req              1075 drivers/dma/bcm-sba-raid.c 	sba_fillup_pq_msg(req, dmaf_continue(flags),
req              1076 drivers/dma/bcm-sba-raid.c 			  req->cmds, &req->msg,
req              1080 drivers/dma/bcm-sba-raid.c 	req->tx.flags = flags;
req              1081 drivers/dma/bcm-sba-raid.c 	req->tx.cookie = -EBUSY;
req              1083 drivers/dma/bcm-sba-raid.c 	return req;
req              1086 drivers/dma/bcm-sba-raid.c static void sba_fillup_pq_single_msg(struct sba_request *req,
req              1097 drivers/dma/bcm-sba-raid.c 	dma_addr_t resp_dma = req->tx.phys;
req              1174 drivers/dma/bcm-sba-raid.c 	if (req->sba->hw_resp_size) {
req              1177 drivers/dma/bcm-sba-raid.c 		cmdsp->resp_len = req->sba->hw_resp_size;
req              1202 drivers/dma/bcm-sba-raid.c 	pos = (dpos < req->sba->max_pq_coefs) ?
req              1203 drivers/dma/bcm-sba-raid.c 		dpos : (req->sba->max_pq_coefs - 1);
req              1231 drivers/dma/bcm-sba-raid.c 		pos = (dpos < req->sba->max_pq_coefs) ?
req              1232 drivers/dma/bcm-sba-raid.c 			dpos : (req->sba->max_pq_coefs - 1);
req              1295 drivers/dma/bcm-sba-raid.c 	if (req->sba->hw_resp_size) {
req              1298 drivers/dma/bcm-sba-raid.c 		cmdsp->resp_len = req->sba->hw_resp_size;
req              1310 drivers/dma/bcm-sba-raid.c 	msg->ctx = req;
req              1320 drivers/dma/bcm-sba-raid.c 	struct sba_request *req = NULL;
req              1323 drivers/dma/bcm-sba-raid.c 	req = sba_alloc_request(sba);
req              1324 drivers/dma/bcm-sba-raid.c 	if (!req)
req              1327 drivers/dma/bcm-sba-raid.c 		req->flags |= SBA_REQUEST_FENCE;
req              1330 drivers/dma/bcm-sba-raid.c 	sba_fillup_pq_single_msg(req,  dmaf_continue(flags),
req              1331 drivers/dma/bcm-sba-raid.c 				 req->cmds, &req->msg, off, len,
req              1335 drivers/dma/bcm-sba-raid.c 	req->tx.flags = flags;
req              1336 drivers/dma/bcm-sba-raid.c 	req->tx.cookie = -EBUSY;
req              1338 drivers/dma/bcm-sba-raid.c 	return req;
req              1351 drivers/dma/bcm-sba-raid.c 	struct sba_request *first = NULL, *req;
req              1384 drivers/dma/bcm-sba-raid.c 				req = sba_prep_dma_pq_single_req(sba,
req              1387 drivers/dma/bcm-sba-raid.c 				if (!req)
req              1391 drivers/dma/bcm-sba-raid.c 					sba_chain_request(first, req);
req              1393 drivers/dma/bcm-sba-raid.c 					first = req;
req              1402 drivers/dma/bcm-sba-raid.c 				req = sba_prep_dma_pq_single_req(sba,
req              1405 drivers/dma/bcm-sba-raid.c 				if (!req)
req              1409 drivers/dma/bcm-sba-raid.c 					sba_chain_request(first, req);
req              1411 drivers/dma/bcm-sba-raid.c 					first = req;
req              1416 drivers/dma/bcm-sba-raid.c 			req = sba_prep_dma_pq_req(sba, off,
req              1419 drivers/dma/bcm-sba-raid.c 			if (!req)
req              1423 drivers/dma/bcm-sba-raid.c 				sba_chain_request(first, req);
req              1425 drivers/dma/bcm-sba-raid.c 				first = req;
req              1445 drivers/dma/bcm-sba-raid.c 	struct sba_request *req = m->ctx;
req              1446 drivers/dma/bcm-sba-raid.c 	struct sba_device *sba = req->sba;
req              1454 drivers/dma/bcm-sba-raid.c 	sba_process_received_request(sba, req);
req              1474 drivers/dma/bcm-sba-raid.c 	struct sba_request *req = NULL;
req              1499 drivers/dma/bcm-sba-raid.c 		req = devm_kzalloc(sba->dev,
req              1500 drivers/dma/bcm-sba-raid.c 				   struct_size(req, cmds, sba->max_cmd_per_req),
req              1502 drivers/dma/bcm-sba-raid.c 		if (!req) {
req              1506 drivers/dma/bcm-sba-raid.c 		INIT_LIST_HEAD(&req->node);
req              1507 drivers/dma/bcm-sba-raid.c 		req->sba = sba;
req              1508 drivers/dma/bcm-sba-raid.c 		req->flags = SBA_REQUEST_STATE_FREE;
req              1509 drivers/dma/bcm-sba-raid.c 		INIT_LIST_HEAD(&req->next);
req              1510 drivers/dma/bcm-sba-raid.c 		atomic_set(&req->next_pending_count, 0);
req              1512 drivers/dma/bcm-sba-raid.c 			req->cmds[j].cmd = 0;
req              1513 drivers/dma/bcm-sba-raid.c 			req->cmds[j].cmd_dma = sba->cmds_base +
req              1515 drivers/dma/bcm-sba-raid.c 			req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
req              1517 drivers/dma/bcm-sba-raid.c 			req->cmds[j].flags = 0;
req              1519 drivers/dma/bcm-sba-raid.c 		memset(&req->msg, 0, sizeof(req->msg));
req              1520 drivers/dma/bcm-sba-raid.c 		dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
req              1521 drivers/dma/bcm-sba-raid.c 		async_tx_ack(&req->tx);
req              1522 drivers/dma/bcm-sba-raid.c 		req->tx.tx_submit = sba_tx_submit;
req              1523 drivers/dma/bcm-sba-raid.c 		req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
req              1524 drivers/dma/bcm-sba-raid.c 		list_add_tail(&req->node, &sba->reqs_free_list);
req               225 drivers/dma/ioat/dca.c 	u32 req;
req               233 drivers/dma/ioat/dca.c 		req = readl(iobase + global_req_table + (slots * sizeof(u32)));
req               235 drivers/dma/ioat/dca.c 	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);
req               378 drivers/dma/pl330.c 	struct _pl330_req req[2];
req               553 drivers/dma/pl330.c 	return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL;
req               996 drivers/dma/pl330.c 	struct _pl330_req *req;
req              1008 drivers/dma/pl330.c 	if (thrd->req[idx].desc != NULL) {
req              1009 drivers/dma/pl330.c 		req = &thrd->req[idx];
req              1012 drivers/dma/pl330.c 		if (thrd->req[idx].desc != NULL)
req              1013 drivers/dma/pl330.c 			req = &thrd->req[idx];
req              1015 drivers/dma/pl330.c 			req = NULL;
req              1019 drivers/dma/pl330.c 	if (!req)
req              1026 drivers/dma/pl330.c 	desc = req->desc;
req              1036 drivers/dma/pl330.c 	go.addr = req->mc_bus;
req              1404 drivers/dma/pl330.c 	struct _pl330_req *req = &thrd->req[index];
req              1405 drivers/dma/pl330.c 	u8 *buf = req->mc_cpu;
req              1408 drivers/dma/pl330.c 	PL330_DBGMC_START(req->mc_bus);
req              1515 drivers/dma/pl330.c 	idx = thrd->req[0].desc == NULL ? 0 : 1;
req              1534 drivers/dma/pl330.c 	thrd->req[idx].desc = desc;
req              1608 drivers/dma/pl330.c 			dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, err);
req              1609 drivers/dma/pl330.c 			dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, err);
req              1612 drivers/dma/pl330.c 			thrd->req[0].desc = NULL;
req              1613 drivers/dma/pl330.c 			thrd->req[1].desc = NULL;
req              1693 drivers/dma/pl330.c 			descdone = thrd->req[active].desc;
req              1694 drivers/dma/pl330.c 			thrd->req[active].desc = NULL;
req              1770 drivers/dma/pl330.c 				thrd->req[0].desc = NULL;
req              1771 drivers/dma/pl330.c 				thrd->req[1].desc = NULL;
req              1800 drivers/dma/pl330.c 	dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT);
req              1801 drivers/dma/pl330.c 	dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT);
req              1856 drivers/dma/pl330.c 	thrd->req[0].mc_cpu = pl330->mcode_cpu
req              1858 drivers/dma/pl330.c 	thrd->req[0].mc_bus = pl330->mcode_bus
req              1860 drivers/dma/pl330.c 	thrd->req[0].desc = NULL;
req              1862 drivers/dma/pl330.c 	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
req              1864 drivers/dma/pl330.c 	thrd->req[1].mc_bus = thrd->req[0].mc_bus
req              1866 drivers/dma/pl330.c 	thrd->req[1].desc = NULL;
req              2283 drivers/dma/pl330.c 	pch->thread->req[0].desc = NULL;
req              2284 drivers/dma/pl330.c 	pch->thread->req[1].desc = NULL;
req              2413 drivers/dma/pl330.c 		running = pch->thread->req[pch->thread->req_running].desc;
req              2415 drivers/dma/pl330.c 	last_enq = pch->thread->req[pch->thread->lstenq].desc;
req               166 drivers/dma/sprd-dma.c 	u32 req;
req               506 drivers/dma/sprd-dma.c 	writel(cfg->req, schan->chn_base + SPRD_DMA_CHN_REQ);
req              1664 drivers/dma/ti/omap-dma.c 		unsigned req = *(unsigned *)param;
req              1666 drivers/dma/ti/omap-dma.c 		if (req <= od->dma_requests) {
req              1667 drivers/dma/ti/omap-dma.c 			c->dma_sig = req;
req               100 drivers/extcon/extcon-usbc-cros-ec.c 	struct ec_params_usb_pd_power_info req;
req               104 drivers/extcon/extcon-usbc-cros-ec.c 	req.port = info->port_id;
req               106 drivers/extcon/extcon-usbc-cros-ec.c 				 &req, sizeof(req), &resp, sizeof(resp));
req               121 drivers/extcon/extcon-usbc-cros-ec.c 	struct ec_params_usb_pd_mux_info req;
req               125 drivers/extcon/extcon-usbc-cros-ec.c 	req.port = info->port_id;
req               127 drivers/extcon/extcon-usbc-cros-ec.c 				 &req, sizeof(req),
req               178 drivers/firewire/core-cdev.c 	} req;
req               708 drivers/firewire/core-cdev.c 		struct fw_cdev_event_request *req = &e->req.request;
req               713 drivers/firewire/core-cdev.c 		req->type	= FW_CDEV_EVENT_REQUEST;
req               714 drivers/firewire/core-cdev.c 		req->tcode	= tcode;
req               715 drivers/firewire/core-cdev.c 		req->offset	= offset;
req               716 drivers/firewire/core-cdev.c 		req->length	= length;
req               717 drivers/firewire/core-cdev.c 		req->handle	= r->resource.handle;
req               718 drivers/firewire/core-cdev.c 		req->closure	= handler->closure;
req               719 drivers/firewire/core-cdev.c 		event_size0	= sizeof(*req);
req               721 drivers/firewire/core-cdev.c 		struct fw_cdev_event_request2 *req = &e->req.request2;
req               723 drivers/firewire/core-cdev.c 		req->type	= FW_CDEV_EVENT_REQUEST2;
req               724 drivers/firewire/core-cdev.c 		req->tcode	= tcode;
req               725 drivers/firewire/core-cdev.c 		req->offset	= offset;
req               726 drivers/firewire/core-cdev.c 		req->source_node_id = source;
req               727 drivers/firewire/core-cdev.c 		req->destination_node_id = destination;
req               728 drivers/firewire/core-cdev.c 		req->card	= card->index;
req               729 drivers/firewire/core-cdev.c 		req->generation	= generation;
req               730 drivers/firewire/core-cdev.c 		req->length	= length;
req               731 drivers/firewire/core-cdev.c 		req->handle	= r->resource.handle;
req               732 drivers/firewire/core-cdev.c 		req->closure	= handler->closure;
req               733 drivers/firewire/core-cdev.c 		event_size0	= sizeof(*req);
req               737 drivers/firewire/core-cdev.c 		    &e->req, event_size0, r->data, length);
req                26 drivers/firmware/imx/imx-scu-irq.c 		} __packed req;
req                79 drivers/firmware/imx/imx-scu-irq.c 		msg.data.req.resource = mu_resource_id;
req                80 drivers/firmware/imx/imx-scu-irq.c 		msg.data.req.group = i;
req               435 drivers/firmware/qcom_scm-32.c int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
req               442 drivers/firmware/qcom_scm-32.c 		req, req_cnt * sizeof(*req), resp, sizeof(*resp));
req               562 drivers/firmware/qcom_scm-32.c 	} req;
req               566 drivers/firmware/qcom_scm-32.c 	req.state = cpu_to_le32(state);
req               567 drivers/firmware/qcom_scm-32.c 	req.id = cpu_to_le32(id);
req               570 drivers/firmware/qcom_scm-32.c 			    &req, sizeof(req), &scm_ret, sizeof(scm_ret));
req               215 drivers/firmware/qcom_scm-64.c int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
req               225 drivers/firmware/qcom_scm-64.c 	desc.args[0] = req[0].addr;
req               226 drivers/firmware/qcom_scm-64.c 	desc.args[1] = req[0].val;
req               227 drivers/firmware/qcom_scm-64.c 	desc.args[2] = req[1].addr;
req               228 drivers/firmware/qcom_scm-64.c 	desc.args[3] = req[1].val;
req               229 drivers/firmware/qcom_scm-64.c 	desc.args[4] = req[2].addr;
req               230 drivers/firmware/qcom_scm-64.c 	desc.args[5] = req[2].val;
req               231 drivers/firmware/qcom_scm-64.c 	desc.args[6] = req[3].addr;
req               232 drivers/firmware/qcom_scm-64.c 	desc.args[7] = req[3].val;
req               233 drivers/firmware/qcom_scm-64.c 	desc.args[8] = req[4].addr;
req               234 drivers/firmware/qcom_scm-64.c 	desc.args[9] = req[4].val;
req               161 drivers/firmware/qcom_scm.c int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
req               168 drivers/firmware/qcom_scm.c 	ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp);
req                41 drivers/firmware/qcom_scm.h 		struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp);
req               104 drivers/firmware/tegra/bpmp-debugfs.c 	struct mrq_debugfs_request req = {
req               117 drivers/firmware/tegra/bpmp-debugfs.c 			.data = &req,
req               118 drivers/firmware/tegra/bpmp-debugfs.c 			.size = sizeof(req),
req               140 drivers/firmware/tegra/bpmp-debugfs.c 	const struct mrq_debugfs_request req = {
req               152 drivers/firmware/tegra/bpmp-debugfs.c 			.data = &req,
req               153 drivers/firmware/tegra/bpmp-debugfs.c 			.size = sizeof(req),
req               163 drivers/firmware/tegra/bpmp-debugfs.c 	const struct mrq_debugfs_request req = {
req               174 drivers/firmware/tegra/bpmp-debugfs.c 			.data = &req,
req               175 drivers/firmware/tegra/bpmp-debugfs.c 			.size = sizeof(req),
req               504 drivers/firmware/tegra/bpmp.c 	struct mrq_query_abi_request req = { .mrq = cpu_to_le32(mrq) };
req               509 drivers/firmware/tegra/bpmp.c 			.data = &req,
req               510 drivers/firmware/tegra/bpmp.c 			.size = sizeof(req),
req               524 drivers/firmware/ti_sci.c 	struct ti_sci_msg_req_set_device_state *req;
req               540 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req               546 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
req               547 drivers/firmware/ti_sci.c 	req->id = id;
req               548 drivers/firmware/ti_sci.c 	req->state = state;
req               582 drivers/firmware/ti_sci.c 	struct ti_sci_msg_req_get_device_state *req;
req               601 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req               607 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
req               608 drivers/firmware/ti_sci.c 	req->id = id;
req               887 drivers/firmware/ti_sci.c 	struct ti_sci_msg_req_set_device_resets *req;
req               903 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req               909 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
req               910 drivers/firmware/ti_sci.c 	req->id = id;
req               911 drivers/firmware/ti_sci.c 	req->resets = reset_state;
req               962 drivers/firmware/ti_sci.c 	struct ti_sci_msg_req_set_clock_state *req;
req               978 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req               984 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
req               985 drivers/firmware/ti_sci.c 	req->dev_id = dev_id;
req               987 drivers/firmware/ti_sci.c 		req->clk_id = clk_id;
req               989 drivers/firmware/ti_sci.c 		req->clk_id = 255;
req               990 drivers/firmware/ti_sci.c 		req->clk_id_32 = clk_id;
req               992 drivers/firmware/ti_sci.c 	req->request_state = state;
req              1027 drivers/firmware/ti_sci.c 	struct ti_sci_msg_req_get_clock_state *req;
req              1046 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              1052 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
req              1053 drivers/firmware/ti_sci.c 	req->dev_id = dev_id;
req              1055 drivers/firmware/ti_sci.c 		req->clk_id = clk_id;
req              1057 drivers/firmware/ti_sci.c 		req->clk_id = 255;
req              1058 drivers/firmware/ti_sci.c 		req->clk_id_32 = clk_id;
req              1259 drivers/firmware/ti_sci.c 	struct ti_sci_msg_req_set_clock_parent *req;
req              1275 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              1281 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
req              1282 drivers/firmware/ti_sci.c 	req->dev_id = dev_id;
req              1284 drivers/firmware/ti_sci.c 		req->clk_id = clk_id;
req              1286 drivers/firmware/ti_sci.c 		req->clk_id = 255;
req              1287 drivers/firmware/ti_sci.c 		req->clk_id_32 = clk_id;
req              1290 drivers/firmware/ti_sci.c 		req->parent_id = parent_id;
req              1292 drivers/firmware/ti_sci.c 		req->parent_id = 255;
req              1293 drivers/firmware/ti_sci.c 		req->parent_id_32 = parent_id;
req              1327 drivers/firmware/ti_sci.c 	struct ti_sci_msg_req_get_clock_parent *req;
req              1343 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              1349 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
req              1350 drivers/firmware/ti_sci.c 	req->dev_id = dev_id;
req              1352 drivers/firmware/ti_sci.c 		req->clk_id = clk_id;
req              1354 drivers/firmware/ti_sci.c 		req->clk_id = 255;
req              1355 drivers/firmware/ti_sci.c 		req->clk_id_32 = clk_id;
req              1397 drivers/firmware/ti_sci.c 	struct ti_sci_msg_req_get_clock_num_parents *req;
req              1413 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              1419 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
req              1420 drivers/firmware/ti_sci.c 	req->dev_id = dev_id;
req              1422 drivers/firmware/ti_sci.c 		req->clk_id = clk_id;
req              1424 drivers/firmware/ti_sci.c 		req->clk_id = 255;
req              1425 drivers/firmware/ti_sci.c 		req->clk_id_32 = clk_id;
req              1476 drivers/firmware/ti_sci.c 	struct ti_sci_msg_req_query_clock_freq *req;
req              1492 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              1498 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
req              1499 drivers/firmware/ti_sci.c 	req->dev_id = dev_id;
req              1501 drivers/firmware/ti_sci.c 		req->clk_id = clk_id;
req              1503 drivers/firmware/ti_sci.c 		req->clk_id = 255;
req              1504 drivers/firmware/ti_sci.c 		req->clk_id_32 = clk_id;
req              1506 drivers/firmware/ti_sci.c 	req->min_freq_hz = min_freq;
req              1507 drivers/firmware/ti_sci.c 	req->target_freq_hz = target_freq;
req              1508 drivers/firmware/ti_sci.c 	req->max_freq_hz = max_freq;
req              1552 drivers/firmware/ti_sci.c 	struct ti_sci_msg_req_set_clock_freq *req;
req              1568 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              1574 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
req              1575 drivers/firmware/ti_sci.c 	req->dev_id = dev_id;
req              1577 drivers/firmware/ti_sci.c 		req->clk_id = clk_id;
req              1579 drivers/firmware/ti_sci.c 		req->clk_id = 255;
req              1580 drivers/firmware/ti_sci.c 		req->clk_id_32 = clk_id;
req              1582 drivers/firmware/ti_sci.c 	req->min_freq_hz = min_freq;
req              1583 drivers/firmware/ti_sci.c 	req->target_freq_hz = target_freq;
req              1584 drivers/firmware/ti_sci.c 	req->max_freq_hz = max_freq;
req              1617 drivers/firmware/ti_sci.c 	struct ti_sci_msg_req_get_clock_freq *req;
req              1633 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              1639 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
req              1640 drivers/firmware/ti_sci.c 	req->dev_id = dev_id;
req              1642 drivers/firmware/ti_sci.c 		req->clk_id = clk_id;
req              1644 drivers/firmware/ti_sci.c 		req->clk_id = 255;
req              1645 drivers/firmware/ti_sci.c 		req->clk_id_32 = clk_id;
req              1670 drivers/firmware/ti_sci.c 	struct ti_sci_msg_req_reboot *req;
req              1686 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              1692 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;
req              1759 drivers/firmware/ti_sci.c 	struct ti_sci_msg_req_get_resource_range *req;
req              1776 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              1789 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
req              1790 drivers/firmware/ti_sci.c 	req->secondary_host = s_host;
req              1791 drivers/firmware/ti_sci.c 	req->type = type & MSG_RM_RESOURCE_TYPE_MASK;
req              1792 drivers/firmware/ti_sci.c 	req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
req              1885 drivers/firmware/ti_sci.c 	struct ti_sci_msg_req_manage_irq *req;
req              1901 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              1907 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
req              1908 drivers/firmware/ti_sci.c 	req->valid_params = valid_params;
req              1909 drivers/firmware/ti_sci.c 	req->src_id = src_id;
req              1910 drivers/firmware/ti_sci.c 	req->src_index = src_index;
req              1911 drivers/firmware/ti_sci.c 	req->dst_id = dst_id;
req              1912 drivers/firmware/ti_sci.c 	req->dst_host_irq = dst_host_irq;
req              1913 drivers/firmware/ti_sci.c 	req->ia_id = ia_id;
req              1914 drivers/firmware/ti_sci.c 	req->vint = vint;
req              1915 drivers/firmware/ti_sci.c 	req->global_event = global_event;
req              1916 drivers/firmware/ti_sci.c 	req->vint_status_bit = vint_status_bit;
req              1917 drivers/firmware/ti_sci.c 	req->secondary_host = s_host;
req              2123 drivers/firmware/ti_sci.c 	struct ti_sci_msg_rm_ring_cfg_req *req;
req              2138 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              2144 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
req              2145 drivers/firmware/ti_sci.c 	req->valid_params = valid_params;
req              2146 drivers/firmware/ti_sci.c 	req->nav_id = nav_id;
req              2147 drivers/firmware/ti_sci.c 	req->index = index;
req              2148 drivers/firmware/ti_sci.c 	req->addr_lo = addr_lo;
req              2149 drivers/firmware/ti_sci.c 	req->addr_hi = addr_hi;
req              2150 drivers/firmware/ti_sci.c 	req->count = count;
req              2151 drivers/firmware/ti_sci.c 	req->mode = mode;
req              2152 drivers/firmware/ti_sci.c 	req->size = size;
req              2153 drivers/firmware/ti_sci.c 	req->order_id = order_id;
req              2193 drivers/firmware/ti_sci.c 	struct ti_sci_msg_rm_ring_get_cfg_req *req;
req              2207 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              2214 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_rm_ring_get_cfg_req *)xfer->xfer_buf;
req              2215 drivers/firmware/ti_sci.c 	req->nav_id = nav_id;
req              2216 drivers/firmware/ti_sci.c 	req->index = index;
req              2262 drivers/firmware/ti_sci.c 	struct ti_sci_msg_psil_pair *req;
req              2279 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              2285 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
req              2286 drivers/firmware/ti_sci.c 	req->nav_id = nav_id;
req              2287 drivers/firmware/ti_sci.c 	req->src_thread = src_thread;
req              2288 drivers/firmware/ti_sci.c 	req->dst_thread = dst_thread;
req              2318 drivers/firmware/ti_sci.c 	struct ti_sci_msg_psil_unpair *req;
req              2335 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              2341 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
req              2342 drivers/firmware/ti_sci.c 	req->nav_id = nav_id;
req              2343 drivers/firmware/ti_sci.c 	req->src_thread = src_thread;
req              2344 drivers/firmware/ti_sci.c 	req->dst_thread = dst_thread;
req              2375 drivers/firmware/ti_sci.c 	struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
req              2390 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              2396 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
req              2397 drivers/firmware/ti_sci.c 	req->valid_params = params->valid_params;
req              2398 drivers/firmware/ti_sci.c 	req->nav_id = params->nav_id;
req              2399 drivers/firmware/ti_sci.c 	req->index = params->index;
req              2400 drivers/firmware/ti_sci.c 	req->tx_pause_on_err = params->tx_pause_on_err;
req              2401 drivers/firmware/ti_sci.c 	req->tx_filt_einfo = params->tx_filt_einfo;
req              2402 drivers/firmware/ti_sci.c 	req->tx_filt_pswords = params->tx_filt_pswords;
req              2403 drivers/firmware/ti_sci.c 	req->tx_atype = params->tx_atype;
req              2404 drivers/firmware/ti_sci.c 	req->tx_chan_type = params->tx_chan_type;
req              2405 drivers/firmware/ti_sci.c 	req->tx_supr_tdpkt = params->tx_supr_tdpkt;
req              2406 drivers/firmware/ti_sci.c 	req->tx_fetch_size = params->tx_fetch_size;
req              2407 drivers/firmware/ti_sci.c 	req->tx_credit_count = params->tx_credit_count;
req              2408 drivers/firmware/ti_sci.c 	req->txcq_qnum = params->txcq_qnum;
req              2409 drivers/firmware/ti_sci.c 	req->tx_priority = params->tx_priority;
req              2410 drivers/firmware/ti_sci.c 	req->tx_qos = params->tx_qos;
req              2411 drivers/firmware/ti_sci.c 	req->tx_orderid = params->tx_orderid;
req              2412 drivers/firmware/ti_sci.c 	req->fdepth = params->fdepth;
req              2413 drivers/firmware/ti_sci.c 	req->tx_sched_priority = params->tx_sched_priority;
req              2414 drivers/firmware/ti_sci.c 	req->tx_burst_size = params->tx_burst_size;
req              2445 drivers/firmware/ti_sci.c 	struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
req              2460 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              2466 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
req              2467 drivers/firmware/ti_sci.c 	req->valid_params = params->valid_params;
req              2468 drivers/firmware/ti_sci.c 	req->nav_id = params->nav_id;
req              2469 drivers/firmware/ti_sci.c 	req->index = params->index;
req              2470 drivers/firmware/ti_sci.c 	req->rx_fetch_size = params->rx_fetch_size;
req              2471 drivers/firmware/ti_sci.c 	req->rxcq_qnum = params->rxcq_qnum;
req              2472 drivers/firmware/ti_sci.c 	req->rx_priority = params->rx_priority;
req              2473 drivers/firmware/ti_sci.c 	req->rx_qos = params->rx_qos;
req              2474 drivers/firmware/ti_sci.c 	req->rx_orderid = params->rx_orderid;
req              2475 drivers/firmware/ti_sci.c 	req->rx_sched_priority = params->rx_sched_priority;
req              2476 drivers/firmware/ti_sci.c 	req->flowid_start = params->flowid_start;
req              2477 drivers/firmware/ti_sci.c 	req->flowid_cnt = params->flowid_cnt;
req              2478 drivers/firmware/ti_sci.c 	req->rx_pause_on_err = params->rx_pause_on_err;
req              2479 drivers/firmware/ti_sci.c 	req->rx_atype = params->rx_atype;
req              2480 drivers/firmware/ti_sci.c 	req->rx_chan_type = params->rx_chan_type;
req              2481 drivers/firmware/ti_sci.c 	req->rx_ignore_short = params->rx_ignore_short;
req              2482 drivers/firmware/ti_sci.c 	req->rx_ignore_long = params->rx_ignore_long;
req              2483 drivers/firmware/ti_sci.c 	req->rx_burst_size = params->rx_burst_size;
req              2514 drivers/firmware/ti_sci.c 	struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
req              2529 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              2535 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
req              2536 drivers/firmware/ti_sci.c 	req->valid_params = params->valid_params;
req              2537 drivers/firmware/ti_sci.c 	req->nav_id = params->nav_id;
req              2538 drivers/firmware/ti_sci.c 	req->flow_index = params->flow_index;
req              2539 drivers/firmware/ti_sci.c 	req->rx_einfo_present = params->rx_einfo_present;
req              2540 drivers/firmware/ti_sci.c 	req->rx_psinfo_present = params->rx_psinfo_present;
req              2541 drivers/firmware/ti_sci.c 	req->rx_error_handling = params->rx_error_handling;
req              2542 drivers/firmware/ti_sci.c 	req->rx_desc_type = params->rx_desc_type;
req              2543 drivers/firmware/ti_sci.c 	req->rx_sop_offset = params->rx_sop_offset;
req              2544 drivers/firmware/ti_sci.c 	req->rx_dest_qnum = params->rx_dest_qnum;
req              2545 drivers/firmware/ti_sci.c 	req->rx_src_tag_hi = params->rx_src_tag_hi;
req              2546 drivers/firmware/ti_sci.c 	req->rx_src_tag_lo = params->rx_src_tag_lo;
req              2547 drivers/firmware/ti_sci.c 	req->rx_dest_tag_hi = params->rx_dest_tag_hi;
req              2548 drivers/firmware/ti_sci.c 	req->rx_dest_tag_lo = params->rx_dest_tag_lo;
req              2549 drivers/firmware/ti_sci.c 	req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
req              2550 drivers/firmware/ti_sci.c 	req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
req              2551 drivers/firmware/ti_sci.c 	req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
req              2552 drivers/firmware/ti_sci.c 	req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
req              2553 drivers/firmware/ti_sci.c 	req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
req              2554 drivers/firmware/ti_sci.c 	req->rx_fdq1_qnum = params->rx_fdq1_qnum;
req              2555 drivers/firmware/ti_sci.c 	req->rx_fdq2_qnum = params->rx_fdq2_qnum;
req              2556 drivers/firmware/ti_sci.c 	req->rx_fdq3_qnum = params->rx_fdq3_qnum;
req              2557 drivers/firmware/ti_sci.c 	req->rx_ps_location = params->rx_ps_location;
req              2584 drivers/firmware/ti_sci.c 	struct ti_sci_msg_req_proc_request *req;
req              2601 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              2607 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
req              2608 drivers/firmware/ti_sci.c 	req->processor_id = proc_id;
req              2636 drivers/firmware/ti_sci.c 	struct ti_sci_msg_req_proc_release *req;
req              2653 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              2659 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
req              2660 drivers/firmware/ti_sci.c 	req->processor_id = proc_id;
req              2691 drivers/firmware/ti_sci.c 	struct ti_sci_msg_req_proc_handover *req;
req              2708 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              2714 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
req              2715 drivers/firmware/ti_sci.c 	req->processor_id = proc_id;
req              2716 drivers/firmware/ti_sci.c 	req->host_id = host_id;
req              2749 drivers/firmware/ti_sci.c 	struct ti_sci_msg_req_set_config *req;
req              2766 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              2772 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
req              2773 drivers/firmware/ti_sci.c 	req->processor_id = proc_id;
req              2774 drivers/firmware/ti_sci.c 	req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
req              2775 drivers/firmware/ti_sci.c 	req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
req              2777 drivers/firmware/ti_sci.c 	req->config_flags_set = config_flags_set;
req              2778 drivers/firmware/ti_sci.c 	req->config_flags_clear = config_flags_clear;
req              2810 drivers/firmware/ti_sci.c 	struct ti_sci_msg_req_set_ctrl *req;
req              2827 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              2833 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
req              2834 drivers/firmware/ti_sci.c 	req->processor_id = proc_id;
req              2835 drivers/firmware/ti_sci.c 	req->control_flags_set = control_flags_set;
req              2836 drivers/firmware/ti_sci.c 	req->control_flags_clear = control_flags_clear;
req              2866 drivers/firmware/ti_sci.c 	struct ti_sci_msg_req_get_status *req;
req              2882 drivers/firmware/ti_sci.c 				   sizeof(*req), sizeof(*resp));
req              2888 drivers/firmware/ti_sci.c 	req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
req              2889 drivers/firmware/ti_sci.c 	req->processor_id = proc_id;
req                90 drivers/gpio/gpio-dln2.c 	struct dln2_gpio_pin req = {
req                94 drivers/gpio/gpio-dln2.c 	return dln2_transfer_tx(dln2->pdev, cmd, &req, sizeof(req));
req               100 drivers/gpio/gpio-dln2.c 	struct dln2_gpio_pin req = {
req               106 drivers/gpio/gpio-dln2.c 	ret = dln2_transfer(dln2->pdev, cmd, &req, sizeof(req), &rsp, &len);
req               109 drivers/gpio/gpio-dln2.c 	if (len < sizeof(rsp) || req.pin != rsp.pin)
req               138 drivers/gpio/gpio-dln2.c 	struct dln2_gpio_pin_val req = {
req               143 drivers/gpio/gpio-dln2.c 	return dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req,
req               144 drivers/gpio/gpio-dln2.c 				sizeof(req));
req               153 drivers/gpio/gpio-dln2.c 	struct dln2_gpio_pin req = {
req               166 drivers/gpio/gpio-dln2.c 			    &req, sizeof(req), &rsp, &len);
req               169 drivers/gpio/gpio-dln2.c 	if (len < sizeof(rsp) || req.pin != rsp.pin) {
req               234 drivers/gpio/gpio-dln2.c 	struct dln2_gpio_pin_val req = {
req               241 drivers/gpio/gpio-dln2.c 			       &req, sizeof(req));
req               292 drivers/gpio/gpio-dln2.c 	} __packed req = {
req               299 drivers/gpio/gpio-dln2.c 				&req, sizeof(req));
req               372 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c 					  struct atif_sbios_requests *req)
req               388 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c 	memset(req, 0, sizeof(*req));
req               390 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c 	size = min(sizeof(*req), size);
req               391 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c 	memcpy(req, info->buffer.pointer, size);
req               392 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c 	DRM_DEBUG_DRIVER("SBIOS pending requests: %#x\n", req->pending);
req               394 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c 	count = hweight32(req->pending);
req               437 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c 		struct atif_sbios_requests req;
req               440 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c 		count = amdgpu_atif_get_sbios_requests(atif, &req);
req               448 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c 		if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
req               456 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c 						 req.backlight_level);
req               458 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c 				amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
req               466 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c 		if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
req                59 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h 	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
req              4436 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
req              4441 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, req);
req               205 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	u32 req = 0;
req               208 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
req               210 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
req               211 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
req               212 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
req               213 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
req               214 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
req               215 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
req               216 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
req               219 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	return req;
req               373 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
req               398 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 					    req, 1 << vmid);
req               435 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	u32 req = 0;
req               437 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
req               439 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
req               440 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
req               441 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
req               442 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
req               443 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
req               444 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
req               445 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
req               448 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	return req;
req               504 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 		uint32_t req = hub->vm_inv_eng0_req + eng;
req               507 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
req               573 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
req               597 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 					    req, 1 << vmid);
req               118 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c 	      enum idh_request req, u32 data1, u32 data2, u32 data3) {
req               141 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c 			    MSGBUF_DATA, req);
req               164 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c         u32 req, val, size;
req               171 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c                 req = IDH_IRQ_GET_PP_SCLK;
req               174 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c                 req = IDH_IRQ_GET_PP_MCLK;
req               182 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c         xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
req               214 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c         u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
req               220 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c         xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
req               238 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c 					enum idh_request req)
req               242 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c 	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
req               245 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c 	if (req == IDH_REQ_GPU_INIT_ACCESS ||
req               246 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c 		req == IDH_REQ_GPU_FINI_ACCESS ||
req               247 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c 		req == IDH_REQ_GPU_RESET_ACCESS) {
req               254 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c 		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
req               272 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c 	enum idh_request req;
req               274 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c 	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
req               275 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c 	return xgpu_ai_send_access_requests(adev, req);
req               281 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c 	enum idh_request req;
req               284 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c 	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
req               285 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c 	r = xgpu_ai_send_access_requests(adev, req);
req               352 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c 				      enum idh_request req)
req               358 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c 			    MSGBUF_DATA, req);
req               470 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c 	enum idh_request req;
req               472 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c 	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
req               473 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c 	return xgpu_vi_send_access_requests(adev, req);
req               479 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c 	enum idh_request req;
req               482 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c 	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
req               483 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c 	r = xgpu_vi_send_access_requests(adev, req);
req               427 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c 	struct i2c_request_transaction_data *req)
req               439 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c 		switch (req->action) {
req               443 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c 				req->address, req->length, req->data);
req               448 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c 				req->address, req->length, req->data);
req               459 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c 		(req->action == DCE_I2C_TRANSACTION_ACTION_I2C_WRITE) ||
req               460 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c 		(req->action == DCE_I2C_TRANSACTION_ACTION_I2C_READ))
req               464 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c 	req->status = result ?
req              1291 drivers/gpu/drm/bridge/tc358767.c 	u32 req, avail;
req              1298 drivers/gpu/drm/bridge/tc358767.c 	req = mode->clock * bits_per_pixel / 8;
req              1301 drivers/gpu/drm/bridge/tc358767.c 	if (req > avail)
req               265 drivers/gpu/drm/drm_dp_mst_topology.c static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
req               271 drivers/gpu/drm/drm_dp_mst_topology.c 	buf[idx++] = req->req_type & 0x7f;
req               273 drivers/gpu/drm/drm_dp_mst_topology.c 	switch (req->req_type) {
req               275 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
req               279 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
req               280 drivers/gpu/drm/drm_dp_mst_topology.c 			(req->u.allocate_payload.number_sdp_streams & 0xf);
req               282 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
req               284 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.allocate_payload.pbn >> 8);
req               286 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
req               288 drivers/gpu/drm/drm_dp_mst_topology.c 		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
req               289 drivers/gpu/drm/drm_dp_mst_topology.c 			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
req               290 drivers/gpu/drm/drm_dp_mst_topology.c 				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
req               293 drivers/gpu/drm/drm_dp_mst_topology.c 		if (req->u.allocate_payload.number_sdp_streams & 1) {
req               294 drivers/gpu/drm/drm_dp_mst_topology.c 			i = req->u.allocate_payload.number_sdp_streams - 1;
req               295 drivers/gpu/drm/drm_dp_mst_topology.c 			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
req               300 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
req               302 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
req               306 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
req               307 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
req               309 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
req               311 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
req               313 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.dpcd_read.num_bytes);
req               318 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
req               319 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
req               321 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
req               323 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
req               325 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.dpcd_write.num_bytes);
req               327 drivers/gpu/drm/drm_dp_mst_topology.c 		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
req               328 drivers/gpu/drm/drm_dp_mst_topology.c 		idx += req->u.dpcd_write.num_bytes;
req               331 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
req               332 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
req               334 drivers/gpu/drm/drm_dp_mst_topology.c 		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
req               335 drivers/gpu/drm/drm_dp_mst_topology.c 			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
req               337 drivers/gpu/drm/drm_dp_mst_topology.c 			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
req               339 drivers/gpu/drm/drm_dp_mst_topology.c 			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
req               340 drivers/gpu/drm/drm_dp_mst_topology.c 			idx += req->u.i2c_read.transactions[i].num_bytes;
req               342 drivers/gpu/drm/drm_dp_mst_topology.c 			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
req               343 drivers/gpu/drm/drm_dp_mst_topology.c 			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
req               346 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
req               348 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.i2c_read.num_bytes_read);
req               353 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
req               355 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
req               357 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.i2c_write.num_bytes);
req               359 drivers/gpu/drm/drm_dp_mst_topology.c 		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
req               360 drivers/gpu/drm/drm_dp_mst_topology.c 		idx += req->u.i2c_write.num_bytes;
req               365 drivers/gpu/drm/drm_dp_mst_topology.c 		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
req               737 drivers/gpu/drm/drm_dp_mst_topology.c 	struct drm_dp_sideband_msg_req_body req;
req               739 drivers/gpu/drm/drm_dp_mst_topology.c 	req.req_type = DP_REMOTE_DPCD_WRITE;
req               740 drivers/gpu/drm/drm_dp_mst_topology.c 	req.u.dpcd_write.port_number = port_num;
req               741 drivers/gpu/drm/drm_dp_mst_topology.c 	req.u.dpcd_write.dpcd_address = offset;
req               742 drivers/gpu/drm/drm_dp_mst_topology.c 	req.u.dpcd_write.num_bytes = num_bytes;
req               743 drivers/gpu/drm/drm_dp_mst_topology.c 	req.u.dpcd_write.bytes = bytes;
req               744 drivers/gpu/drm/drm_dp_mst_topology.c 	drm_dp_encode_sideband_req(&req, msg);
req               751 drivers/gpu/drm/drm_dp_mst_topology.c 	struct drm_dp_sideband_msg_req_body req;
req               753 drivers/gpu/drm/drm_dp_mst_topology.c 	req.req_type = DP_LINK_ADDRESS;
req               754 drivers/gpu/drm/drm_dp_mst_topology.c 	drm_dp_encode_sideband_req(&req, msg);
req               760 drivers/gpu/drm/drm_dp_mst_topology.c 	struct drm_dp_sideband_msg_req_body req;
req               762 drivers/gpu/drm/drm_dp_mst_topology.c 	req.req_type = DP_ENUM_PATH_RESOURCES;
req               763 drivers/gpu/drm/drm_dp_mst_topology.c 	req.u.port_num.port_number = port_num;
req               764 drivers/gpu/drm/drm_dp_mst_topology.c 	drm_dp_encode_sideband_req(&req, msg);
req               774 drivers/gpu/drm/drm_dp_mst_topology.c 	struct drm_dp_sideband_msg_req_body req;
req               775 drivers/gpu/drm/drm_dp_mst_topology.c 	memset(&req, 0, sizeof(req));
req               776 drivers/gpu/drm/drm_dp_mst_topology.c 	req.req_type = DP_ALLOCATE_PAYLOAD;
req               777 drivers/gpu/drm/drm_dp_mst_topology.c 	req.u.allocate_payload.port_number = port_num;
req               778 drivers/gpu/drm/drm_dp_mst_topology.c 	req.u.allocate_payload.vcpi = vcpi;
req               779 drivers/gpu/drm/drm_dp_mst_topology.c 	req.u.allocate_payload.pbn = pbn;
req               780 drivers/gpu/drm/drm_dp_mst_topology.c 	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
req               781 drivers/gpu/drm/drm_dp_mst_topology.c 	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
req               783 drivers/gpu/drm/drm_dp_mst_topology.c 	drm_dp_encode_sideband_req(&req, msg);
req               791 drivers/gpu/drm/drm_dp_mst_topology.c 	struct drm_dp_sideband_msg_req_body req;
req               794 drivers/gpu/drm/drm_dp_mst_topology.c 		req.req_type = DP_POWER_UP_PHY;
req               796 drivers/gpu/drm/drm_dp_mst_topology.c 		req.req_type = DP_POWER_DOWN_PHY;
req               798 drivers/gpu/drm/drm_dp_mst_topology.c 	req.u.port_num.port_number = port_num;
req               799 drivers/gpu/drm/drm_dp_mst_topology.c 	drm_dp_encode_sideband_req(&req, msg);
req              1913 drivers/gpu/drm/drm_dp_mst_topology.c 	struct drm_dp_sideband_msg_req_body req;
req              1915 drivers/gpu/drm/drm_dp_mst_topology.c 	req.req_type = DP_REMOTE_DPCD_READ;
req              1916 drivers/gpu/drm/drm_dp_mst_topology.c 	req.u.dpcd_read.port_number = port_num;
req              1917 drivers/gpu/drm/drm_dp_mst_topology.c 	req.u.dpcd_read.dpcd_address = offset;
req              1918 drivers/gpu/drm/drm_dp_mst_topology.c 	req.u.dpcd_read.num_bytes = num_bytes;
req              1919 drivers/gpu/drm/drm_dp_mst_topology.c 	drm_dp_encode_sideband_req(&req, msg);
req               525 drivers/gpu/drm/drm_ioc32.c 	struct drm_ctx_priv_map req;
req               533 drivers/gpu/drm/drm_ioc32.c 	req.ctx_id = req32.ctx_id;
req               534 drivers/gpu/drm/drm_ioc32.c 	err = drm_ioctl_kernel(file, drm_legacy_getsareactx, &req, DRM_AUTH);
req               538 drivers/gpu/drm/drm_ioc32.c 	req32.handle = ptr_to_compat((void __user *)req.handle);
req               847 drivers/gpu/drm/drm_ioc32.c 	union drm_wait_vblank req;
req               853 drivers/gpu/drm/drm_ioc32.c 	req.request.type = req32.request.type;
req               854 drivers/gpu/drm/drm_ioc32.c 	req.request.sequence = req32.request.sequence;
req               855 drivers/gpu/drm/drm_ioc32.c 	req.request.signal = req32.request.signal;
req               856 drivers/gpu/drm/drm_ioc32.c 	err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
req               860 drivers/gpu/drm/drm_ioc32.c 	req32.reply.type = req.reply.type;
req               861 drivers/gpu/drm/drm_ioc32.c 	req32.reply.sequence = req.reply.sequence;
req               862 drivers/gpu/drm/drm_ioc32.c 	req32.reply.tval_sec = req.reply.tval_sec;
req               863 drivers/gpu/drm/drm_ioc32.c 	req32.reply.tval_usec = req.reply.tval_usec;
req               237 drivers/gpu/drm/drm_ioctl.c 	struct drm_get_cap *req = data;
req               240 drivers/gpu/drm/drm_ioctl.c 	req->value = 0;
req               243 drivers/gpu/drm/drm_ioctl.c 	switch (req->capability) {
req               245 drivers/gpu/drm/drm_ioctl.c 		req->value = 1;
req               248 drivers/gpu/drm/drm_ioctl.c 		req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
req               249 drivers/gpu/drm/drm_ioctl.c 		req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
req               252 drivers/gpu/drm/drm_ioctl.c 		req->value = drm_core_check_feature(dev, DRIVER_SYNCOBJ);
req               255 drivers/gpu/drm/drm_ioctl.c 		req->value = drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE);
req               263 drivers/gpu/drm/drm_ioctl.c 	switch (req->capability) {
req               266 drivers/gpu/drm/drm_ioctl.c 			req->value = 1;
req               269 drivers/gpu/drm/drm_ioctl.c 		req->value = 1;
req               272 drivers/gpu/drm/drm_ioctl.c 		req->value = dev->mode_config.preferred_depth;
req               275 drivers/gpu/drm/drm_ioctl.c 		req->value = dev->mode_config.prefer_shadow;
req               278 drivers/gpu/drm/drm_ioctl.c 		req->value = dev->mode_config.async_page_flip;
req               281 drivers/gpu/drm/drm_ioctl.c 		req->value = 1;
req               284 drivers/gpu/drm/drm_ioctl.c 				req->value = 0;
req               289 drivers/gpu/drm/drm_ioctl.c 			req->value = dev->mode_config.cursor_width;
req               291 drivers/gpu/drm/drm_ioctl.c 			req->value = 64;
req               295 drivers/gpu/drm/drm_ioctl.c 			req->value = dev->mode_config.cursor_height;
req               297 drivers/gpu/drm/drm_ioctl.c 			req->value = 64;
req               300 drivers/gpu/drm/drm_ioctl.c 		req->value = dev->mode_config.allow_fb_modifiers;
req               303 drivers/gpu/drm/drm_ioctl.c 		req->value = 1;
req               317 drivers/gpu/drm/drm_ioctl.c 	struct drm_set_client_cap *req = data;
req               325 drivers/gpu/drm/drm_ioctl.c 	switch (req->capability) {
req               327 drivers/gpu/drm/drm_ioctl.c 		if (req->value > 1)
req               329 drivers/gpu/drm/drm_ioctl.c 		file_priv->stereo_allowed = req->value;
req               332 drivers/gpu/drm/drm_ioctl.c 		if (req->value > 1)
req               334 drivers/gpu/drm/drm_ioctl.c 		file_priv->universal_planes = req->value;
req               340 drivers/gpu/drm/drm_ioctl.c 		if (current->comm[0] == 'X' && req->value == 1) {
req               344 drivers/gpu/drm/drm_ioctl.c 		if (req->value > 2)
req               346 drivers/gpu/drm/drm_ioctl.c 		file_priv->atomic = req->value;
req               347 drivers/gpu/drm/drm_ioctl.c 		file_priv->universal_planes = req->value;
req               351 drivers/gpu/drm/drm_ioctl.c 		file_priv->aspect_ratio_allowed = req->value;
req               354 drivers/gpu/drm/drm_ioctl.c 		if (req->value > 1)
req               356 drivers/gpu/drm/drm_ioctl.c 		file_priv->aspect_ratio_allowed = req->value;
req               361 drivers/gpu/drm/drm_ioctl.c 		if (req->value > 1)
req               363 drivers/gpu/drm/drm_ioctl.c 		file_priv->writeback_connectors = req->value;
req               843 drivers/gpu/drm/drm_plane.c 				     struct drm_mode_cursor2 *req,
req               851 drivers/gpu/drm/drm_plane.c 		.width = req->width,
req               852 drivers/gpu/drm/drm_plane.c 		.height = req->height,
req               854 drivers/gpu/drm/drm_plane.c 		.pitches = { req->width * 4 },
req               855 drivers/gpu/drm/drm_plane.c 		.handles = { req->handle },
req               870 drivers/gpu/drm/drm_plane.c 	if (req->flags & DRM_MODE_CURSOR_BO) {
req               871 drivers/gpu/drm/drm_plane.c 		if (req->handle) {
req               878 drivers/gpu/drm/drm_plane.c 			fb->hot_x = req->hot_x;
req               879 drivers/gpu/drm/drm_plane.c 			fb->hot_y = req->hot_y;
req               893 drivers/gpu/drm/drm_plane.c 	if (req->flags & DRM_MODE_CURSOR_MOVE) {
req               894 drivers/gpu/drm/drm_plane.c 		crtc_x = req->x;
req               895 drivers/gpu/drm/drm_plane.c 		crtc_y = req->y;
req               921 drivers/gpu/drm/drm_plane.c 	if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
req               922 drivers/gpu/drm/drm_plane.c 		crtc->cursor_x = req->x;
req               923 drivers/gpu/drm/drm_plane.c 		crtc->cursor_y = req->y;
req               930 drivers/gpu/drm/drm_plane.c 				  struct drm_mode_cursor2 *req,
req               940 drivers/gpu/drm/drm_plane.c 	if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
req               943 drivers/gpu/drm/drm_plane.c 	crtc = drm_crtc_find(dev, file_priv, req->crtc_id);
req               945 drivers/gpu/drm/drm_plane.c 		DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
req               968 drivers/gpu/drm/drm_plane.c 		ret = drm_mode_cursor_universal(crtc, req, file_priv, &ctx);
req               972 drivers/gpu/drm/drm_plane.c 	if (req->flags & DRM_MODE_CURSOR_BO) {
req               979 drivers/gpu/drm/drm_plane.c 			ret = crtc->funcs->cursor_set2(crtc, file_priv, req->handle,
req               980 drivers/gpu/drm/drm_plane.c 						      req->width, req->height, req->hot_x, req->hot_y);
req               982 drivers/gpu/drm/drm_plane.c 			ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
req               983 drivers/gpu/drm/drm_plane.c 						      req->width, req->height);
req               986 drivers/gpu/drm/drm_plane.c 	if (req->flags & DRM_MODE_CURSOR_MOVE) {
req               988 drivers/gpu/drm/drm_plane.c 			ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
req              1012 drivers/gpu/drm/drm_plane.c 	struct drm_mode_cursor *req = data;
req              1015 drivers/gpu/drm/drm_plane.c 	memcpy(&new_req, req, sizeof(struct drm_mode_cursor));
req              1029 drivers/gpu/drm/drm_plane.c 	struct drm_mode_cursor2 *req = data;
req              1031 drivers/gpu/drm/drm_plane.c 	return drm_mode_cursor_common(dev, req, file_priv);
req              1142 drivers/gpu/drm/exynos/exynos_drm_g2d.c 	struct drm_exynos_g2d_set_cmdlist *req = data;
req              1159 drivers/gpu/drm/exynos/exynos_drm_g2d.c 	if (req->cmd_nr > G2D_CMDLIST_DATA_NUM ||
req              1160 drivers/gpu/drm/exynos/exynos_drm_g2d.c 	    req->cmd_buf_nr > G2D_CMDLIST_DATA_NUM) {
req              1167 drivers/gpu/drm/exynos/exynos_drm_g2d.c 	if (req->event_type != G2D_EVENT_NOT) {
req              1176 drivers/gpu/drm/exynos/exynos_drm_g2d.c 		e->event.user_data = req->user_data;
req              1227 drivers/gpu/drm/exynos/exynos_drm_g2d.c 	size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
req              1234 drivers/gpu/drm/exynos/exynos_drm_g2d.c 	cmd = (struct drm_exynos_g2d_cmd *)(unsigned long)req->cmd;
req              1238 drivers/gpu/drm/exynos/exynos_drm_g2d.c 				sizeof(*cmd) * req->cmd_nr)) {
req              1242 drivers/gpu/drm/exynos/exynos_drm_g2d.c 	cmdlist->last += req->cmd_nr * 2;
req              1244 drivers/gpu/drm/exynos/exynos_drm_g2d.c 	ret = g2d_check_reg_offset(g2d, node, req->cmd_nr, false);
req              1248 drivers/gpu/drm/exynos/exynos_drm_g2d.c 	node->buf_info.map_nr = req->cmd_buf_nr;
req              1249 drivers/gpu/drm/exynos/exynos_drm_g2d.c 	if (req->cmd_buf_nr) {
req              1253 drivers/gpu/drm/exynos/exynos_drm_g2d.c 				(unsigned long)req->cmd_buf;
req              1257 drivers/gpu/drm/exynos/exynos_drm_g2d.c 					sizeof(*cmd_buf) * req->cmd_buf_nr)) {
req              1261 drivers/gpu/drm/exynos/exynos_drm_g2d.c 		cmdlist->last += req->cmd_buf_nr * 2;
req              1263 drivers/gpu/drm/exynos/exynos_drm_g2d.c 		ret = g2d_check_reg_offset(g2d, node, req->cmd_buf_nr, true);
req              1301 drivers/gpu/drm/exynos/exynos_drm_g2d.c 	struct drm_exynos_g2d_exec *req = data;
req              1315 drivers/gpu/drm/exynos/exynos_drm_g2d.c 	runqueue_node->async = req->async;
req               824 drivers/gpu/drm/i915/display/intel_opregion.c 			u32 req = opregion->swsci_sbcb_sub_functions;
req               825 drivers/gpu/drm/i915/display/intel_opregion.c 			if ((req & tmp) != req)
req               826 drivers/gpu/drm/i915/display/intel_opregion.c 				DRM_DEBUG_DRIVER("SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp);
req               421 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
req               430 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c #define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status))
req               437 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 		DRM_ERROR("CT: fence %u err %d\n", req->fence, err);
req               439 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 	*status = req->status;
req               632 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 	struct ct_request *req;
req               657 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 	list_for_each_entry(req, &ct->pending_requests, link) {
req               658 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 		if (unlikely(fence != req->fence)) {
req               660 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 					req->fence);
req               663 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 		if (unlikely(datalen > req->response_len)) {
req               665 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 				  req->fence, 4 * msglen, msg);
req               669 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 			memcpy(req->response_buf, msg + 3, 4 * datalen);
req               670 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 		req->response_len = datalen;
req               671 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 		WRITE_ONCE(req->status, status);
req               193 drivers/gpu/drm/i915/gvt/mmio_context.c 				 struct i915_request *req)
req               199 drivers/gpu/drm/i915/gvt/mmio_context.c 	int ring_id = req->engine->id;
req               205 drivers/gpu/drm/i915/gvt/mmio_context.c 	ret = req->engine->emit_flush(req, EMIT_BARRIER);
req               209 drivers/gpu/drm/i915/gvt/mmio_context.c 	cs = intel_ring_begin(req, count * 2 + 2);
req               228 drivers/gpu/drm/i915/gvt/mmio_context.c 	intel_ring_advance(req, cs);
req               230 drivers/gpu/drm/i915/gvt/mmio_context.c 	ret = req->engine->emit_flush(req, EMIT_BARRIER);
req               239 drivers/gpu/drm/i915/gvt/mmio_context.c 					struct i915_request *req)
req               244 drivers/gpu/drm/i915/gvt/mmio_context.c 	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
req               254 drivers/gpu/drm/i915/gvt/mmio_context.c 			      *(cs-2), *(cs-1), vgpu->id, req->engine->id);
req               259 drivers/gpu/drm/i915/gvt/mmio_context.c 	intel_ring_advance(req, cs);
req               266 drivers/gpu/drm/i915/gvt/mmio_context.c 				     struct i915_request *req)
req               271 drivers/gpu/drm/i915/gvt/mmio_context.c 	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
req               281 drivers/gpu/drm/i915/gvt/mmio_context.c 			      *(cs-2), *(cs-1), vgpu->id, req->engine->id);
req               286 drivers/gpu/drm/i915/gvt/mmio_context.c 	intel_ring_advance(req, cs);
req               297 drivers/gpu/drm/i915/gvt/mmio_context.c 				       struct i915_request *req)
req               302 drivers/gpu/drm/i915/gvt/mmio_context.c 	cs = intel_ring_begin(req, 2);
req               308 drivers/gpu/drm/i915/gvt/mmio_context.c 	intel_ring_advance(req, cs);
req               310 drivers/gpu/drm/i915/gvt/mmio_context.c 	ret = restore_context_mmio_for_inhibit(vgpu, req);
req               315 drivers/gpu/drm/i915/gvt/mmio_context.c 	if (req->engine->id != RCS0)
req               318 drivers/gpu/drm/i915/gvt/mmio_context.c 	ret = restore_render_mocs_control_for_inhibit(vgpu, req);
req               322 drivers/gpu/drm/i915/gvt/mmio_context.c 	ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req);
req               327 drivers/gpu/drm/i915/gvt/mmio_context.c 	cs = intel_ring_begin(req, 2);
req               333 drivers/gpu/drm/i915/gvt/mmio_context.c 	intel_ring_advance(req, cs);
req                55 drivers/gpu/drm/i915/gvt/mmio_context.h 				       struct i915_request *req);
req                61 drivers/gpu/drm/i915/gvt/scheduler.c 		workload->req->hw_context->state->obj;
req               132 drivers/gpu/drm/i915/gvt/scheduler.c 		workload->req->hw_context->state->obj;
req               207 drivers/gpu/drm/i915/gvt/scheduler.c static inline bool is_gvt_request(struct i915_request *req)
req               209 drivers/gpu/drm/i915/gvt/scheduler.c 	return i915_gem_context_force_single_submission(req->gem_context);
req               229 drivers/gpu/drm/i915/gvt/scheduler.c 	struct i915_request *req = data;
req               231 drivers/gpu/drm/i915/gvt/scheduler.c 				shadow_ctx_notifier_block[req->engine->id]);
req               233 drivers/gpu/drm/i915/gvt/scheduler.c 	enum intel_engine_id ring_id = req->engine->id;
req               237 drivers/gpu/drm/i915/gvt/scheduler.c 	if (!is_gvt_request(req)) {
req               304 drivers/gpu/drm/i915/gvt/scheduler.c 	struct i915_request *req = workload->req;
req               309 drivers/gpu/drm/i915/gvt/scheduler.c 	if (IS_GEN(req->i915, 9) && is_inhibit_context(req->hw_context))
req               310 drivers/gpu/drm/i915/gvt/scheduler.c 		intel_vgpu_restore_inhibit_context(vgpu, req);
req               322 drivers/gpu/drm/i915/gvt/scheduler.c 	if (req->engine->emit_init_breadcrumb) {
req               323 drivers/gpu/drm/i915/gvt/scheduler.c 		err = req->engine->emit_init_breadcrumb(req);
req               331 drivers/gpu/drm/i915/gvt/scheduler.c 	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
req               347 drivers/gpu/drm/i915/gvt/scheduler.c 	intel_ring_advance(workload->req, cs);
req               397 drivers/gpu/drm/i915/gvt/scheduler.c 	if (workload->req)
req               406 drivers/gpu/drm/i915/gvt/scheduler.c 	workload->req = i915_request_get(rq);
req               515 drivers/gpu/drm/i915/gvt/scheduler.c 						      workload->req,
req               534 drivers/gpu/drm/i915/gvt/scheduler.c 	struct i915_request *rq = workload->req;
req               723 drivers/gpu/drm/i915/gvt/scheduler.c 		rq = fetch_and_zero(&workload->req);
req               727 drivers/gpu/drm/i915/gvt/scheduler.c 	if (!IS_ERR_OR_NULL(workload->req)) {
req               729 drivers/gpu/drm/i915/gvt/scheduler.c 				ring_id, workload->req);
req               730 drivers/gpu/drm/i915/gvt/scheduler.c 		i915_request_add(workload->req);
req               800 drivers/gpu/drm/i915/gvt/scheduler.c 	struct i915_request *rq = workload->req;
req               911 drivers/gpu/drm/i915/gvt/scheduler.c 	struct i915_request *rq = workload->req;
req               931 drivers/gpu/drm/i915/gvt/scheduler.c 			if (workload->req->fence.error == -EIO)
req               946 drivers/gpu/drm/i915/gvt/scheduler.c 		i915_request_put(fetch_and_zero(&workload->req));
req              1051 drivers/gpu/drm/i915/gvt/scheduler.c 		i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
req                83 drivers/gpu/drm/i915/gvt/scheduler.h 	struct i915_request *req;
req               464 drivers/gpu/drm/mga/mga_dma.c 	struct drm_buf_desc req;
req               553 drivers/gpu/drm/mga/mga_dma.c 	(void)memset(&req, 0, sizeof(req));
req               554 drivers/gpu/drm/mga/mga_dma.c 	req.count = dma_bs->secondary_bin_count;
req               555 drivers/gpu/drm/mga/mga_dma.c 	req.size = dma_bs->secondary_bin_size;
req               556 drivers/gpu/drm/mga/mga_dma.c 	req.flags = _DRM_AGP_BUFFER;
req               557 drivers/gpu/drm/mga/mga_dma.c 	req.agp_start = offset;
req               559 drivers/gpu/drm/mga/mga_dma.c 	err = drm_legacy_addbufs_agp(dev, &req);
req               636 drivers/gpu/drm/mga/mga_dma.c 	struct drm_buf_desc req;
req               686 drivers/gpu/drm/mga/mga_dma.c 		(void)memset(&req, 0, sizeof(req));
req               687 drivers/gpu/drm/mga/mga_dma.c 		req.count = bin_count;
req               688 drivers/gpu/drm/mga/mga_dma.c 		req.size = dma_bs->secondary_bin_size;
req               690 drivers/gpu/drm/mga/mga_dma.c 		err = drm_legacy_addbufs_pci(dev, &req);
req               750 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c 	uint32_t lr, tb, req;
req               778 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c 		req  = MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(roi_w +
req               781 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c 		req |= MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(roi_h +
req               786 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c 		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(pipe, i), req);
req               793 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c 			FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT));
req               800 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c 			FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM));
req               123 drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h int nvkm_clk_ustate(struct nvkm_clk *, int req, int pwr);
req               124 drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait);
req               125 drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel);
req               408 drivers/gpu/drm/nouveau/nouveau_abi16.c 	struct drm_nouveau_channel_free *req = data;
req               415 drivers/gpu/drm/nouveau/nouveau_abi16.c 	chan = nouveau_abi16_chan(abi16, req->channel);
req               263 drivers/gpu/drm/nouveau/nouveau_gem.c 	struct drm_nouveau_gem_new *req = data;
req               267 drivers/gpu/drm/nouveau/nouveau_gem.c 	ret = nouveau_gem_new(cli, req->info.size, req->align,
req               268 drivers/gpu/drm/nouveau/nouveau_gem.c 			      req->info.domain, req->info.tile_mode,
req               269 drivers/gpu/drm/nouveau/nouveau_gem.c 			      req->info.tile_flags, &nvbo);
req               274 drivers/gpu/drm/nouveau/nouveau_gem.c 				    &req->info.handle);
req               276 drivers/gpu/drm/nouveau/nouveau_gem.c 		ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
req               278 drivers/gpu/drm/nouveau/nouveau_gem.c 			drm_gem_handle_delete(file_priv, req->info.handle);
req               607 drivers/gpu/drm/nouveau/nouveau_gem.c 				struct drm_nouveau_gem_pushbuf *req,
req               614 drivers/gpu/drm/nouveau/nouveau_gem.c 	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
req               618 drivers/gpu/drm/nouveau/nouveau_gem.c 	for (i = 0; i < req->nr_relocs; i++) {
req               624 drivers/gpu/drm/nouveau/nouveau_gem.c 		if (unlikely(r->bo_index >= req->nr_buffers)) {
req               634 drivers/gpu/drm/nouveau/nouveau_gem.c 		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
req               694 drivers/gpu/drm/nouveau/nouveau_gem.c 	struct drm_nouveau_gem_pushbuf *req = data;
req               706 drivers/gpu/drm/nouveau/nouveau_gem.c 		if (temp->chan->chid == req->channel) {
req               715 drivers/gpu/drm/nouveau/nouveau_gem.c 	req->vram_available = drm->gem.vram_available;
req               716 drivers/gpu/drm/nouveau/nouveau_gem.c 	req->gart_available = drm->gem.gart_available;
req               717 drivers/gpu/drm/nouveau/nouveau_gem.c 	if (unlikely(req->nr_push == 0))
req               720 drivers/gpu/drm/nouveau/nouveau_gem.c 	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
req               722 drivers/gpu/drm/nouveau/nouveau_gem.c 			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
req               726 drivers/gpu/drm/nouveau/nouveau_gem.c 	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
req               728 drivers/gpu/drm/nouveau/nouveau_gem.c 			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
req               732 drivers/gpu/drm/nouveau/nouveau_gem.c 	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
req               734 drivers/gpu/drm/nouveau/nouveau_gem.c 			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
req               738 drivers/gpu/drm/nouveau/nouveau_gem.c 	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
req               742 drivers/gpu/drm/nouveau/nouveau_gem.c 	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
req               749 drivers/gpu/drm/nouveau/nouveau_gem.c 	for (i = 0; i < req->nr_push; i++) {
req               750 drivers/gpu/drm/nouveau/nouveau_gem.c 		if (push[i].bo_index >= req->nr_buffers) {
req               758 drivers/gpu/drm/nouveau/nouveau_gem.c 	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
req               759 drivers/gpu/drm/nouveau/nouveau_gem.c 					   req->nr_buffers, &op, &do_reloc);
req               768 drivers/gpu/drm/nouveau/nouveau_gem.c 		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
req               776 drivers/gpu/drm/nouveau/nouveau_gem.c 		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
req               782 drivers/gpu/drm/nouveau/nouveau_gem.c 		for (i = 0; i < req->nr_push; i++) {
req               791 drivers/gpu/drm/nouveau/nouveau_gem.c 		ret = RING_SPACE(chan, req->nr_push * 2);
req               797 drivers/gpu/drm/nouveau/nouveau_gem.c 		for (i = 0; i < req->nr_push; i++) {
req               805 drivers/gpu/drm/nouveau/nouveau_gem.c 		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
req               811 drivers/gpu/drm/nouveau/nouveau_gem.c 		for (i = 0; i < req->nr_push; i++) {
req               818 drivers/gpu/drm/nouveau/nouveau_gem.c 			if (unlikely(cmd != req->suffix0)) {
req               860 drivers/gpu/drm/nouveau/nouveau_gem.c 		req->suffix0 = 0x00000000;
req               861 drivers/gpu/drm/nouveau/nouveau_gem.c 		req->suffix1 = 0x00000000;
req               864 drivers/gpu/drm/nouveau/nouveau_gem.c 		req->suffix0 = 0x00020000;
req               865 drivers/gpu/drm/nouveau/nouveau_gem.c 		req->suffix1 = 0x00000000;
req               867 drivers/gpu/drm/nouveau/nouveau_gem.c 		req->suffix0 = 0x20000000 |
req               869 drivers/gpu/drm/nouveau/nouveau_gem.c 		req->suffix1 = 0x00000000;
req               879 drivers/gpu/drm/nouveau/nouveau_gem.c 	struct drm_nouveau_gem_cpu_prep *req = data;
req               882 drivers/gpu/drm/nouveau/nouveau_gem.c 	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
req               883 drivers/gpu/drm/nouveau/nouveau_gem.c 	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
req               887 drivers/gpu/drm/nouveau/nouveau_gem.c 	gem = drm_gem_object_lookup(file_priv, req->handle);
req               911 drivers/gpu/drm/nouveau/nouveau_gem.c 	struct drm_nouveau_gem_cpu_fini *req = data;
req               915 drivers/gpu/drm/nouveau/nouveau_gem.c 	gem = drm_gem_object_lookup(file_priv, req->handle);
req               929 drivers/gpu/drm/nouveau/nouveau_gem.c 	struct drm_nouveau_gem_info *req = data;
req               933 drivers/gpu/drm/nouveau/nouveau_gem.c 	gem = drm_gem_object_lookup(file_priv, req->handle);
req               937 drivers/gpu/drm/nouveau/nouveau_gem.c 	ret = nouveau_gem_info(file_priv, gem, req);
req               131 drivers/gpu/drm/nouveau/nouveau_usif.c 	} *req;
req               140 drivers/gpu/drm/nouveau/nouveau_usif.c 	req = data;
req               147 drivers/gpu/drm/nouveau/nouveau_usif.c 	if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, true))) {
req               148 drivers/gpu/drm/nouveau/nouveau_usif.c 		ntfy->reply = sizeof(struct nvif_notify_rep_v0) + req->v0.reply;
req               149 drivers/gpu/drm/nouveau/nouveau_usif.c 		ntfy->route = req->v0.route;
req               150 drivers/gpu/drm/nouveau/nouveau_usif.c 		ntfy->token = req->v0.token;
req               151 drivers/gpu/drm/nouveau/nouveau_usif.c 		req->v0.route = NVDRM_NOTIFY_USIF;
req               152 drivers/gpu/drm/nouveau/nouveau_usif.c 		req->v0.token = (unsigned long)(void *)ntfy;
req               154 drivers/gpu/drm/nouveau/nouveau_usif.c 		req->v0.token = ntfy->token;
req               155 drivers/gpu/drm/nouveau/nouveau_usif.c 		req->v0.route = ntfy->route;
req               172 drivers/gpu/drm/nouveau/nvif/notify.c 		struct nvif_notify_req_v0 req;
req               196 drivers/gpu/drm/nouveau/nvif/notify.c 	args->req.version = 0;
req               197 drivers/gpu/drm/nouveau/nvif/notify.c 	args->req.reply = notify->size;
req               198 drivers/gpu/drm/nouveau/nvif/notify.c 	args->req.route = 0;
req               199 drivers/gpu/drm/nouveau/nvif/notify.c 	args->req.token = (unsigned long)(void *)notify;
req               201 drivers/gpu/drm/nouveau/nvif/notify.c 	memcpy(args->req.data, data, size);
req               135 drivers/gpu/drm/nouveau/nvkm/core/client.c 	} *req = data;
req               152 drivers/gpu/drm/nouveau/nvkm/core/client.c 	if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, true))) {
req               154 drivers/gpu/drm/nouveau/nvkm/core/client.c 				   "token %llx\n", req->v0.version,
req               155 drivers/gpu/drm/nouveau/nvkm/core/client.c 			   req->v0.reply, req->v0.route, req->v0.token);
req               156 drivers/gpu/drm/nouveau/nvkm/core/client.c 		notify->version = req->v0.version;
req               158 drivers/gpu/drm/nouveau/nvkm/core/client.c 		notify->rep.v0.version = req->v0.version;
req               159 drivers/gpu/drm/nouveau/nvkm/core/client.c 		notify->rep.v0.route = req->v0.route;
req               160 drivers/gpu/drm/nouveau/nvkm/core/client.c 		notify->rep.v0.token = req->v0.token;
req               161 drivers/gpu/drm/nouveau/nvkm/core/client.c 		reply = req->v0.reply;
req                68 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c 	} *req = data;
req                71 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c 	if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, false))) {
req                73 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c 		if (ret = -ENXIO, req->v0.head <= disp->vblank.index_nr) {
req                75 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c 			notify->index = req->v0.head;
req               105 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c 	} *req = data;
req               109 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c 	if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, false))) {
req               112 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c 			if (ret = -ENXIO, outp->conn->index == req->v0.conn) {
req               114 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c 					notify->types = req->v0.mask;
req               115 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c 					notify->index = req->v0.conn;
req               191 drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c 	} *req = data;
req               194 drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c 	if (!(ret = nvif_unvers(ret, &data, &size, req->none))) {
req                55 drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c 	} *req = data;
req                58 drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c 	if (!(ret = nvif_unvers(ret, &data, &size, req->none))) {
req               472 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c nvkm_clk_ustate_update(struct nvkm_clk *clk, int req)
req               480 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c 	if (req != -1 && req != -2) {
req               482 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c 			if (pstate->pstate == req)
req               487 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c 		if (pstate->pstate != req)
req               489 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c 		req = i;
req               492 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c 	return req + 2;
req               520 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c nvkm_clk_ustate(struct nvkm_clk *clk, int req, int pwr)
req               522 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c 	int ret = nvkm_clk_ustate_update(clk, req);
req               532 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait)
req               534 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c 	if (!rel) clk->astate  = req;
req               551 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c nvkm_clk_dstate(struct nvkm_clk *clk, int req, int rel)
req               553 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c 	if (!rel) clk->dstate  = req;
req               130 drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c 	struct nvkm_gpio_ntfy_req *req = data;
req               131 drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c 	if (!WARN_ON(size != sizeof(*req))) {
req               133 drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c 		notify->types = req->mask;
req               134 drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c 		notify->index = req->line;
req               111 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c 	struct nvkm_i2c_ntfy_req *req = data;
req               112 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c 	if (!WARN_ON(size != sizeof(*req))) {
req               114 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c 		notify->types = req->mask;
req               115 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c 		notify->index = req->port;
req               208 drivers/gpu/drm/panfrost/panfrost_perfcnt.c 	struct drm_panfrost_perfcnt_enable *req = data;
req               216 drivers/gpu/drm/panfrost/panfrost_perfcnt.c 	if (req->counterset > (panfrost_model_is_bifrost(pfdev) ? 1 : 0))
req               220 drivers/gpu/drm/panfrost/panfrost_perfcnt.c 	if (req->enable)
req               222 drivers/gpu/drm/panfrost/panfrost_perfcnt.c 						     req->counterset);
req               235 drivers/gpu/drm/panfrost/panfrost_perfcnt.c 	struct drm_panfrost_perfcnt_dump *req = data;
req               236 drivers/gpu/drm/panfrost/panfrost_perfcnt.c 	void __user *user_ptr = (void __user *)(uintptr_t)req->buf_ptr;
req               320 drivers/gpu/drm/radeon/radeon_acpi.c 		struct atif_sbios_requests *req)
req               335 drivers/gpu/drm/radeon/radeon_acpi.c 	memset(req, 0, sizeof(*req));
req               337 drivers/gpu/drm/radeon/radeon_acpi.c 	size = min(sizeof(*req), size);
req               338 drivers/gpu/drm/radeon/radeon_acpi.c 	memcpy(req, info->buffer.pointer, size);
req               339 drivers/gpu/drm/radeon/radeon_acpi.c 	DRM_DEBUG_DRIVER("SBIOS pending requests: %#x\n", req->pending);
req               341 drivers/gpu/drm/radeon/radeon_acpi.c 	count = hweight32(req->pending);
req               362 drivers/gpu/drm/radeon/radeon_acpi.c 	struct atif_sbios_requests req;
req               379 drivers/gpu/drm/radeon/radeon_acpi.c 	count = radeon_atif_get_sbios_requests(handle, &req);
req               386 drivers/gpu/drm/radeon/radeon_acpi.c 	if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
req               391 drivers/gpu/drm/radeon/radeon_acpi.c 					req.backlight_level);
req               393 drivers/gpu/drm/radeon/radeon_acpi.c 			radeon_set_backlight_level(rdev, enc, req.backlight_level);
req               408 drivers/gpu/drm/radeon/radeon_acpi.c 	if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
req                49 drivers/gpu/drm/sis/sis_mm.c 	struct sis_memreq req;
req               119 drivers/gpu/drm/sis/sis_mm.c 		item->req.size = mem->size;
req               120 drivers/gpu/drm/sis/sis_mm.c 		sis_malloc(&item->req);
req               121 drivers/gpu/drm/sis/sis_mm.c 		if (item->req.size == 0)
req               123 drivers/gpu/drm/sis/sis_mm.c 		offset = item->req.offset;
req               185 drivers/gpu/drm/sis/sis_mm.c 		sis_free(obj->req.offset);
req               343 drivers/gpu/drm/sis/sis_mm.c 			sis_free(entry->req.offset);
req                67 drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c 				     struct clk_rate_request *req)
req                72 drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c 	unsigned long rate = req->rate;
req               118 drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c 	req->rate = best_parent / best_half / best_div;
req               119 drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c 	req->best_parent_rate = best_parent;
req               120 drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c 	req->best_parent_hw = parent;
req                21 drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c 					struct clk_rate_request *req)
req                23 drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c 	unsigned long rate = req->rate;
req                61 drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c 	req->rate = best_rate / best_div;
req                62 drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c 	req->best_parent_rate = best_rate;
req                63 drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c 	req->best_parent_hw = best_parent;
req               105 drivers/gpu/drm/vc4/vc4_perfmon.c 	struct drm_vc4_perfmon_create *req = data;
req               116 drivers/gpu/drm/vc4/vc4_perfmon.c 	if (req->ncounters > DRM_VC4_MAX_PERF_COUNTERS ||
req               117 drivers/gpu/drm/vc4/vc4_perfmon.c 	    !req->ncounters)
req               121 drivers/gpu/drm/vc4/vc4_perfmon.c 	for (i = 0; i < req->ncounters; i++) {
req               122 drivers/gpu/drm/vc4/vc4_perfmon.c 		if (req->events[i] >= VC4_PERFCNT_NUM_EVENTS)
req               126 drivers/gpu/drm/vc4/vc4_perfmon.c 	perfmon = kzalloc(struct_size(perfmon, counters, req->ncounters),
req               131 drivers/gpu/drm/vc4/vc4_perfmon.c 	for (i = 0; i < req->ncounters; i++)
req               132 drivers/gpu/drm/vc4/vc4_perfmon.c 		perfmon->events[i] = req->events[i];
req               134 drivers/gpu/drm/vc4/vc4_perfmon.c 	perfmon->ncounters = req->ncounters;
req               148 drivers/gpu/drm/vc4/vc4_perfmon.c 	req->id = ret;
req               157 drivers/gpu/drm/vc4/vc4_perfmon.c 	struct drm_vc4_perfmon_destroy *req = data;
req               166 drivers/gpu/drm/vc4/vc4_perfmon.c 	perfmon = idr_remove(&vc4file->perfmon.idr, req->id);
req               181 drivers/gpu/drm/vc4/vc4_perfmon.c 	struct drm_vc4_perfmon_get_values *req = data;
req               191 drivers/gpu/drm/vc4/vc4_perfmon.c 	perfmon = idr_find(&vc4file->perfmon.idr, req->id);
req               198 drivers/gpu/drm/vc4/vc4_perfmon.c 	if (copy_to_user(u64_to_user_ptr(req->values_ptr), perfmon->counters,
req               822 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c 	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
req               833 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c 				req->size, false, &handle, &vbo,
req               807 drivers/gpu/drm/vmwgfx/vmwgfx_context.c 	switch (arg->req) {
req                90 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 			       struct drm_vmw_gb_surface_create_ext_req *req,
req                95 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 				  struct drm_vmw_surface_arg *req,
req               710 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	struct drm_vmw_surface_create_req *req = &arg->req;
req               732 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 		if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
req               734 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 		num_sizes += req->mip_levels[i];
req               745 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	desc = svga3dsurface_get_desc(req->format);
req               748 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 			       req->format);
req               774 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	srf->flags = (SVGA3dSurfaceAllFlags)req->flags;
req               775 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	srf->format = req->format;
req               776 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	srf->scanout = req->scanout;
req               778 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
req               782 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 				 req->size_addr,
req               855 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	if (dev_priv->has_mob && req->shareable) {
req               872 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 				    req->shareable, VMW_RES_SURFACE,
req               983 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	struct drm_vmw_surface_arg *req = &arg->req;
req               992 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
req               993 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 					   req->handle_type, &base);
req              1295 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	req_ext.base = arg->req;
req              1318 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	struct drm_vmw_surface_arg *req = &arg->req;
req              1323 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv);
req              1512 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	struct drm_vmw_gb_surface_create_ext_req *req = &arg->req;
req              1515 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	return vmw_gb_surface_define_internal(dev, req, rep, file_priv);
req              1531 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	struct drm_vmw_surface_arg *req = &arg->req;
req              1534 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	return vmw_gb_surface_reference_internal(dev, req, rep, file_priv);
req              1548 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 			       struct drm_vmw_gb_surface_create_ext_req *req,
req              1562 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 		SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
req              1563 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 				req->base.svga3d_flags);
req              1570 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 		if (req->svga3d_flags_upper_32_bits != 0)
req              1573 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 		if (req->base.multisample_count != 0)
req              1576 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 		if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
req              1579 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 		if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
req              1584 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	    req->base.multisample_count == 0)
req              1587 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS)
req              1600 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 					 req->base.format,
req              1601 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 					 req->base.drm_surface_flags &
req              1603 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 					 req->base.mip_levels,
req              1604 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 					 req->base.multisample_count,
req              1605 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 					 req->base.array_size,
req              1606 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 					 req->base.base_size,
req              1607 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 					 req->multisample_pattern,
req              1608 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 					 req->quality_level,
req              1623 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
req              1624 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 		ret = vmw_user_bo_lookup(tfile, req->base.buffer_handle,
req              1635 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 				backup_handle = req->base.buffer_handle;
req              1638 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	} else if (req->base.drm_surface_flags &
req              1642 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 					req->base.drm_surface_flags &
req              1655 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 				    req->base.drm_surface_flags &
req              1697 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 				  struct drm_vmw_surface_arg *req,
req              1709 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
req              1710 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 					   req->handle_type, &base);
req                92 drivers/gpu/drm/xen/xen_drm_front.c 	struct xendispl_req *req;
req                94 drivers/gpu/drm/xen/xen_drm_front.c 	req = RING_GET_REQUEST(&evtchnl->u.req.ring,
req                95 drivers/gpu/drm/xen/xen_drm_front.c 			       evtchnl->u.req.ring.req_prod_pvt);
req                96 drivers/gpu/drm/xen/xen_drm_front.c 	req->operation = operation;
req                97 drivers/gpu/drm/xen/xen_drm_front.c 	req->id = evtchnl->evt_next_id++;
req                98 drivers/gpu/drm/xen/xen_drm_front.c 	evtchnl->evt_id = req->id;
req                99 drivers/gpu/drm/xen/xen_drm_front.c 	return req;
req               103 drivers/gpu/drm/xen/xen_drm_front.c 			   struct xendispl_req *req)
req               105 drivers/gpu/drm/xen/xen_drm_front.c 	reinit_completion(&evtchnl->u.req.completion);
req               115 drivers/gpu/drm/xen/xen_drm_front.c 	if (wait_for_completion_timeout(&evtchnl->u.req.completion,
req               119 drivers/gpu/drm/xen/xen_drm_front.c 	return evtchnl->u.req.resp_status;
req               128 drivers/gpu/drm/xen/xen_drm_front.c 	struct xendispl_req *req;
req               133 drivers/gpu/drm/xen/xen_drm_front.c 	evtchnl = &front_info->evt_pairs[pipeline->index].req;
req               137 drivers/gpu/drm/xen/xen_drm_front.c 	mutex_lock(&evtchnl->u.req.req_io_lock);
req               140 drivers/gpu/drm/xen/xen_drm_front.c 	req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG);
req               141 drivers/gpu/drm/xen/xen_drm_front.c 	req->op.set_config.x = x;
req               142 drivers/gpu/drm/xen/xen_drm_front.c 	req->op.set_config.y = y;
req               143 drivers/gpu/drm/xen/xen_drm_front.c 	req->op.set_config.width = width;
req               144 drivers/gpu/drm/xen/xen_drm_front.c 	req->op.set_config.height = height;
req               145 drivers/gpu/drm/xen/xen_drm_front.c 	req->op.set_config.bpp = bpp;
req               146 drivers/gpu/drm/xen/xen_drm_front.c 	req->op.set_config.fb_cookie = fb_cookie;
req               148 drivers/gpu/drm/xen/xen_drm_front.c 	ret = be_stream_do_io(evtchnl, req);
req               154 drivers/gpu/drm/xen/xen_drm_front.c 	mutex_unlock(&evtchnl->u.req.req_io_lock);
req               164 drivers/gpu/drm/xen/xen_drm_front.c 	struct xendispl_req *req;
req               169 drivers/gpu/drm/xen/xen_drm_front.c 	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
req               190 drivers/gpu/drm/xen/xen_drm_front.c 	mutex_lock(&evtchnl->u.req.req_io_lock);
req               193 drivers/gpu/drm/xen/xen_drm_front.c 	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
req               194 drivers/gpu/drm/xen/xen_drm_front.c 	req->op.dbuf_create.gref_directory =
req               196 drivers/gpu/drm/xen/xen_drm_front.c 	req->op.dbuf_create.buffer_sz = size;
req               197 drivers/gpu/drm/xen/xen_drm_front.c 	req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
req               198 drivers/gpu/drm/xen/xen_drm_front.c 	req->op.dbuf_create.width = width;
req               199 drivers/gpu/drm/xen/xen_drm_front.c 	req->op.dbuf_create.height = height;
req               200 drivers/gpu/drm/xen/xen_drm_front.c 	req->op.dbuf_create.bpp = bpp;
req               202 drivers/gpu/drm/xen/xen_drm_front.c 		req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC;
req               204 drivers/gpu/drm/xen/xen_drm_front.c 	ret = be_stream_do_io(evtchnl, req);
req               218 drivers/gpu/drm/xen/xen_drm_front.c 	mutex_unlock(&evtchnl->u.req.req_io_lock);
req               222 drivers/gpu/drm/xen/xen_drm_front.c 	mutex_unlock(&evtchnl->u.req.req_io_lock);
req               232 drivers/gpu/drm/xen/xen_drm_front.c 	struct xendispl_req *req;
req               237 drivers/gpu/drm/xen/xen_drm_front.c 	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
req               250 drivers/gpu/drm/xen/xen_drm_front.c 	mutex_lock(&evtchnl->u.req.req_io_lock);
req               253 drivers/gpu/drm/xen/xen_drm_front.c 	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY);
req               254 drivers/gpu/drm/xen/xen_drm_front.c 	req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie;
req               256 drivers/gpu/drm/xen/xen_drm_front.c 	ret = be_stream_do_io(evtchnl, req);
req               269 drivers/gpu/drm/xen/xen_drm_front.c 	mutex_unlock(&evtchnl->u.req.req_io_lock);
req               279 drivers/gpu/drm/xen/xen_drm_front.c 	struct xendispl_req *req;
req               283 drivers/gpu/drm/xen/xen_drm_front.c 	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
req               293 drivers/gpu/drm/xen/xen_drm_front.c 	mutex_lock(&evtchnl->u.req.req_io_lock);
req               296 drivers/gpu/drm/xen/xen_drm_front.c 	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH);
req               297 drivers/gpu/drm/xen/xen_drm_front.c 	req->op.fb_attach.dbuf_cookie = dbuf_cookie;
req               298 drivers/gpu/drm/xen/xen_drm_front.c 	req->op.fb_attach.fb_cookie = fb_cookie;
req               299 drivers/gpu/drm/xen/xen_drm_front.c 	req->op.fb_attach.width = width;
req               300 drivers/gpu/drm/xen/xen_drm_front.c 	req->op.fb_attach.height = height;
req               301 drivers/gpu/drm/xen/xen_drm_front.c 	req->op.fb_attach.pixel_format = pixel_format;
req               303 drivers/gpu/drm/xen/xen_drm_front.c 	ret = be_stream_do_io(evtchnl, req);
req               309 drivers/gpu/drm/xen/xen_drm_front.c 	mutex_unlock(&evtchnl->u.req.req_io_lock);
req               317 drivers/gpu/drm/xen/xen_drm_front.c 	struct xendispl_req *req;
req               321 drivers/gpu/drm/xen/xen_drm_front.c 	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
req               325 drivers/gpu/drm/xen/xen_drm_front.c 	mutex_lock(&evtchnl->u.req.req_io_lock);
req               328 drivers/gpu/drm/xen/xen_drm_front.c 	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH);
req               329 drivers/gpu/drm/xen/xen_drm_front.c 	req->op.fb_detach.fb_cookie = fb_cookie;
req               331 drivers/gpu/drm/xen/xen_drm_front.c 	ret = be_stream_do_io(evtchnl, req);
req               337 drivers/gpu/drm/xen/xen_drm_front.c 	mutex_unlock(&evtchnl->u.req.req_io_lock);
req               345 drivers/gpu/drm/xen/xen_drm_front.c 	struct xendispl_req *req;
req               352 drivers/gpu/drm/xen/xen_drm_front.c 	evtchnl = &front_info->evt_pairs[conn_idx].req;
req               354 drivers/gpu/drm/xen/xen_drm_front.c 	mutex_lock(&evtchnl->u.req.req_io_lock);
req               357 drivers/gpu/drm/xen/xen_drm_front.c 	req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP);
req               358 drivers/gpu/drm/xen/xen_drm_front.c 	req->op.pg_flip.fb_cookie = fb_cookie;
req               360 drivers/gpu/drm/xen/xen_drm_front.c 	ret = be_stream_do_io(evtchnl, req);
req               366 drivers/gpu/drm/xen/xen_drm_front.c 	mutex_unlock(&evtchnl->u.req.req_io_lock);
req                37 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	rp = evtchnl->u.req.ring.sring->rsp_prod;
req                41 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	for (i = evtchnl->u.req.ring.rsp_cons; i != rp; i++) {
req                42 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		resp = RING_GET_RESPONSE(&evtchnl->u.req.ring, i);
req                53 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 			evtchnl->u.req.resp_status = resp->status;
req                54 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 			complete(&evtchnl->u.req.completion);
req                64 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	evtchnl->u.req.ring.rsp_cons = i;
req                66 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	if (i != evtchnl->u.req.ring.req_prod_pvt) {
req                69 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		RING_FINAL_CHECK_FOR_RESPONSES(&evtchnl->u.req.ring,
req                74 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		evtchnl->u.req.ring.sring->rsp_event = i + 1;
req               129 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		page = (unsigned long)evtchnl->u.req.ring.sring;
req               139 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		evtchnl->u.req.resp_status = -EIO;
req               140 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		complete_all(&evtchnl->u.req.completion);
req               182 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		init_completion(&evtchnl->u.req.completion);
req               183 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		mutex_init(&evtchnl->u.req.req_io_lock);
req               186 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE);
req               190 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 			evtchnl->u.req.ring.sring = NULL;
req               246 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 				    &front_info->evt_pairs[conn].req,
req               311 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].req,
req               350 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	evtchnl->u.req.ring.req_prod_pvt++;
req               351 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&evtchnl->u.req.ring, notify);
req               367 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		front_info->evt_pairs[i].req.state = state;
req               381 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		evtchnl_free(front_info, &front_info->evt_pairs[i].req);
req                58 drivers/gpu/drm/xen/xen_drm_front_evtchnl.h 		} req;
req                66 drivers/gpu/drm/xen/xen_drm_front_evtchnl.h 	struct xen_drm_front_evtchnl req;
req               536 drivers/greybus/connection.c 	struct gb_cport_shutdown_request *req;
req               542 drivers/greybus/connection.c 					     sizeof(*req), 0, 0,
req               547 drivers/greybus/connection.c 	req = operation->request->payload;
req               548 drivers/greybus/connection.c 	req->phase = phase;
req               130 drivers/greybus/es2.c 	struct arpc_request_message *req;
req               147 drivers/greybus/es2.c static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
req               153 drivers/greybus/es2.c 	data = kmemdup(req, size, GFP_KERNEL);
req               179 drivers/greybus/es2.c static int output_async(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
req               198 drivers/greybus/es2.c 	memcpy(buf, req, size);
req               217 drivers/greybus/es2.c static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
req               223 drivers/greybus/es2.c 		return output_async(es2, req, size, cmd);
req               225 drivers/greybus/es2.c 	return output_sync(es2, req, size, cmd);
req               546 drivers/greybus/es2.c 	struct gb_apb_request_cport_flags *req;
req               550 drivers/greybus/es2.c 	req = kzalloc(sizeof(*req), GFP_KERNEL);
req               551 drivers/greybus/es2.c 	if (!req)
req               560 drivers/greybus/es2.c 	req->flags = cpu_to_le32(connection_flags);
req               569 drivers/greybus/es2.c 			      req, sizeof(*req), ES2_USB_CTRL_TIMEOUT);
req               570 drivers/greybus/es2.c 	if (ret != sizeof(*req)) {
req               581 drivers/greybus/es2.c 	kfree(req);
req               590 drivers/greybus/es2.c 	struct arpc_cport_connected_req req;
req               593 drivers/greybus/es2.c 	req.cport_id = cpu_to_le16(cport_id);
req               594 drivers/greybus/es2.c 	ret = arpc_sync(es2, ARPC_TYPE_CPORT_CONNECTED, &req, sizeof(req),
req               609 drivers/greybus/es2.c 	struct arpc_cport_flush_req req;
req               612 drivers/greybus/es2.c 	req.cport_id = cpu_to_le16(cport_id);
req               613 drivers/greybus/es2.c 	ret = arpc_sync(es2, ARPC_TYPE_CPORT_FLUSH, &req, sizeof(req),
req               628 drivers/greybus/es2.c 	struct arpc_cport_shutdown_req req;
req               635 drivers/greybus/es2.c 	req.cport_id = cpu_to_le16(cport_id);
req               636 drivers/greybus/es2.c 	req.timeout = cpu_to_le16(timeout);
req               637 drivers/greybus/es2.c 	req.phase = phase;
req               638 drivers/greybus/es2.c 	ret = arpc_sync(es2, ARPC_TYPE_CPORT_SHUTDOWN, &req, sizeof(req),
req               654 drivers/greybus/es2.c 	struct arpc_cport_quiesce_req req;
req               664 drivers/greybus/es2.c 	req.cport_id = cpu_to_le16(cport_id);
req               665 drivers/greybus/es2.c 	req.peer_space = cpu_to_le16(peer_space);
req               666 drivers/greybus/es2.c 	req.timeout = cpu_to_le16(timeout);
req               667 drivers/greybus/es2.c 	ret = arpc_sync(es2, ARPC_TYPE_CPORT_QUIESCE, &req, sizeof(req),
req               682 drivers/greybus/es2.c 	struct arpc_cport_clear_req req;
req               685 drivers/greybus/es2.c 	req.cport_id = cpu_to_le16(cport_id);
req               686 drivers/greybus/es2.c 	ret = arpc_sync(es2, ARPC_TYPE_CPORT_CLEAR, &req, sizeof(req),
req               886 drivers/greybus/es2.c 	if (size + sizeof(*rpc->req) > ARPC_OUT_SIZE_MAX)
req               894 drivers/greybus/es2.c 	rpc->req = kzalloc(sizeof(*rpc->req) + size, GFP_KERNEL);
req               895 drivers/greybus/es2.c 	if (!rpc->req)
req               902 drivers/greybus/es2.c 	rpc->req->type = type;
req               903 drivers/greybus/es2.c 	rpc->req->size = cpu_to_le16(sizeof(*rpc->req) + size);
req               904 drivers/greybus/es2.c 	memcpy(rpc->req->data, payload, size);
req               911 drivers/greybus/es2.c 	kfree(rpc->req);
req               920 drivers/greybus/es2.c 	kfree(rpc->req);
req               930 drivers/greybus/es2.c 		if (rpc->req->id == id)
req               940 drivers/greybus/es2.c 	rpc->req->id = cpu_to_le16(es2->arpc_id_cycle++);
req               962 drivers/greybus/es2.c 				 rpc->req, le16_to_cpu(rpc->req->size),
req               964 drivers/greybus/es2.c 	if (retval != le16_to_cpu(rpc->req->size)) {
req               967 drivers/greybus/es2.c 			rpc->req->type, retval);
req                24 drivers/greybus/hd.c int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
req                29 drivers/greybus/hd.c 	return hd->driver->output(hd, req, size, cmd, async);
req                72 drivers/hid/hid-u2fzero.c static int u2fzero_send(struct u2fzero_device *dev, struct u2f_hid_report *req)
req                78 drivers/hid/hid-u2fzero.c 	memcpy(dev->buf_out, req, sizeof(struct u2f_hid_report));
req               105 drivers/hid/hid-u2fzero.c 			struct u2f_hid_report *req,
req               114 drivers/hid/hid-u2fzero.c 	memcpy(dev->buf_out, req, sizeof(struct u2f_hid_report));
req               153 drivers/hid/hid-u2fzero.c 	struct u2f_hid_report req = {
req               163 drivers/hid/hid-u2fzero.c 	return u2fzero_send(dev, &req);
req               181 drivers/hid/hid-u2fzero.c 	struct u2f_hid_report req = {
req               200 drivers/hid/hid-u2fzero.c 	ret = u2fzero_recv(dev, &req, &resp);
req               216 drivers/hid/uhid.c 	struct uhid_get_report_reply_req *req;
req               242 drivers/hid/uhid.c 	req = &uhid->report_buf.u.get_report_reply;
req               243 drivers/hid/uhid.c 	if (req->err) {
req               246 drivers/hid/uhid.c 		ret = min3(count, (size_t)req->size, (size_t)UHID_DATA_MAX);
req               247 drivers/hid/uhid.c 		memcpy(buf, req->data, ret);
req              1348 drivers/hv/hv_balloon.c 			struct dm_unballoon_request *req)
req              1350 drivers/hv/hv_balloon.c 	union dm_mem_page_range *range_array = req->range_array;
req              1351 drivers/hv/hv_balloon.c 	int range_count = req->range_count;
req              1364 drivers/hv/hv_balloon.c 	if (req->more_pages == 1)
req               604 drivers/hwtracing/intel_th/core.c 	unsigned int req = 0;
req               672 drivers/hwtracing/intel_th/core.c 	if (subdev->type == INTEL_TH_SWITCH && !req) {
req               675 drivers/hwtracing/intel_th/core.c 			req++;
req                39 drivers/i2c/busses/i2c-opal.c static int i2c_opal_send_request(u32 bus_id, struct opal_i2c_request *req)
req                52 drivers/i2c/busses/i2c-opal.c 	rc = opal_i2c_request(token, bus_id, req);
req                77 drivers/i2c/busses/i2c-opal.c 	struct opal_i2c_request req;
req                83 drivers/i2c/busses/i2c-opal.c 	memset(&req, 0, sizeof(req));
req                86 drivers/i2c/busses/i2c-opal.c 		req.type = (msgs[0].flags & I2C_M_RD) ?
req                88 drivers/i2c/busses/i2c-opal.c 		req.addr = cpu_to_be16(msgs[0].addr);
req                89 drivers/i2c/busses/i2c-opal.c 		req.size = cpu_to_be32(msgs[0].len);
req                90 drivers/i2c/busses/i2c-opal.c 		req.buffer_ra = cpu_to_be64(__pa(msgs[0].buf));
req                93 drivers/i2c/busses/i2c-opal.c 		req.type = (msgs[1].flags & I2C_M_RD) ?
req                95 drivers/i2c/busses/i2c-opal.c 		req.addr = cpu_to_be16(msgs[0].addr);
req                96 drivers/i2c/busses/i2c-opal.c 		req.subaddr_sz = msgs[0].len;
req                98 drivers/i2c/busses/i2c-opal.c 			req.subaddr = (req.subaddr << 8) | msgs[0].buf[i];
req                99 drivers/i2c/busses/i2c-opal.c 		req.subaddr = cpu_to_be32(req.subaddr);
req               100 drivers/i2c/busses/i2c-opal.c 		req.size = cpu_to_be32(msgs[1].len);
req               101 drivers/i2c/busses/i2c-opal.c 		req.buffer_ra = cpu_to_be64(__pa(msgs[1].buf));
req               105 drivers/i2c/busses/i2c-opal.c 	rc = i2c_opal_send_request(opal_id, &req);
req               117 drivers/i2c/busses/i2c-opal.c 	struct opal_i2c_request req;
req               121 drivers/i2c/busses/i2c-opal.c 	memset(&req, 0, sizeof(req));
req               123 drivers/i2c/busses/i2c-opal.c 	req.addr = cpu_to_be16(addr);
req               126 drivers/i2c/busses/i2c-opal.c 		req.buffer_ra = cpu_to_be64(__pa(&data->byte));
req               127 drivers/i2c/busses/i2c-opal.c 		req.size = cpu_to_be32(1);
req               130 drivers/i2c/busses/i2c-opal.c 		req.type = (read_write == I2C_SMBUS_READ) ?
req               134 drivers/i2c/busses/i2c-opal.c 		req.buffer_ra = cpu_to_be64(__pa(&data->byte));
req               135 drivers/i2c/busses/i2c-opal.c 		req.size = cpu_to_be32(1);
req               136 drivers/i2c/busses/i2c-opal.c 		req.subaddr = cpu_to_be32(command);
req               137 drivers/i2c/busses/i2c-opal.c 		req.subaddr_sz = 1;
req               138 drivers/i2c/busses/i2c-opal.c 		req.type = (read_write == I2C_SMBUS_READ) ?
req               146 drivers/i2c/busses/i2c-opal.c 		req.buffer_ra = cpu_to_be64(__pa(local));
req               147 drivers/i2c/busses/i2c-opal.c 		req.size = cpu_to_be32(2);
req               148 drivers/i2c/busses/i2c-opal.c 		req.subaddr = cpu_to_be32(command);
req               149 drivers/i2c/busses/i2c-opal.c 		req.subaddr_sz = 1;
req               150 drivers/i2c/busses/i2c-opal.c 		req.type = (read_write == I2C_SMBUS_READ) ?
req               154 drivers/i2c/busses/i2c-opal.c 		req.buffer_ra = cpu_to_be64(__pa(&data->block[1]));
req               155 drivers/i2c/busses/i2c-opal.c 		req.size = cpu_to_be32(data->block[0]);
req               156 drivers/i2c/busses/i2c-opal.c 		req.subaddr = cpu_to_be32(command);
req               157 drivers/i2c/busses/i2c-opal.c 		req.subaddr_sz = 1;
req               158 drivers/i2c/busses/i2c-opal.c 		req.type = (read_write == I2C_SMBUS_READ) ?
req               165 drivers/i2c/busses/i2c-opal.c 	rc = i2c_opal_send_request(opal_id, &req);
req               140 drivers/i3c/device.c 			   const struct i3c_ibi_setup *req)
req               144 drivers/i3c/device.c 	if (!req->handler || !req->num_slots)
req               150 drivers/i3c/device.c 		ret = i3c_dev_request_ibi_locked(dev->desc, req);
req                24 drivers/i3c/internals.h 			       const struct i3c_ibi_setup *req);
req              2269 drivers/i3c/master.c 			   const struct i3c_ibi_setup *req)
req              2284 drivers/i3c/master.c 	pool->slots = kcalloc(req->num_slots, sizeof(*slot), GFP_KERNEL);
req              2290 drivers/i3c/master.c 	if (req->max_payload_len) {
req              2291 drivers/i3c/master.c 		pool->payload_buf = kcalloc(req->num_slots,
req              2292 drivers/i3c/master.c 					    req->max_payload_len, GFP_KERNEL);
req              2299 drivers/i3c/master.c 	for (i = 0; i < req->num_slots; i++) {
req              2303 drivers/i3c/master.c 		if (req->max_payload_len)
req              2305 drivers/i3c/master.c 					  (i * req->max_payload_len);
req              2594 drivers/i3c/master.c 			       const struct i3c_ibi_setup *req)
req              2612 drivers/i3c/master.c 	ibi->handler = req->handler;
req              2613 drivers/i3c/master.c 	ibi->max_payload_len = req->max_payload_len;
req              2614 drivers/i3c/master.c 	ibi->num_slots = req->num_slots;
req              2617 drivers/i3c/master.c 	ret = master->ops->request_ibi(dev, req);
req              1441 drivers/i3c/master/i3c-master-cdns.c 				       const struct i3c_ibi_setup *req)
req              1449 drivers/i3c/master/i3c-master-cdns.c 	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
req               177 drivers/ide/ide-atapi.c 	struct scsi_request *req;
req               206 drivers/ide/ide-atapi.c 	req = scsi_req(sense_rq);
req               210 drivers/ide/ide-atapi.c 	scsi_req_init(req);
req               228 drivers/ide/ide-atapi.c 	req->cmd[0] = GPCMD_REQUEST_SENSE;
req               229 drivers/ide/ide-atapi.c 	req->cmd[4] = cmd_len;
req               231 drivers/ide/ide-atapi.c 		req->cmd[13] = REQ_IDETAPE_PC1;
req               536 drivers/ide/ide-cd.c 	struct scsi_request *req = scsi_req(rq);
req               539 drivers/ide/ide-cd.c 		req->cmd[0] = GPCMD_READ_10;
req               541 drivers/ide/ide-cd.c 		req->cmd[0] = GPCMD_WRITE_10;
req               546 drivers/ide/ide-cd.c 	req->cmd[2] = (block >> 24) & 0xff;
req               547 drivers/ide/ide-cd.c 	req->cmd[3] = (block >> 16) & 0xff;
req               548 drivers/ide/ide-cd.c 	req->cmd[4] = (block >>  8) & 0xff;
req               549 drivers/ide/ide-cd.c 	req->cmd[5] = block & 0xff;
req               554 drivers/ide/ide-cd.c 	req->cmd[7] = (blocks >> 8) & 0xff;
req               555 drivers/ide/ide-cd.c 	req->cmd[8] = blocks & 0xff;
req               556 drivers/ide/ide-cd.c 	req->cmd_len = 10;
req               748 drivers/ide/ide-probe.c 	struct ide_request *req = blk_mq_rq_to_pdu(rq);
req               750 drivers/ide/ide-probe.c 	req->special = NULL;
req               751 drivers/ide/ide-probe.c 	scsi_req_init(&req->sreq);
req               752 drivers/ide/ide-probe.c 	req->sreq.sense = req->sense;
req               573 drivers/ide/ide-tape.c 	struct scsi_request *req = scsi_req(rq);
req               577 drivers/ide/ide-tape.c 		      req->cmd[0], (unsigned long long)blk_rq_pos(rq),
req               597 drivers/ide/ide-tape.c 	    (req->cmd[13] & REQ_IDETAPE_PC2) == 0)
req               614 drivers/ide/ide-tape.c 			if (req->cmd[13] & REQ_IDETAPE_PC2) {
req               631 drivers/ide/ide-tape.c 	if (req->cmd[13] & REQ_IDETAPE_READ) {
req               636 drivers/ide/ide-tape.c 	if (req->cmd[13] & REQ_IDETAPE_WRITE) {
req               641 drivers/ide/ide-tape.c 	if (req->cmd[13] & REQ_IDETAPE_PC1) {
req               643 drivers/ide/ide-tape.c 		req->cmd[13] &= ~(REQ_IDETAPE_PC1);
req               644 drivers/ide/ide-tape.c 		req->cmd[13] |= REQ_IDETAPE_PC2;
req               647 drivers/ide/ide-tape.c 	if (req->cmd[13] & REQ_IDETAPE_PC2) {
req               113 drivers/iio/accel/mma9551_core.c 	struct mma9551_mbox_request req;
req               125 drivers/iio/accel/mma9551_core.c 	req.start_mbox = 0;
req               126 drivers/iio/accel/mma9551_core.c 	req.app_id = app_id;
req               127 drivers/iio/accel/mma9551_core.c 	req.cmd_off = command | (offset >> 8);
req               128 drivers/iio/accel/mma9551_core.c 	req.lower_off = offset;
req               131 drivers/iio/accel/mma9551_core.c 		req.nbytes = num_inbytes;
req               133 drivers/iio/accel/mma9551_core.c 		req.nbytes = num_outbytes;
req               135 drivers/iio/accel/mma9551_core.c 		memcpy(req.buf, inbytes, num_inbytes);
req               140 drivers/iio/accel/mma9551_core.c 	out.buf = (u8 *)&req;
req               535 drivers/iio/adc/cpcap-adc.c 				 struct cpcap_adc_request *req)
req               545 drivers/iio/adc/cpcap-adc.c 	switch (req->channel) {
req               564 drivers/iio/adc/cpcap-adc.c 	switch (req->timing) {
req               603 drivers/iio/adc/cpcap-adc.c 	if (req->timing == CPCAP_ADC_TIMING_IMM) {
req               630 drivers/iio/adc/cpcap-adc.c 				struct cpcap_adc_request *req)
req               634 drivers/iio/adc/cpcap-adc.c 	req->timing = CPCAP_ADC_TIMING_IMM;
req               638 drivers/iio/adc/cpcap-adc.c 		cpcap_adc_setup_bank(ddata, req);
req               672 drivers/iio/adc/cpcap-adc.c static void cpcap_adc_phase(struct cpcap_adc_request *req)
req               674 drivers/iio/adc/cpcap-adc.c 	const struct cpcap_adc_conversion_tbl *conv_tbl = req->conv_tbl;
req               675 drivers/iio/adc/cpcap-adc.c 	const struct cpcap_adc_phasing_tbl *phase_tbl = req->phase_tbl;
req               676 drivers/iio/adc/cpcap-adc.c 	int index = req->channel;
req               679 drivers/iio/adc/cpcap-adc.c 	switch (req->channel) {
req               682 drivers/iio/adc/cpcap-adc.c 		index = req->bank_index;
req               683 drivers/iio/adc/cpcap-adc.c 		req->result -= phase_tbl[index].offset;
req               684 drivers/iio/adc/cpcap-adc.c 		req->result -= CPCAP_FOUR_POINT_TWO_ADC;
req               685 drivers/iio/adc/cpcap-adc.c 		req->result *= phase_tbl[index].multiplier;
req               688 drivers/iio/adc/cpcap-adc.c 		req->result /= phase_tbl[index].divider;
req               689 drivers/iio/adc/cpcap-adc.c 		req->result += CPCAP_FOUR_POINT_TWO_ADC;
req               692 drivers/iio/adc/cpcap-adc.c 		index = req->bank_index;
req               695 drivers/iio/adc/cpcap-adc.c 		req->result += conv_tbl[index].cal_offset;
req               696 drivers/iio/adc/cpcap-adc.c 		req->result += conv_tbl[index].align_offset;
req               697 drivers/iio/adc/cpcap-adc.c 		req->result *= phase_tbl[index].multiplier;
req               700 drivers/iio/adc/cpcap-adc.c 		req->result /= phase_tbl[index].divider;
req               701 drivers/iio/adc/cpcap-adc.c 		req->result += phase_tbl[index].offset;
req               705 drivers/iio/adc/cpcap-adc.c 	if (req->result < phase_tbl[index].min)
req               706 drivers/iio/adc/cpcap-adc.c 		req->result = phase_tbl[index].min;
req               707 drivers/iio/adc/cpcap-adc.c 	else if (req->result > phase_tbl[index].max)
req               708 drivers/iio/adc/cpcap-adc.c 		req->result = phase_tbl[index].max;
req               744 drivers/iio/adc/cpcap-adc.c static void cpcap_adc_convert(struct cpcap_adc_request *req)
req               746 drivers/iio/adc/cpcap-adc.c 	const struct cpcap_adc_conversion_tbl *conv_tbl = req->conv_tbl;
req               747 drivers/iio/adc/cpcap-adc.c 	int index = req->channel;
req               750 drivers/iio/adc/cpcap-adc.c 	switch (req->channel) {
req               766 drivers/iio/adc/cpcap-adc.c 	if ((req->channel == CPCAP_ADC_AD0) ||
req               767 drivers/iio/adc/cpcap-adc.c 	    (req->channel == CPCAP_ADC_AD3)) {
req               768 drivers/iio/adc/cpcap-adc.c 		req->result =
req               769 drivers/iio/adc/cpcap-adc.c 			cpcap_adc_table_to_millicelcius(req->result);
req               775 drivers/iio/adc/cpcap-adc.c 	req->result *= conv_tbl[index].multiplier;
req               778 drivers/iio/adc/cpcap-adc.c 	req->result /= conv_tbl[index].divider;
req               779 drivers/iio/adc/cpcap-adc.c 	req->result += conv_tbl[index].conv_offset;
req               787 drivers/iio/adc/cpcap-adc.c 				      struct cpcap_adc_request *req)
req               807 drivers/iio/adc/cpcap-adc.c 	addr = CPCAP_REG_ADCD0 + req->bank_index * 4;
req               809 drivers/iio/adc/cpcap-adc.c 	error = regmap_read(ddata->reg, addr, &req->result);
req               813 drivers/iio/adc/cpcap-adc.c 	req->result &= 0x3ff;
req               814 drivers/iio/adc/cpcap-adc.c 	cpcap_adc_phase(req);
req               815 drivers/iio/adc/cpcap-adc.c 	cpcap_adc_convert(req);
req               820 drivers/iio/adc/cpcap-adc.c static int cpcap_adc_init_request(struct cpcap_adc_request *req,
req               823 drivers/iio/adc/cpcap-adc.c 	req->channel = channel;
req               824 drivers/iio/adc/cpcap-adc.c 	req->phase_tbl = bank_phasing;
req               825 drivers/iio/adc/cpcap-adc.c 	req->conv_tbl = bank_conversion;
req               829 drivers/iio/adc/cpcap-adc.c 		req->bank_index = channel;
req               832 drivers/iio/adc/cpcap-adc.c 		req->bank_index = channel - 8;
req               835 drivers/iio/adc/cpcap-adc.c 		req->bank_index = CPCAP_ADC_BATTP;
req               838 drivers/iio/adc/cpcap-adc.c 		req->bank_index = CPCAP_ADC_BATTI;
req               868 drivers/iio/adc/cpcap-adc.c 	struct cpcap_adc_request req;
req               871 drivers/iio/adc/cpcap-adc.c 	error = cpcap_adc_init_request(&req, chan->channel);
req               878 drivers/iio/adc/cpcap-adc.c 		error = cpcap_adc_start_bank(ddata, &req);
req               891 drivers/iio/adc/cpcap-adc.c 		error = cpcap_adc_start_bank(ddata, &req);
req               898 drivers/iio/adc/cpcap-adc.c 							   &req.result);
req               902 drivers/iio/adc/cpcap-adc.c 			error = cpcap_adc_read_bank_scaled(ddata, &req);
req               910 drivers/iio/adc/cpcap-adc.c 		*val = req.result;
req               165 drivers/iio/adc/qcom-spmi-iadc.c 	u8 mode, sta1, chan, dig, en, req;
req               180 drivers/iio/adc/qcom-spmi-iadc.c 	ret = iadc_read(iadc, IADC_CONV_REQ, &req);
req               194 drivers/iio/adc/qcom-spmi-iadc.c 		mode, en, chan, dig, req, sta1);
req               176 drivers/iio/adc/qcom-spmi-vadc.c 	u8 mode, sta1, chan, dig, en, req;
req               191 drivers/iio/adc/qcom-spmi-vadc.c 	ret = vadc_read(vadc, VADC_CONV_REQ, &req);
req               205 drivers/iio/adc/qcom-spmi-vadc.c 		mode, en, chan, dig, req, sta1);
req               172 drivers/iio/adc/twl4030-madc.c static int twl4030_madc_conversion(struct twl4030_madc_request *req);
req               179 drivers/iio/adc/twl4030-madc.c 	struct twl4030_madc_request req;
req               182 drivers/iio/adc/twl4030-madc.c 	req.method = madc->use_second_irq ? TWL4030_MADC_SW2 : TWL4030_MADC_SW1;
req               184 drivers/iio/adc/twl4030-madc.c 	req.channels = BIT(chan->channel);
req               185 drivers/iio/adc/twl4030-madc.c 	req.active = false;
req               186 drivers/iio/adc/twl4030-madc.c 	req.type = TWL4030_MADC_WAIT;
req               187 drivers/iio/adc/twl4030-madc.c 	req.raw = !(mask == IIO_CHAN_INFO_PROCESSED);
req               188 drivers/iio/adc/twl4030-madc.c 	req.do_avg = (mask == IIO_CHAN_INFO_AVERAGE_RAW);
req               190 drivers/iio/adc/twl4030-madc.c 	ret = twl4030_madc_conversion(&req);
req               194 drivers/iio/adc/twl4030-madc.c 	*val = req.rbuf[chan->channel];
req               610 drivers/iio/adc/twl4030-madc.c static int twl4030_madc_conversion(struct twl4030_madc_request *req)
req               615 drivers/iio/adc/twl4030-madc.c 	if (!req || !twl4030_madc)
req               619 drivers/iio/adc/twl4030-madc.c 	if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) {
req               624 drivers/iio/adc/twl4030-madc.c 	if (twl4030_madc->requests[req->method].active) {
req               628 drivers/iio/adc/twl4030-madc.c 	method = &twl4030_conversion_methods[req->method];
req               630 drivers/iio/adc/twl4030-madc.c 	ret = twl_i2c_write_u16(TWL4030_MODULE_MADC, req->channels, method->sel);
req               637 drivers/iio/adc/twl4030-madc.c 	if (req->do_avg) {
req               638 drivers/iio/adc/twl4030-madc.c 		ret = twl_i2c_write_u16(TWL4030_MODULE_MADC, req->channels,
req               648 drivers/iio/adc/twl4030-madc.c 	if (req->method == TWL4030_MADC_RT) {
req               652 drivers/iio/adc/twl4030-madc.c 	ret = twl4030_madc_start_conversion(twl4030_madc, req->method);
req               655 drivers/iio/adc/twl4030-madc.c 	twl4030_madc->requests[req->method].active = 1;
req               659 drivers/iio/adc/twl4030-madc.c 		twl4030_madc->requests[req->method].active = 0;
req               663 drivers/iio/adc/twl4030-madc.c 					 req->channels, req->rbuf, req->raw);
req               664 drivers/iio/adc/twl4030-madc.c 	twl4030_madc->requests[req->method].active = 0;
req               102 drivers/infiniband/core/addr.c 	struct addr_req *req;
req               115 drivers/infiniband/core/addr.c 	list_for_each_entry(req, &req_list, list) {
req               116 drivers/infiniband/core/addr.c 		if (nlh->nlmsg_seq != req->seq)
req               119 drivers/infiniband/core/addr.c 		rdma_addr_set_dgid(req->addr, &gid);
req               120 drivers/infiniband/core/addr.c 		req->status = 0;
req               297 drivers/infiniband/core/addr.c static void set_timeout(struct addr_req *req, unsigned long time)
req               305 drivers/infiniband/core/addr.c 	mod_delayed_work(addr_wq, &req->work, delay);
req               308 drivers/infiniband/core/addr.c static void queue_req(struct addr_req *req)
req               311 drivers/infiniband/core/addr.c 	list_add_tail(&req->list, &req_list);
req               312 drivers/infiniband/core/addr.c 	set_timeout(req, req->timeout);
req               620 drivers/infiniband/core/addr.c 	struct addr_req *req;
req               623 drivers/infiniband/core/addr.c 	req = container_of(_work, struct addr_req, work.work);
req               625 drivers/infiniband/core/addr.c 	if (req->status == -ENODATA) {
req               626 drivers/infiniband/core/addr.c 		src_in = (struct sockaddr *)&req->src_addr;
req               627 drivers/infiniband/core/addr.c 		dst_in = (struct sockaddr *)&req->dst_addr;
req               628 drivers/infiniband/core/addr.c 		req->status = addr_resolve(src_in, dst_in, req->addr,
req               629 drivers/infiniband/core/addr.c 					   true, req->resolve_by_gid_attr,
req               630 drivers/infiniband/core/addr.c 					   req->seq);
req               631 drivers/infiniband/core/addr.c 		if (req->status && time_after_eq(jiffies, req->timeout)) {
req               632 drivers/infiniband/core/addr.c 			req->status = -ETIMEDOUT;
req               633 drivers/infiniband/core/addr.c 		} else if (req->status == -ENODATA) {
req               636 drivers/infiniband/core/addr.c 			if (!list_empty(&req->list))
req               637 drivers/infiniband/core/addr.c 				set_timeout(req, req->timeout);
req               643 drivers/infiniband/core/addr.c 	req->callback(req->status, (struct sockaddr *)&req->src_addr,
req               644 drivers/infiniband/core/addr.c 		req->addr, req->context);
req               645 drivers/infiniband/core/addr.c 	req->callback = NULL;
req               648 drivers/infiniband/core/addr.c 	if (!list_empty(&req->list)) {
req               654 drivers/infiniband/core/addr.c 		cancel_delayed_work(&req->work);
req               655 drivers/infiniband/core/addr.c 		list_del_init(&req->list);
req               656 drivers/infiniband/core/addr.c 		kfree(req);
req               668 drivers/infiniband/core/addr.c 	struct addr_req *req;
req               671 drivers/infiniband/core/addr.c 	req = kzalloc(sizeof *req, GFP_KERNEL);
req               672 drivers/infiniband/core/addr.c 	if (!req)
req               675 drivers/infiniband/core/addr.c 	src_in = (struct sockaddr *) &req->src_addr;
req               676 drivers/infiniband/core/addr.c 	dst_in = (struct sockaddr *) &req->dst_addr;
req               690 drivers/infiniband/core/addr.c 	req->addr = addr;
req               691 drivers/infiniband/core/addr.c 	req->callback = callback;
req               692 drivers/infiniband/core/addr.c 	req->context = context;
req               693 drivers/infiniband/core/addr.c 	req->resolve_by_gid_attr = resolve_by_gid_attr;
req               694 drivers/infiniband/core/addr.c 	INIT_DELAYED_WORK(&req->work, process_one_req);
req               695 drivers/infiniband/core/addr.c 	req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);
req               697 drivers/infiniband/core/addr.c 	req->status = addr_resolve(src_in, dst_in, addr, true,
req               698 drivers/infiniband/core/addr.c 				   req->resolve_by_gid_attr, req->seq);
req               699 drivers/infiniband/core/addr.c 	switch (req->status) {
req               701 drivers/infiniband/core/addr.c 		req->timeout = jiffies;
req               702 drivers/infiniband/core/addr.c 		queue_req(req);
req               705 drivers/infiniband/core/addr.c 		req->timeout = msecs_to_jiffies(timeout_ms) + jiffies;
req               706 drivers/infiniband/core/addr.c 		queue_req(req);
req               709 drivers/infiniband/core/addr.c 		ret = req->status;
req               714 drivers/infiniband/core/addr.c 	kfree(req);
req               768 drivers/infiniband/core/addr.c 	struct addr_req *req, *temp_req;
req               772 drivers/infiniband/core/addr.c 	list_for_each_entry_safe(req, temp_req, &req_list, list) {
req               773 drivers/infiniband/core/addr.c 		if (req->addr == addr) {
req               778 drivers/infiniband/core/addr.c 			list_del_init(&req->list);
req               779 drivers/infiniband/core/addr.c 			found = req;
req               850 drivers/infiniband/core/addr.c 	struct addr_req *req;
req               857 drivers/infiniband/core/addr.c 			list_for_each_entry(req, &req_list, list)
req               858 drivers/infiniband/core/addr.c 				set_timeout(req, jiffies);
req               184 drivers/infiniband/core/cm.c static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
req               713 drivers/infiniband/core/cma.c 			      struct cma_req_info *req)
req               724 drivers/infiniband/core/cma.c 	if (rdma_protocol_roce(req->device, req->port))
req               731 drivers/infiniband/core/cma.c 	gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1];
req               732 drivers/infiniband/core/cma.c 	sgid_attr = cma_validate_port(req->device, req->port,
req               737 drivers/infiniband/core/cma.c 	id_priv->id.port_num = req->port;
req              1369 drivers/infiniband/core/cma.c 			     struct cma_req_info *req)
req              1378 drivers/infiniband/core/cma.c 		req->device	= req_param->listen_id->device;
req              1379 drivers/infiniband/core/cma.c 		req->port	= req_param->port;
req              1380 drivers/infiniband/core/cma.c 		memcpy(&req->local_gid, &req_param->primary_path->sgid,
req              1381 drivers/infiniband/core/cma.c 		       sizeof(req->local_gid));
req              1382 drivers/infiniband/core/cma.c 		req->has_gid	= true;
req              1383 drivers/infiniband/core/cma.c 		req->service_id = req_param->primary_path->service_id;
req              1384 drivers/infiniband/core/cma.c 		req->pkey	= be16_to_cpu(req_param->primary_path->pkey);
req              1385 drivers/infiniband/core/cma.c 		if (req->pkey != req_param->bth_pkey)
req              1388 drivers/infiniband/core/cma.c 					    req_param->bth_pkey, req->pkey);
req              1391 drivers/infiniband/core/cma.c 		req->device	= sidr_param->listen_id->device;
req              1392 drivers/infiniband/core/cma.c 		req->port	= sidr_param->port;
req              1393 drivers/infiniband/core/cma.c 		req->has_gid	= false;
req              1394 drivers/infiniband/core/cma.c 		req->service_id	= sidr_param->service_id;
req              1395 drivers/infiniband/core/cma.c 		req->pkey	= sidr_param->pkey;
req              1396 drivers/infiniband/core/cma.c 		if (req->pkey != sidr_param->bth_pkey)
req              1399 drivers/infiniband/core/cma.c 					    sidr_param->bth_pkey, req->pkey);
req              1510 drivers/infiniband/core/cma.c 					  struct cma_req_info *req)
req              1513 drivers/infiniband/core/cma.c 			(struct sockaddr *)&req->listen_addr_storage;
req              1514 drivers/infiniband/core/cma.c 	struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage;
req              1516 drivers/infiniband/core/cma.c 	const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
req              1520 drivers/infiniband/core/cma.c 			       req->service_id);
req              1524 drivers/infiniband/core/cma.c 	if (rdma_protocol_roce(req->device, req->port))
req              1527 drivers/infiniband/core/cma.c 		net_dev = ib_get_net_dev_by_params(req->device, req->port,
req              1528 drivers/infiniband/core/cma.c 						   req->pkey,
req              1585 drivers/infiniband/core/cma.c static bool cma_is_req_ipv6_ll(const struct cma_req_info *req)
req              1588 drivers/infiniband/core/cma.c 			(const struct sockaddr *)&req->listen_addr_storage;
req              1598 drivers/infiniband/core/cma.c 			      const struct cma_req_info *req)
req              1604 drivers/infiniband/core/cma.c 		return (!id->port_num || id->port_num == req->port) &&
req              1611 drivers/infiniband/core/cma.c 	if (!cma_is_req_ipv6_ll(req))
req              1629 drivers/infiniband/core/cma.c 		const struct cma_req_info *req,
req              1640 drivers/infiniband/core/cma.c 			    cma_match_net_dev(&id_priv->id, net_dev, req))
req              1647 drivers/infiniband/core/cma.c 						      net_dev, req))
req              1659 drivers/infiniband/core/cma.c 		     struct cma_req_info *req,
req              1666 drivers/infiniband/core/cma.c 	err = cma_save_req_info(ib_event, req);
req              1670 drivers/infiniband/core/cma.c 	*net_dev = cma_get_net_dev(ib_event, req);
req              1708 drivers/infiniband/core/cma.c 				 (struct sockaddr *)&req->listen_addr_storage,
req              1709 drivers/infiniband/core/cma.c 				 (struct sockaddr *)&req->src_addr_storage)) {
req              1716 drivers/infiniband/core/cma.c 				rdma_ps_from_service_id(req->service_id),
req              1717 drivers/infiniband/core/cma.c 				cma_port_from_service_id(req->service_id));
req              1718 drivers/infiniband/core/cma.c 	id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
req              2140 drivers/infiniband/core/cma.c 	struct cma_req_info req = {};
req              2145 drivers/infiniband/core/cma.c 	listen_id = cma_ib_id_from_event(cm_id, ib_event, &req, &net_dev);
req              2178 drivers/infiniband/core/cma.c 	ret = cma_ib_acquire_dev(conn_id, listen_id, &req);
req              3741 drivers/infiniband/core/cma.c 	struct ib_cm_sidr_req_param req;
req              3747 drivers/infiniband/core/cma.c 	memset(&req, 0, sizeof req);
req              3749 drivers/infiniband/core/cma.c 	req.private_data_len = offset + conn_param->private_data_len;
req              3750 drivers/infiniband/core/cma.c 	if (req.private_data_len < conn_param->private_data_len)
req              3753 drivers/infiniband/core/cma.c 	if (req.private_data_len) {
req              3754 drivers/infiniband/core/cma.c 		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
req              3769 drivers/infiniband/core/cma.c 		req.private_data = private_data;
req              3780 drivers/infiniband/core/cma.c 	req.path = id_priv->id.route.path_rec;
req              3781 drivers/infiniband/core/cma.c 	req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
req              3782 drivers/infiniband/core/cma.c 	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
req              3783 drivers/infiniband/core/cma.c 	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
req              3784 drivers/infiniband/core/cma.c 	req.max_cm_retries = CMA_MAX_CM_RETRIES;
req              3786 drivers/infiniband/core/cma.c 	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
req              3799 drivers/infiniband/core/cma.c 	struct ib_cm_req_param req;
req              3806 drivers/infiniband/core/cma.c 	memset(&req, 0, sizeof req);
req              3808 drivers/infiniband/core/cma.c 	req.private_data_len = offset + conn_param->private_data_len;
req              3809 drivers/infiniband/core/cma.c 	if (req.private_data_len < conn_param->private_data_len)
req              3812 drivers/infiniband/core/cma.c 	if (req.private_data_len) {
req              3813 drivers/infiniband/core/cma.c 		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
req              3836 drivers/infiniband/core/cma.c 		req.private_data = private_data;
req              3839 drivers/infiniband/core/cma.c 	req.primary_path = &route->path_rec[0];
req              3841 drivers/infiniband/core/cma.c 		req.alternate_path = &route->path_rec[1];
req              3843 drivers/infiniband/core/cma.c 	req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
req              3845 drivers/infiniband/core/cma.c 	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
req              3846 drivers/infiniband/core/cma.c 	req.qp_num = id_priv->qp_num;
req              3847 drivers/infiniband/core/cma.c 	req.qp_type = id_priv->id.qp_type;
req              3848 drivers/infiniband/core/cma.c 	req.starting_psn = id_priv->seq_num;
req              3849 drivers/infiniband/core/cma.c 	req.responder_resources = conn_param->responder_resources;
req              3850 drivers/infiniband/core/cma.c 	req.initiator_depth = conn_param->initiator_depth;
req              3851 drivers/infiniband/core/cma.c 	req.flow_control = conn_param->flow_control;
req              3852 drivers/infiniband/core/cma.c 	req.retry_count = min_t(u8, 7, conn_param->retry_count);
req              3853 drivers/infiniband/core/cma.c 	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
req              3854 drivers/infiniband/core/cma.c 	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
req              3855 drivers/infiniband/core/cma.c 	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
req              3856 drivers/infiniband/core/cma.c 	req.max_cm_retries = CMA_MAX_CM_RETRIES;
req              3857 drivers/infiniband/core/cma.c 	req.srq = id_priv->srq ? 1 : 0;
req              3859 drivers/infiniband/core/cma.c 	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
req               668 drivers/infiniband/core/user_mad.c 	struct ib_mad_reg_req req;
req               708 drivers/infiniband/core/user_mad.c 		memset(&req, 0, sizeof(req));
req               709 drivers/infiniband/core/user_mad.c 		req.mgmt_class         = ureq.mgmt_class;
req               710 drivers/infiniband/core/user_mad.c 		req.mgmt_class_version = ureq.mgmt_class_version;
req               711 drivers/infiniband/core/user_mad.c 		memcpy(req.oui, ureq.oui, sizeof req.oui);
req               718 drivers/infiniband/core/user_mad.c 				req.method_mask[i] =
req               721 drivers/infiniband/core/user_mad.c 			memcpy(req.method_mask, ureq.method_mask,
req               722 drivers/infiniband/core/user_mad.c 			       sizeof req.method_mask);
req               727 drivers/infiniband/core/user_mad.c 				      ureq.mgmt_class ? &req : NULL,
req               770 drivers/infiniband/core/user_mad.c 	struct ib_mad_reg_req req;
req               824 drivers/infiniband/core/user_mad.c 		memset(&req, 0, sizeof(req));
req               825 drivers/infiniband/core/user_mad.c 		req.mgmt_class         = ureq.mgmt_class;
req               826 drivers/infiniband/core/user_mad.c 		req.mgmt_class_version = ureq.mgmt_class_version;
req               834 drivers/infiniband/core/user_mad.c 		req.oui[2] =  ureq.oui & 0x0000ff;
req               835 drivers/infiniband/core/user_mad.c 		req.oui[1] = (ureq.oui & 0x00ff00) >> 8;
req               836 drivers/infiniband/core/user_mad.c 		req.oui[0] = (ureq.oui & 0xff0000) >> 16;
req               837 drivers/infiniband/core/user_mad.c 		memcpy(req.method_mask, ureq.method_mask,
req               838 drivers/infiniband/core/user_mad.c 			sizeof(req.method_mask));
req               843 drivers/infiniband/core/user_mad.c 				      ureq.mgmt_class ? &req : NULL,
req                91 drivers/infiniband/core/uverbs_cmd.c static int uverbs_request(struct uverbs_attr_bundle *attrs, void *req,
req                94 drivers/infiniband/core/uverbs_cmd.c 	if (copy_from_user(req, attrs->ucore.inbuf,
req                99 drivers/infiniband/core/uverbs_cmd.c 		memset(req + attrs->ucore.inlen, 0,
req               132 drivers/infiniband/core/uverbs_cmd.c 				void *req,
req               138 drivers/infiniband/core/uverbs_cmd.c 	if (copy_from_user(req, attrs->ucore.inbuf, req_len))
req              3710 drivers/infiniband/core/uverbs_cmd.c #define UAPI_DEF_WRITE_IO(req, resp)                                           \
req              3712 drivers/infiniband/core/uverbs_cmd.c 			  BUILD_BUG_ON_ZERO(offsetof(req, response) != 0) +    \
req              3713 drivers/infiniband/core/uverbs_cmd.c 			  BUILD_BUG_ON_ZERO(sizeof(((req *)0)->response) !=    \
req              3715 drivers/infiniband/core/uverbs_cmd.c 	.write.req_size = sizeof(req), .write.resp_size = sizeof(resp)
req              3717 drivers/infiniband/core/uverbs_cmd.c #define UAPI_DEF_WRITE_I(req) .write.req_size = sizeof(req)
req              3719 drivers/infiniband/core/uverbs_cmd.c #define UAPI_DEF_WRITE_UDATA_IO(req, resp)                                     \
req              3720 drivers/infiniband/core/uverbs_cmd.c 	UAPI_DEF_WRITE_IO(req, resp),                                          \
req              3723 drivers/infiniband/core/uverbs_cmd.c 			BUILD_BUG_ON_ZERO(offsetof(req, driver_data) !=        \
req              3724 drivers/infiniband/core/uverbs_cmd.c 					  sizeof(req)) +                       \
req              3728 drivers/infiniband/core/uverbs_cmd.c #define UAPI_DEF_WRITE_UDATA_I(req)                                            \
req              3729 drivers/infiniband/core/uverbs_cmd.c 	UAPI_DEF_WRITE_I(req),                                                 \
req              3731 drivers/infiniband/core/uverbs_cmd.c 			1 + BUILD_BUG_ON_ZERO(offsetof(req, driver_data) !=    \
req              3732 drivers/infiniband/core/uverbs_cmd.c 					      sizeof(req))
req              3738 drivers/infiniband/core/uverbs_cmd.c #define UAPI_DEF_WRITE_IO_EX(req, req_last_member, resp, resp_last_member)     \
req              3740 drivers/infiniband/core/uverbs_cmd.c 	.write.req_size = offsetofend(req, req_last_member),                   \
req              3743 drivers/infiniband/core/uverbs_cmd.c #define UAPI_DEF_WRITE_I_EX(req, req_last_member)                              \
req              3744 drivers/infiniband/core/uverbs_cmd.c 	.write.req_size = offsetofend(req, req_last_member)
req              2558 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		struct bnxt_re_cq_req req;
req              2561 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		if (ib_copy_from_udata(&req, udata, sizeof(req))) {
req              2566 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		cq->umem = ib_umem_get(udata, req.cq_va,
req               376 drivers/infiniband/hw/bnxt_re/main.c 	struct hwrm_ring_free_input req = {0};
req               386 drivers/infiniband/hw/bnxt_re/main.c 	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
req               387 drivers/infiniband/hw/bnxt_re/main.c 	req.ring_type = type;
req               388 drivers/infiniband/hw/bnxt_re/main.c 	req.ring_id = cpu_to_le16(fw_ring_id);
req               389 drivers/infiniband/hw/bnxt_re/main.c 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
req               394 drivers/infiniband/hw/bnxt_re/main.c 			"Failed to free HW ring:%d :%#x", req.ring_id, rc);
req               403 drivers/infiniband/hw/bnxt_re/main.c 	struct hwrm_ring_alloc_input req = {0};
req               412 drivers/infiniband/hw/bnxt_re/main.c 	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
req               413 drivers/infiniband/hw/bnxt_re/main.c 	req.enables = 0;
req               414 drivers/infiniband/hw/bnxt_re/main.c 	req.page_tbl_addr =  cpu_to_le64(dma_arr[0]);
req               417 drivers/infiniband/hw/bnxt_re/main.c 		req.page_size = BNXT_PAGE_SHIFT;
req               418 drivers/infiniband/hw/bnxt_re/main.c 		req.page_tbl_depth = 1;
req               420 drivers/infiniband/hw/bnxt_re/main.c 	req.fbo = 0;
req               422 drivers/infiniband/hw/bnxt_re/main.c 	req.logical_id = cpu_to_le16(map_index);
req               423 drivers/infiniband/hw/bnxt_re/main.c 	req.length = cpu_to_le32(ring_mask + 1);
req               424 drivers/infiniband/hw/bnxt_re/main.c 	req.ring_type = type;
req               425 drivers/infiniband/hw/bnxt_re/main.c 	req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
req               426 drivers/infiniband/hw/bnxt_re/main.c 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
req               439 drivers/infiniband/hw/bnxt_re/main.c 	struct hwrm_stat_ctx_free_input req = {0};
req               448 drivers/infiniband/hw/bnxt_re/main.c 	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
req               449 drivers/infiniband/hw/bnxt_re/main.c 	req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
req               450 drivers/infiniband/hw/bnxt_re/main.c 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&req,
req               451 drivers/infiniband/hw/bnxt_re/main.c 			    sizeof(req), DFLT_HWRM_CMD_TIMEOUT);
req               465 drivers/infiniband/hw/bnxt_re/main.c 	struct hwrm_stat_ctx_alloc_input req = {0};
req               477 drivers/infiniband/hw/bnxt_re/main.c 	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
req               478 drivers/infiniband/hw/bnxt_re/main.c 	req.update_period_ms = cpu_to_le32(1000);
req               479 drivers/infiniband/hw/bnxt_re/main.c 	req.stats_dma_addr = cpu_to_le64(dma_map);
req               480 drivers/infiniband/hw/bnxt_re/main.c 	req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext));
req               481 drivers/infiniband/hw/bnxt_re/main.c 	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
req               482 drivers/infiniband/hw/bnxt_re/main.c 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
req              1065 drivers/infiniband/hw/bnxt_re/main.c 	struct hwrm_queue_pri2cos_qcfg_input req = {0};
req              1078 drivers/infiniband/hw/bnxt_re/main.c 	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
req              1082 drivers/infiniband/hw/bnxt_re/main.c 	req.flags = cpu_to_le32(flags);
req              1083 drivers/infiniband/hw/bnxt_re/main.c 	req.port_id = bp->pf.port_id;
req              1085 drivers/infiniband/hw/bnxt_re/main.c 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
req              1255 drivers/infiniband/hw/bnxt_re/main.c 	struct hwrm_ver_get_input req = {0};
req              1260 drivers/infiniband/hw/bnxt_re/main.c 	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
req              1262 drivers/infiniband/hw/bnxt_re/main.c 	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
req              1263 drivers/infiniband/hw/bnxt_re/main.c 	req.hwrm_intf_min = HWRM_VERSION_MINOR;
req              1264 drivers/infiniband/hw/bnxt_re/main.c 	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
req              1265 drivers/infiniband/hw/bnxt_re/main.c 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
req               514 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct cmdq_destroy_srq req;
req               519 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);
req               522 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.srq_cid = cpu_to_le32(srq->id);
req               524 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (struct cmdq_base *)&req,
req               536 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct cmdq_create_srq req;
req               557 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);
req               560 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.dpi = cpu_to_le32(srq->dpi->dpi);
req               561 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.srq_handle = cpu_to_le64((uintptr_t)srq);
req               563 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
req               565 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.pg_size_lvl = cpu_to_le16((((u16)srq->hwq.level &
req               581 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
req               582 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.pd_id = cpu_to_le32(srq->pd->id);
req               583 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
req               585 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
req               637 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct cmdq_query_srq req;
req               644 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
req               645 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.srq_cid = cpu_to_le32(srq->id);
req               652 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
req               725 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct cmdq_create_qp1 req;
req               734 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
req               737 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.type = qp->type;
req               738 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.dpi = cpu_to_le32(qp->dpi->dpi);
req               739 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.qp_handle = cpu_to_le64(qp->qp_handle);
req               756 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
req               757 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.sq_pg_size_sq_lvl =
req               775 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.scq_cid = cpu_to_le32(qp->scq->id);
req               796 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
req               797 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.rq_pg_size_rq_lvl =
req               814 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			req.rcq_cid = cpu_to_le32(qp->rcq->id);
req               823 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.qp_flags = cpu_to_le32(qp_flags);
req               824 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
req               825 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.rq_size = cpu_to_le32(rq->hwq.max_elements);
req               827 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.sq_fwo_sq_sge =
req               830 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.rq_fwo_rq_sge =
req               834 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.pd_id = cpu_to_le32(qp->pd->id);
req               836 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
req               872 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct cmdq_create_qp req;
req               877 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
req               880 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.type = qp->type;
req               881 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.dpi = cpu_to_le32(qp->dpi->dpi);
req               882 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.qp_handle = cpu_to_le64(qp->qp_handle);
req               931 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
req               932 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.sq_pg_size_sq_lvl =
req               950 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.scq_cid = cpu_to_le32(qp->scq->id);
req               975 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
req               976 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.rq_pg_size_rq_lvl =
req               996 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			req.srq_cid = cpu_to_le32(qp->srq->id);
req              1001 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.rcq_cid = cpu_to_le32(qp->rcq->id);
req              1002 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.qp_flags = cpu_to_le32(qp_flags);
req              1003 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
req              1004 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.rq_size = cpu_to_le32(rq->hwq.max_elements);
req              1017 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.sq_fwo_sq_sge = cpu_to_le16(
req              1021 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.rq_fwo_rq_sge = cpu_to_le16(
req              1039 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
req              1056 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
req              1058 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.pd_id = cpu_to_le32(qp->pd->id);
req              1060 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
req              1185 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct cmdq_modify_qp req;
req              1192 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
req              1197 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.modify_mask = cpu_to_le32(qp->modify_flags);
req              1198 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.qp_cid = cpu_to_le32(qp->id);
req              1200 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.network_type_en_sqd_async_notify_new_state =
req              1205 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
req              1208 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.access = qp->access;
req              1213 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			req.pkey = cpu_to_le16(pkey);
req              1216 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.qkey = cpu_to_le32(qp->qkey);
req              1220 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.dgid[0] = cpu_to_le32(temp32[0]);
req              1221 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.dgid[1] = cpu_to_le32(temp32[1]);
req              1222 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.dgid[2] = cpu_to_le32(temp32[2]);
req              1223 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.dgid[3] = cpu_to_le32(temp32[3]);
req              1226 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.flow_label = cpu_to_le32(qp->ah.flow_label);
req              1229 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
req              1233 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.hop_limit = qp->ah.hop_limit;
req              1236 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.traffic_class = qp->ah.traffic_class;
req              1239 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		memcpy(req.dest_mac, qp->ah.dmac, 6);
req              1242 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.path_mtu = qp->path_mtu;
req              1245 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.timeout = qp->timeout;
req              1248 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.retry_cnt = qp->retry_cnt;
req              1251 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.rnr_retry = qp->rnr_retry;
req              1254 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.min_rnr_timer = qp->min_rnr_timer;
req              1257 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.rq_psn = cpu_to_le32(qp->rq.psn);
req              1260 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.sq_psn = cpu_to_le32(qp->sq.psn);
req              1263 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.max_rd_atomic =
req              1267 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.max_dest_rd_atomic =
req              1270 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
req              1271 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
req              1272 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
req              1273 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
req              1274 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
req              1276 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
req              1278 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
req              1280 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
req              1291 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct cmdq_query_qp req;
req              1299 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
req              1306 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.qp_cid = cpu_to_le32(qp->id);
req              1307 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
req              1308 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
req              1417 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct cmdq_destroy_qp req;
req              1425 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
req              1427 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.qp_cid = cpu_to_le32(qp->id);
req              1428 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
req              1930 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct cmdq_create_cq req;
req              1944 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
req              1951 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.dpi = cpu_to_le32(cq->dpi->dpi);
req              1952 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.cq_handle = cpu_to_le64(cq->cq_handle);
req              1954 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.cq_size = cpu_to_le32(cq->hwq.max_elements);
req              1956 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.pg_size_lvl = cpu_to_le32(
req              1967 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
req              1969 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.cq_fco_cnq_id = cpu_to_le32(
req              1973 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
req              1999 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct cmdq_destroy_cq req;
req              2004 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
req              2006 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.cq_cid = cpu_to_le32(cq->id);
req              2007 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
req                84 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
req                97 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	opcode = req->opcode;
req               120 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
req               133 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req->cookie = cpu_to_le16(cookie);
req               140 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	size = req->cmd_size;
req               144 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	bnxt_qplib_set_cmd_slots(req);
req               148 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	crsqe->resp->cookie = req->cookie;
req               149 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	crsqe->req_size = req->cmd_size;
req               150 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	if (req->resp_size && sb) {
req               153 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 		req->resp_addr = cpu_to_le64(sbuf->dma_addr);
req               154 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 		req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
req               159 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	preq = (u8 *)req;
req               205 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 				 struct cmdq_base *req,
req               215 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 		opcode = req->opcode;
req               216 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 		rc = __send_message(rcfw, req, resp, sb, is_block);
req               217 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 		cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
req               444 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	struct cmdq_deinitialize_fw req;
req               449 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
req               450 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
req               479 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	struct cmdq_initialize_fw req;
req               484 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
req               488 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
req               501 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
req               504 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.mrw_pg_size_mrw_lvl = (level << CMDQ_INITIALIZE_FW_MRW_LVL_SFT) |
req               507 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
req               510 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
req               513 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
req               516 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
req               519 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) |
req               522 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.tqm_pg_size_tqm_lvl = (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
req               525 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.qpc_page_dir =
req               527 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.mrw_page_dir =
req               529 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.srq_page_dir =
req               531 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.cq_page_dir =
req               533 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.tim_page_dir =
req               535 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.tqm_page_dir =
req               538 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
req               539 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
req               540 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
req               541 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);
req               543 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
req               544 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
req               545 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
req               546 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
req               547 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);
req               550 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
req               551 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
req                54 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h #define RCFW_CMD_PREP(req, CMD, cmd_flags)				\
req                56 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h 		memset(&(req), 0, sizeof((req)));			\
req                57 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h 		(req).opcode = CMDQ_BASE_OPCODE_##CMD;			\
req                58 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h 		(req).cmd_size = sizeof((req));				\
req                59 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h 		(req).flags = cpu_to_le16(cmd_flags);			\
req                97 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
req                99 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h 	req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
req               292 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h 				 struct cmdq_base *req, struct creq_base *resp,
req                60 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	struct cmdq_query_version req;
req                65 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	RCFW_CMD_PREP(req, QUERY_VERSION, cmd_flags);
req                67 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
req                80 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	struct cmdq_query_func req;
req                89 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);
req                99 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
req               100 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
req               178 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	struct cmdq_set_func_resources req;
req               183 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	RCFW_CMD_PREP(req, SET_FUNC_RESOURCES, cmd_flags);
req               185 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.number_of_qp = cpu_to_le32(ctx->qpc_count);
req               186 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.number_of_mrw = cpu_to_le32(ctx->mrw_count);
req               187 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.number_of_srq =  cpu_to_le32(ctx->srqc_count);
req               188 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.number_of_cq = cpu_to_le32(ctx->cq_count);
req               190 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
req               191 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
req               192 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
req               193 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
req               194 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);
req               196 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
req               249 drivers/infiniband/hw/bnxt_re/qplib_sp.c 		struct cmdq_delete_gid req;
req               254 drivers/infiniband/hw/bnxt_re/qplib_sp.c 		RCFW_CMD_PREP(req, DELETE_GID, cmd_flags);
req               260 drivers/infiniband/hw/bnxt_re/qplib_sp.c 		req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
req               261 drivers/infiniband/hw/bnxt_re/qplib_sp.c 		rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
req               319 drivers/infiniband/hw/bnxt_re/qplib_sp.c 		struct cmdq_add_gid req;
req               324 drivers/infiniband/hw/bnxt_re/qplib_sp.c 		RCFW_CMD_PREP(req, ADD_GID, cmd_flags);
req               326 drivers/infiniband/hw/bnxt_re/qplib_sp.c 		req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
req               327 drivers/infiniband/hw/bnxt_re/qplib_sp.c 		req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
req               328 drivers/infiniband/hw/bnxt_re/qplib_sp.c 		req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
req               329 drivers/infiniband/hw/bnxt_re/qplib_sp.c 		req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
req               337 drivers/infiniband/hw/bnxt_re/qplib_sp.c 				req.vlan = cpu_to_le16
req               339 drivers/infiniband/hw/bnxt_re/qplib_sp.c 			req.vlan |= cpu_to_le16
req               345 drivers/infiniband/hw/bnxt_re/qplib_sp.c 		req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
req               346 drivers/infiniband/hw/bnxt_re/qplib_sp.c 		req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
req               347 drivers/infiniband/hw/bnxt_re/qplib_sp.c 		req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
req               349 drivers/infiniband/hw/bnxt_re/qplib_sp.c 		rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
req               380 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	struct cmdq_modify_gid req;
req               384 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	RCFW_CMD_PREP(req, MODIFY_GID, cmd_flags);
req               386 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
req               387 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
req               388 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
req               389 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
req               391 drivers/infiniband/hw/bnxt_re/qplib_sp.c 		req.vlan |= cpu_to_le16
req               397 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
req               398 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
req               399 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
req               401 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.gid_index = cpu_to_le16(gid_idx);
req               403 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
req               500 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	struct cmdq_create_ah req;
req               507 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	RCFW_CMD_PREP(req, CREATE_AH, cmd_flags);
req               510 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.dgid[0] = cpu_to_le32(temp32[0]);
req               511 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.dgid[1] = cpu_to_le32(temp32[1]);
req               512 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.dgid[2] = cpu_to_le32(temp32[2]);
req               513 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.dgid[3] = cpu_to_le32(temp32[3]);
req               515 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.type = ah->nw_type;
req               516 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.hop_limit = ah->hop_limit;
req               517 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id[ah->sgid_index]);
req               518 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.dest_vlan_id_flow_label = cpu_to_le32((ah->flow_label &
req               521 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.pd_id = cpu_to_le32(ah->pd->id);
req               522 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.traffic_class = ah->traffic_class;
req               526 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.dest_mac[0] = cpu_to_le16(temp16[0]);
req               527 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.dest_mac[1] = cpu_to_le16(temp16[1]);
req               528 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.dest_mac[2] = cpu_to_le16(temp16[2]);
req               530 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
req               543 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	struct cmdq_destroy_ah req;
req               548 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags);
req               550 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.ah_cid = cpu_to_le32(ah->id);
req               552 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL,
req               560 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	struct cmdq_deallocate_key req;
req               570 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	RCFW_CMD_PREP(req, DEALLOCATE_KEY, cmd_flags);
req               572 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.mrw_flags = mrw->type;
req               577 drivers/infiniband/hw/bnxt_re/qplib_sp.c 		req.key = cpu_to_le32(mrw->rkey);
req               579 drivers/infiniband/hw/bnxt_re/qplib_sp.c 		req.key = cpu_to_le32(mrw->lkey);
req               581 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
req               596 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	struct cmdq_allocate_mrw req;
req               602 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags);
req               604 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.pd_id = cpu_to_le32(mrw->pd->id);
req               605 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.mrw_flags = mrw->type;
req               610 drivers/infiniband/hw/bnxt_re/qplib_sp.c 		req.access = CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY;
req               612 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.mrw_handle = cpu_to_le64(tmp);
req               614 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
req               632 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	struct cmdq_deregister_mr req;
req               637 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags);
req               639 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.lkey = cpu_to_le32(mrw->lkey);
req               640 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
req               659 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	struct cmdq_register_mr req;
req               703 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	RCFW_CMD_PREP(req, REGISTER_MR, cmd_flags);
req               709 drivers/infiniband/hw/bnxt_re/qplib_sp.c 		req.pbl = 0;
req               713 drivers/infiniband/hw/bnxt_re/qplib_sp.c 		req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
req               716 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.log2_pg_size_lvl = (level << CMDQ_REGISTER_MR_LVL_SFT) |
req               720 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
req               723 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.access = (mr->flags & 0xFFFF);
req               724 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.va = cpu_to_le64(mr->va);
req               725 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.key = cpu_to_le32(mr->lkey);
req               726 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.mr_size = cpu_to_le64(mr->total_size);
req               728 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
req               776 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	struct cmdq_map_tc_to_cos req;
req               780 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags);
req               781 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.cos0 = cpu_to_le16(cids[0]);
req               782 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.cos1 = cpu_to_le16(cids[1]);
req               784 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	return bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
req               791 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	struct cmdq_query_roce_stats req;
req               798 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	RCFW_CMD_PREP(req, QUERY_ROCE_STATS, cmd_flags);
req               808 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
req               809 drivers/infiniband/hw/bnxt_re/qplib_sp.c 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
req               171 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_tid_release *req;
req               173 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb = get_skb(skb, sizeof(*req), GFP_KERNEL);
req               176 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_put(skb, sizeof(*req));
req               177 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req               178 drivers/infiniband/hw/cxgb3/iwch_cm.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
req               186 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_set_tcb_field *req;
req               187 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
req               191 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_put(skb, sizeof(*req));
req               192 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req               193 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
req               194 drivers/infiniband/hw/cxgb3/iwch_cm.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
req               195 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->reply = 0;
req               196 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->cpu_idx = 0;
req               197 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->word = htons(W_TCB_RX_QUIESCE);
req               198 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
req               199 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);
req               207 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_set_tcb_field *req;
req               208 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
req               212 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_put(skb, sizeof(*req));
req               213 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req               214 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
req               215 drivers/infiniband/hw/cxgb3/iwch_cm.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
req               216 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->reply = 0;
req               217 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->cpu_idx = 0;
req               218 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->word = htons(W_TCB_RX_QUIESCE);
req               219 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
req               220 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->val = 0;
req               379 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_abort_req *req = cplhdr(skb);
req               382 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->cmd = CPL_ABORT_NO_RST;
req               388 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_close_con_req *req;
req               392 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb = get_skb(NULL, sizeof(*req), gfp);
req               399 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_put(skb, sizeof(*req));
req               400 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
req               401 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
req               402 drivers/infiniband/hw/cxgb3/iwch_cm.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
req               408 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_abort_req *req;
req               411 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb = get_skb(skb, sizeof(*req), gfp);
req               418 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_put_zero(skb, sizeof(*req));
req               419 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
req               420 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
req               421 drivers/infiniband/hw/cxgb3/iwch_cm.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
req               422 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->cmd = CPL_ABORT_SEND_RST;
req               428 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_act_open_req *req;
req               436 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
req               456 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_put(skb, sizeof(*req));
req               457 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req               458 drivers/infiniband/hw/cxgb3/iwch_cm.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
req               459 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->local_port = ep->com.local_addr.sin_port;
req               460 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->peer_port = ep->com.remote_addr.sin_port;
req               461 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
req               462 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
req               463 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->opt0h = htonl(opt0h);
req               464 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->opt0l = htonl(opt0l);
req               465 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->params = 0;
req               466 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->opt2 = htonl(opt2);
req               473 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct tx_data_wr *req;
req               482 drivers/infiniband/hw/cxgb3/iwch_cm.c 	if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
req               484 drivers/infiniband/hw/cxgb3/iwch_cm.c 		skb=alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
req               491 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_reserve(skb, sizeof(*req));
req               514 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_push(skb, sizeof(*req));
req               515 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
req               516 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
req               517 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->len = htonl(len);
req               518 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
req               520 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->flags = htonl(F_TX_INIT);
req               521 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->sndseq = htonl(ep->snd_seq);
req               533 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct tx_data_wr *req;
req               541 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
req               546 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_reserve(skb, sizeof(*req));
req               565 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_push(skb, sizeof(*req));
req               566 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
req               567 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
req               568 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->len = htonl(mpalen);
req               569 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
req               571 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->flags = htonl(F_TX_INIT);
req               572 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->sndseq = htonl(ep->snd_seq);
req               581 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct tx_data_wr *req;
req               590 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
req               596 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_reserve(skb, sizeof(*req));
req               616 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_push(skb, sizeof(*req));
req               617 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
req               618 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
req               619 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->len = htonl(len);
req               620 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
req               622 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->flags = htonl(F_TX_INIT);
req               623 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->sndseq = htonl(ep->snd_seq);
req               632 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_act_establish *req = cplhdr(skb);
req               633 drivers/infiniband/hw/cxgb3/iwch_cm.c 	unsigned int tid = GET_TID(req);
req               643 drivers/infiniband/hw/cxgb3/iwch_cm.c 	ep->snd_seq = ntohl(req->snd_isn);
req               644 drivers/infiniband/hw/cxgb3/iwch_cm.c 	ep->rcv_seq = ntohl(req->rcv_isn);
req               646 drivers/infiniband/hw/cxgb3/iwch_cm.c 	set_emss(ep, ntohs(req->tcp_opt));
req               791 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_rx_data_ack *req;
req               795 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
req               801 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_put(skb, sizeof(*req));
req               802 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req               803 drivers/infiniband/hw/cxgb3/iwch_cm.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
req               804 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
req              1197 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_pass_open_req *req;
req              1200 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
req              1206 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_put(skb, sizeof(*req));
req              1207 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req              1208 drivers/infiniband/hw/cxgb3/iwch_cm.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
req              1209 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->local_port = ep->com.local_addr.sin_port;
req              1210 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
req              1211 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->peer_port = 0;
req              1212 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->peer_ip = 0;
req              1213 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->peer_netmask = 0;
req              1214 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
req              1215 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
req              1216 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));
req              1239 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_close_listserv_req *req;
req              1242 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
req              1247 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_put(skb, sizeof(*req));
req              1248 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req              1249 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req->cpu_idx = 0;
req              1250 drivers/infiniband/hw/cxgb3/iwch_cm.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
req              1337 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_pass_accept_req *req = cplhdr(skb);
req              1338 drivers/infiniband/hw/cxgb3/iwch_cm.c 	unsigned int hwtid = GET_TID(req);
req              1354 drivers/infiniband/hw/cxgb3/iwch_cm.c 	tim.mac_addr = req->dst_mac;
req              1355 drivers/infiniband/hw/cxgb3/iwch_cm.c 	tim.vlan_tag = ntohs(req->vlan_tag);
req              1357 drivers/infiniband/hw/cxgb3/iwch_cm.c 		pr_err("%s bad dst mac %pM\n", __func__, req->dst_mac);
req              1363 drivers/infiniband/hw/cxgb3/iwch_cm.c 			req->local_ip,
req              1364 drivers/infiniband/hw/cxgb3/iwch_cm.c 			req->peer_ip,
req              1365 drivers/infiniband/hw/cxgb3/iwch_cm.c 			req->local_port,
req              1366 drivers/infiniband/hw/cxgb3/iwch_cm.c 			req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
req              1372 drivers/infiniband/hw/cxgb3/iwch_cm.c 	l2t = t3_l2t_get(tdev, dst, NULL, &req->peer_ip);
req              1389 drivers/infiniband/hw/cxgb3/iwch_cm.c 	child_ep->com.local_addr.sin_port = req->local_port;
req              1390 drivers/infiniband/hw/cxgb3/iwch_cm.c 	child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
req              1392 drivers/infiniband/hw/cxgb3/iwch_cm.c 	child_ep->com.remote_addr.sin_port = req->peer_port;
req              1393 drivers/infiniband/hw/cxgb3/iwch_cm.c 	child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
req              1396 drivers/infiniband/hw/cxgb3/iwch_cm.c 	child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
req              1402 drivers/infiniband/hw/cxgb3/iwch_cm.c 	accept_cr(child_ep, req->peer_ip, skb);
req              1405 drivers/infiniband/hw/cxgb3/iwch_cm.c 	reject_cr(tdev, hwtid, req->peer_ip, skb);
req              1413 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_pass_establish *req = cplhdr(skb);
req              1416 drivers/infiniband/hw/cxgb3/iwch_cm.c 	ep->snd_seq = ntohl(req->snd_isn);
req              1417 drivers/infiniband/hw/cxgb3/iwch_cm.c 	ep->rcv_seq = ntohl(req->rcv_isn);
req              1419 drivers/infiniband/hw/cxgb3/iwch_cm.c 	set_emss(ep, ntohs(req->tcp_opt));
req              1521 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_abort_req_rss *req = cplhdr(skb);
req              1530 drivers/infiniband/hw/cxgb3/iwch_cm.c 	if (is_neg_adv_abort(req->status)) {
req               575 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_abort_req *req = cplhdr(skb);
req               578 drivers/infiniband/hw/cxgb4/cm.c 	req->cmd = CPL_ABORT_NO_RST;
req               664 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_get_tcb *req;
req               665 drivers/infiniband/hw/cxgb4/cm.c 	int wrlen = roundup(sizeof(*req), 16);
req               667 drivers/infiniband/hw/cxgb4/cm.c 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
req               672 drivers/infiniband/hw/cxgb4/cm.c 	req = (struct cpl_get_tcb *) skb_put(skb, wrlen);
req               673 drivers/infiniband/hw/cxgb4/cm.c 	memset(req, 0, wrlen);
req               674 drivers/infiniband/hw/cxgb4/cm.c 	INIT_TP_WR(req, ep->hwtid);
req               675 drivers/infiniband/hw/cxgb4/cm.c 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_GET_TCB, ep->hwtid));
req               676 drivers/infiniband/hw/cxgb4/cm.c 	req->reply_ctrl = htons(REPLY_CHAN_V(0) | QUEUENO_V(ep->rss_qid));
req               715 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_act_open_req *req = NULL;
req               828 drivers/infiniband/hw/cxgb4/cm.c 			req = skb_put(skb, wrlen);
req               829 drivers/infiniband/hw/cxgb4/cm.c 			INIT_TP_WR(req, 0);
req               834 drivers/infiniband/hw/cxgb4/cm.c 			req = (struct cpl_act_open_req *)t5req;
req               839 drivers/infiniband/hw/cxgb4/cm.c 			req = (struct cpl_act_open_req *)t6req;
req               849 drivers/infiniband/hw/cxgb4/cm.c 		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
req               851 drivers/infiniband/hw/cxgb4/cm.c 		req->local_port = la->sin_port;
req               852 drivers/infiniband/hw/cxgb4/cm.c 		req->peer_port = ra->sin_port;
req               853 drivers/infiniband/hw/cxgb4/cm.c 		req->local_ip = la->sin_addr.s_addr;
req               854 drivers/infiniband/hw/cxgb4/cm.c 		req->peer_ip = ra->sin_addr.s_addr;
req               855 drivers/infiniband/hw/cxgb4/cm.c 		req->opt0 = cpu_to_be64(opt0);
req               858 drivers/infiniband/hw/cxgb4/cm.c 			req->params = cpu_to_be32(params);
req               859 drivers/infiniband/hw/cxgb4/cm.c 			req->opt2 = cpu_to_be32(opt2);
req               944 drivers/infiniband/hw/cxgb4/cm.c 	struct fw_ofld_tx_data_wr *req;
req               954 drivers/infiniband/hw/cxgb4/cm.c 	wrlen = roundup(mpalen + sizeof(*req), 16);
req               962 drivers/infiniband/hw/cxgb4/cm.c 	req = skb_put_zero(skb, wrlen);
req               963 drivers/infiniband/hw/cxgb4/cm.c 	req->op_to_immdlen = cpu_to_be32(
req               967 drivers/infiniband/hw/cxgb4/cm.c 	req->flowid_len16 = cpu_to_be32(
req               970 drivers/infiniband/hw/cxgb4/cm.c 	req->plen = cpu_to_be32(mpalen);
req               971 drivers/infiniband/hw/cxgb4/cm.c 	req->tunnel_to_proxy = cpu_to_be32(
req               975 drivers/infiniband/hw/cxgb4/cm.c 	mpa = (struct mpa_message *)(req + 1);
req              1048 drivers/infiniband/hw/cxgb4/cm.c 	struct fw_ofld_tx_data_wr *req;
req              1059 drivers/infiniband/hw/cxgb4/cm.c 	wrlen = roundup(mpalen + sizeof(*req), 16);
req              1068 drivers/infiniband/hw/cxgb4/cm.c 	req = skb_put_zero(skb, wrlen);
req              1069 drivers/infiniband/hw/cxgb4/cm.c 	req->op_to_immdlen = cpu_to_be32(
req              1073 drivers/infiniband/hw/cxgb4/cm.c 	req->flowid_len16 = cpu_to_be32(
req              1076 drivers/infiniband/hw/cxgb4/cm.c 	req->plen = cpu_to_be32(mpalen);
req              1077 drivers/infiniband/hw/cxgb4/cm.c 	req->tunnel_to_proxy = cpu_to_be32(
req              1081 drivers/infiniband/hw/cxgb4/cm.c 	mpa = (struct mpa_message *)(req + 1);
req              1128 drivers/infiniband/hw/cxgb4/cm.c 	struct fw_ofld_tx_data_wr *req;
req              1139 drivers/infiniband/hw/cxgb4/cm.c 	wrlen = roundup(mpalen + sizeof(*req), 16);
req              1148 drivers/infiniband/hw/cxgb4/cm.c 	req = skb_put_zero(skb, wrlen);
req              1149 drivers/infiniband/hw/cxgb4/cm.c 	req->op_to_immdlen = cpu_to_be32(
req              1153 drivers/infiniband/hw/cxgb4/cm.c 	req->flowid_len16 = cpu_to_be32(
req              1156 drivers/infiniband/hw/cxgb4/cm.c 	req->plen = cpu_to_be32(mpalen);
req              1157 drivers/infiniband/hw/cxgb4/cm.c 	req->tunnel_to_proxy = cpu_to_be32(
req              1161 drivers/infiniband/hw/cxgb4/cm.c 	mpa = (struct mpa_message *)(req + 1);
req              1217 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_act_establish *req = cplhdr(skb);
req              1218 drivers/infiniband/hw/cxgb4/cm.c 	unsigned short tcp_opt = ntohs(req->tcp_opt);
req              1219 drivers/infiniband/hw/cxgb4/cm.c 	unsigned int tid = GET_TID(req);
req              1220 drivers/infiniband/hw/cxgb4/cm.c 	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
req              1227 drivers/infiniband/hw/cxgb4/cm.c 		 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
req              1237 drivers/infiniband/hw/cxgb4/cm.c 	ep->snd_seq = be32_to_cpu(req->snd_isn);
req              1238 drivers/infiniband/hw/cxgb4/cm.c 	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
req              1961 drivers/infiniband/hw/cxgb4/cm.c 	struct fw_ofld_connection_wr *req;
req              1967 drivers/infiniband/hw/cxgb4/cm.c 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
req              1968 drivers/infiniband/hw/cxgb4/cm.c 	req = __skb_put_zero(skb, sizeof(*req));
req              1969 drivers/infiniband/hw/cxgb4/cm.c 	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
req              1970 drivers/infiniband/hw/cxgb4/cm.c 	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
req              1971 drivers/infiniband/hw/cxgb4/cm.c 	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
req              1975 drivers/infiniband/hw/cxgb4/cm.c 	req->le.lport = sin->sin_port;
req              1976 drivers/infiniband/hw/cxgb4/cm.c 	req->le.u.ipv4.lip = sin->sin_addr.s_addr;
req              1978 drivers/infiniband/hw/cxgb4/cm.c 	req->le.pport = sin->sin_port;
req              1979 drivers/infiniband/hw/cxgb4/cm.c 	req->le.u.ipv4.pip = sin->sin_addr.s_addr;
req              1980 drivers/infiniband/hw/cxgb4/cm.c 	req->tcb.t_state_to_astid =
req              1983 drivers/infiniband/hw/cxgb4/cm.c 	req->tcb.cplrxdataack_cplpassacceptrpl =
req              1985 drivers/infiniband/hw/cxgb4/cm.c 	req->tcb.tx_max = (__force __be32) jiffies;
req              1986 drivers/infiniband/hw/cxgb4/cm.c 	req->tcb.rcv_adv = htons(1);
req              2000 drivers/infiniband/hw/cxgb4/cm.c 	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
req              2012 drivers/infiniband/hw/cxgb4/cm.c 	req->tcb.opt2 = (__force __be32) (PACE_V(1) |
req              2018 drivers/infiniband/hw/cxgb4/cm.c 		req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
req              2020 drivers/infiniband/hw/cxgb4/cm.c 		req->tcb.opt2 |= (__force __be32)SACK_EN_F;
req              2022 drivers/infiniband/hw/cxgb4/cm.c 		req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
req              2023 drivers/infiniband/hw/cxgb4/cm.c 	req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
req              2024 drivers/infiniband/hw/cxgb4/cm.c 	req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
req              2413 drivers/infiniband/hw/cxgb4/cm.c 		     struct cpl_pass_accept_req *req)
req              2426 drivers/infiniband/hw/cxgb4/cm.c 		      enable_tcp_timestamps && req->tcpopt.tstamp,
req              2451 drivers/infiniband/hw/cxgb4/cm.c 	if (enable_tcp_timestamps && req->tcpopt.tstamp)
req              2453 drivers/infiniband/hw/cxgb4/cm.c 	if (enable_tcp_sack && req->tcpopt.sack)
req              2459 drivers/infiniband/hw/cxgb4/cm.c 		u32 hlen = ntohl(req->hdr_len);
req              2462 drivers/infiniband/hw/cxgb4/cm.c 			tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
req              2465 drivers/infiniband/hw/cxgb4/cm.c 			tcph = (const void *)(req + 1) +
req              2516 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_pass_accept_req *req = cplhdr(skb);
req              2517 drivers/infiniband/hw/cxgb4/cm.c 	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
req              2519 drivers/infiniband/hw/cxgb4/cm.c 	unsigned int hwtid = GET_TID(req);
req              2525 drivers/infiniband/hw/cxgb4/cm.c 	u16 peer_mss = ntohs(req->tcpopt.mss);
req              2545 drivers/infiniband/hw/cxgb4/cm.c 		tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
req              2547 drivers/infiniband/hw/cxgb4/cm.c 	cxgb_get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type,
req              2593 drivers/infiniband/hw/cxgb4/cm.c 	       ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
req              2654 drivers/infiniband/hw/cxgb4/cm.c 	if (accept_cr(child_ep, skb, req)) {
req              2679 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_pass_establish *req = cplhdr(skb);
req              2680 drivers/infiniband/hw/cxgb4/cm.c 	unsigned int tid = GET_TID(req);
req              2682 drivers/infiniband/hw/cxgb4/cm.c 	u16 tcp_opt = ntohs(req->tcp_opt);
req              2686 drivers/infiniband/hw/cxgb4/cm.c 	ep->snd_seq = be32_to_cpu(req->snd_isn);
req              2687 drivers/infiniband/hw/cxgb4/cm.c 	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
req              2814 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_abort_req_rss6 *req = cplhdr(skb);
req              2820 drivers/infiniband/hw/cxgb4/cm.c 	unsigned int tid = GET_TID(req);
req              2830 drivers/infiniband/hw/cxgb4/cm.c 	status = ABORT_RSS_STATUS_G(be32_to_cpu(req->srqidx_status));
req              2892 drivers/infiniband/hw/cxgb4/cm.c 					be32_to_cpu(req->srqidx_status));
req              3745 drivers/infiniband/hw/cxgb4/cm.c 			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
req              3748 drivers/infiniband/hw/cxgb4/cm.c 	int atid = be32_to_cpu(req->tid);
req              3751 drivers/infiniband/hw/cxgb4/cm.c 					   (__force u32) req->tid);
req              3755 drivers/infiniband/hw/cxgb4/cm.c 	switch (req->retval) {
req              3772 drivers/infiniband/hw/cxgb4/cm.c 		       __func__, req->retval);
req              3776 drivers/infiniband/hw/cxgb4/cm.c 	       req->retval, atid);
req              3780 drivers/infiniband/hw/cxgb4/cm.c 	connect_reply_upcall(ep, status2errno(req->retval));
req              3796 drivers/infiniband/hw/cxgb4/cm.c 			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
req              3802 drivers/infiniband/hw/cxgb4/cm.c 	rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
req              3803 drivers/infiniband/hw/cxgb4/cm.c 	if (req->retval) {
req              3804 drivers/infiniband/hw/cxgb4/cm.c 		pr_err("%s passive open failure %d\n", __func__, req->retval);
req              3813 drivers/infiniband/hw/cxgb4/cm.c 					(__force u32) req->tid)));
req              3898 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
req              3905 drivers/infiniband/hw/cxgb4/cm.c 		req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
req              3906 drivers/infiniband/hw/cxgb4/cm.c 		switch (req->t_state) {
req              3908 drivers/infiniband/hw/cxgb4/cm.c 			active_ofld_conn_reply(dev, skb, req);
req              3911 drivers/infiniband/hw/cxgb4/cm.c 			passive_ofld_conn_reply(dev, skb, req);
req              3915 drivers/infiniband/hw/cxgb4/cm.c 			       __func__, req->t_state);
req              3931 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_pass_accept_req *req;
req              3944 drivers/infiniband/hw/cxgb4/cm.c 	__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
req              3954 drivers/infiniband/hw/cxgb4/cm.c 	req = __skb_push(skb, sizeof(*req));
req              3955 drivers/infiniband/hw/cxgb4/cm.c 	memset(req, 0, sizeof(*req));
req              3956 drivers/infiniband/hw/cxgb4/cm.c 	req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
req              3963 drivers/infiniband/hw/cxgb4/cm.c 	req->hdr_len =
req              3969 drivers/infiniband/hw/cxgb4/cm.c 		req->hdr_len |= cpu_to_be32(TCP_HDR_LEN_V(tcp_hdr_len) |
req              3974 drivers/infiniband/hw/cxgb4/cm.c 		req->hdr_len |= cpu_to_be32(T6_TCP_HDR_LEN_V(tcp_hdr_len) |
req              3978 drivers/infiniband/hw/cxgb4/cm.c 	req->vlan = vlantag;
req              3979 drivers/infiniband/hw/cxgb4/cm.c 	req->len = len;
req              3980 drivers/infiniband/hw/cxgb4/cm.c 	req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
req              3982 drivers/infiniband/hw/cxgb4/cm.c 	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
req              3984 drivers/infiniband/hw/cxgb4/cm.c 		req->tcpopt.wsf = tmp_opt.snd_wscale;
req              3985 drivers/infiniband/hw/cxgb4/cm.c 	req->tcpopt.tstamp = tmp_opt.saw_tstamp;
req              3987 drivers/infiniband/hw/cxgb4/cm.c 		req->tcpopt.sack = 1;
req              3988 drivers/infiniband/hw/cxgb4/cm.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
req              3999 drivers/infiniband/hw/cxgb4/cm.c 	struct fw_ofld_connection_wr *req;
req              4006 drivers/infiniband/hw/cxgb4/cm.c 	req = __skb_put_zero(req_skb, sizeof(*req));
req              4007 drivers/infiniband/hw/cxgb4/cm.c 	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
req              4008 drivers/infiniband/hw/cxgb4/cm.c 	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
req              4009 drivers/infiniband/hw/cxgb4/cm.c 	req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
req              4010 drivers/infiniband/hw/cxgb4/cm.c 	req->le.filter = (__force __be32) filter;
req              4011 drivers/infiniband/hw/cxgb4/cm.c 	req->le.lport = lport;
req              4012 drivers/infiniband/hw/cxgb4/cm.c 	req->le.pport = rport;
req              4013 drivers/infiniband/hw/cxgb4/cm.c 	req->le.u.ipv4.lip = laddr;
req              4014 drivers/infiniband/hw/cxgb4/cm.c 	req->le.u.ipv4.pip = raddr;
req              4015 drivers/infiniband/hw/cxgb4/cm.c 	req->tcb.rcv_nxt = htonl(rcv_isn + 1);
req              4016 drivers/infiniband/hw/cxgb4/cm.c 	req->tcb.rcv_adv = htons(window);
req              4017 drivers/infiniband/hw/cxgb4/cm.c 	req->tcb.t_state_to_astid =
req              4027 drivers/infiniband/hw/cxgb4/cm.c 	req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));
req              4035 drivers/infiniband/hw/cxgb4/cm.c 	req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
req              4036 drivers/infiniband/hw/cxgb4/cm.c 	req->cookie = (uintptr_t)skb;
req              4065 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_pass_accept_req *req = (void *)(rss + 1);
req              4116 drivers/infiniband/hw/cxgb4/cm.c 		eh = (struct ethhdr *)(req + 1);
req              4119 drivers/infiniband/hw/cxgb4/cm.c 		vlan_eh = (struct vlan_ethhdr *)(req + 1);
req              4402 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_abort_req_rss *req = cplhdr(skb);
req              4404 drivers/infiniband/hw/cxgb4/cm.c 	unsigned int tid = GET_TID(req);
req              4413 drivers/infiniband/hw/cxgb4/cm.c 	if (cxgb_is_neg_adv(req->status)) {
req              4415 drivers/infiniband/hw/cxgb4/cm.c 			 ep->hwtid, req->status,
req              4416 drivers/infiniband/hw/cxgb4/cm.c 			 neg_adv_str(req->status));
req                66 drivers/infiniband/hw/cxgb4/mem.c 	struct ulp_mem_io *req;
req                75 drivers/infiniband/hw/cxgb4/mem.c 	wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);
req                84 drivers/infiniband/hw/cxgb4/mem.c 	req = __skb_put_zero(skb, wr_len);
req                85 drivers/infiniband/hw/cxgb4/mem.c 	INIT_ULPTX_WR(req, wr_len, 0, 0);
req                86 drivers/infiniband/hw/cxgb4/mem.c 	req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
req                88 drivers/infiniband/hw/cxgb4/mem.c 	req->wr.wr_lo = wr_waitp ? (__force __be64)(unsigned long)wr_waitp : 0L;
req                89 drivers/infiniband/hw/cxgb4/mem.c 	req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
req                90 drivers/infiniband/hw/cxgb4/mem.c 	req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
req                93 drivers/infiniband/hw/cxgb4/mem.c 	req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
req                94 drivers/infiniband/hw/cxgb4/mem.c 	req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
req                95 drivers/infiniband/hw/cxgb4/mem.c 	req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));
req                97 drivers/infiniband/hw/cxgb4/mem.c 	sgl = (struct ulptx_sgl *)(req + 1);
req               114 drivers/infiniband/hw/cxgb4/mem.c 	struct ulp_mem_io *req;
req               133 drivers/infiniband/hw/cxgb4/mem.c 		wr_len = roundup(sizeof(*req) + sizeof(*sc) +
req               144 drivers/infiniband/hw/cxgb4/mem.c 		req = __skb_put_zero(skb, wr_len);
req               145 drivers/infiniband/hw/cxgb4/mem.c 		INIT_ULPTX_WR(req, wr_len, 0, 0);
req               148 drivers/infiniband/hw/cxgb4/mem.c 			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
req               150 drivers/infiniband/hw/cxgb4/mem.c 			req->wr.wr_lo = (__force __be64)(unsigned long)wr_waitp;
req               152 drivers/infiniband/hw/cxgb4/mem.c 			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
req               153 drivers/infiniband/hw/cxgb4/mem.c 		req->wr.wr_mid = cpu_to_be32(
req               156 drivers/infiniband/hw/cxgb4/mem.c 		req->cmd = cmd;
req               157 drivers/infiniband/hw/cxgb4/mem.c 		req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(
req               159 drivers/infiniband/hw/cxgb4/mem.c 		req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
req               161 drivers/infiniband/hw/cxgb4/mem.c 		req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr + i * 3));
req               163 drivers/infiniband/hw/cxgb4/mem.c 		sc = (struct ulptx_idata *)(req + 1);
req              2728 drivers/infiniband/hw/hfi1/mad.c 	struct opa_port_status_req *req =
req              2732 drivers/infiniband/hw/hfi1/mad.c 	unsigned long vl_select_mask = be32_to_cpu(req->vl_select_mask);
req              2736 drivers/infiniband/hw/hfi1/mad.c 	u8 port_num = req->port_num;
req              2979 drivers/infiniband/hw/hfi1/mad.c 	struct opa_port_data_counters_msg *req =
req              2999 drivers/infiniband/hw/hfi1/mad.c 	num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
req              3000 drivers/infiniband/hw/hfi1/mad.c 	vl_select_mask = be32_to_cpu(req->vl_select_mask);
req              3001 drivers/infiniband/hw/hfi1/mad.c 	res_lli = (u8)(be32_to_cpu(req->resolution) & MSK_LLI) >> MSK_LLI_SFT;
req              3003 drivers/infiniband/hw/hfi1/mad.c 	res_ler = (u8)(be32_to_cpu(req->resolution) & MSK_LER) >> MSK_LER_SFT;
req              3012 drivers/infiniband/hw/hfi1/mad.c 	response_data_size = struct_size(req, port[0].vls, num_vls);
req              3023 drivers/infiniband/hw/hfi1/mad.c 	port_mask = be64_to_cpu(req->port_select_mask[3]);
req              3032 drivers/infiniband/hw/hfi1/mad.c 	rsp = &req->port[0];
req              3203 drivers/infiniband/hw/hfi1/mad.c 	struct opa_port_error_counters64_msg *req;
req              3216 drivers/infiniband/hw/hfi1/mad.c 	req = (struct opa_port_error_counters64_msg *)pmp->data;
req              3220 drivers/infiniband/hw/hfi1/mad.c 	num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
req              3221 drivers/infiniband/hw/hfi1/mad.c 	num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
req              3228 drivers/infiniband/hw/hfi1/mad.c 	response_data_size = struct_size(req, port[0].vls, num_vls);
req              3238 drivers/infiniband/hw/hfi1/mad.c 	port_mask = be64_to_cpu(req->port_select_mask[3]);
req              3247 drivers/infiniband/hw/hfi1/mad.c 	rsp = &req->port[0];
req              3270 drivers/infiniband/hw/hfi1/mad.c 	vl_select_mask = be32_to_cpu(req->vl_select_mask);
req              3377 drivers/infiniband/hw/hfi1/mad.c 	struct opa_port_error_info_msg *req;
req              3385 drivers/infiniband/hw/hfi1/mad.c 	req = (struct opa_port_error_info_msg *)pmp->data;
req              3386 drivers/infiniband/hw/hfi1/mad.c 	rsp = &req->port[0];
req              3389 drivers/infiniband/hw/hfi1/mad.c 	num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
req              3410 drivers/infiniband/hw/hfi1/mad.c 	port_mask = be64_to_cpu(req->port_select_mask[3]);
req              3474 drivers/infiniband/hw/hfi1/mad.c 	struct opa_clear_port_status *req =
req              3480 drivers/infiniband/hw/hfi1/mad.c 	u64 portn = be64_to_cpu(req->port_select_mask[3]);
req              3481 drivers/infiniband/hw/hfi1/mad.c 	u32 counter_select = be32_to_cpu(req->counter_select_mask);
req              3617 drivers/infiniband/hw/hfi1/mad.c 		*resp_len += sizeof(*req);
req              3627 drivers/infiniband/hw/hfi1/mad.c 	struct opa_port_error_info_msg *req;
req              3635 drivers/infiniband/hw/hfi1/mad.c 	req = (struct opa_port_error_info_msg *)pmp->data;
req              3636 drivers/infiniband/hw/hfi1/mad.c 	rsp = &req->port[0];
req              3639 drivers/infiniband/hw/hfi1/mad.c 	num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
req              3652 drivers/infiniband/hw/hfi1/mad.c 	port_mask = be64_to_cpu(req->port_select_mask[3]);
req              3661 drivers/infiniband/hw/hfi1/mad.c 	error_info_select = be32_to_cpu(req->error_info_select_mask);
req              3694 drivers/infiniband/hw/hfi1/mad.c 		*resp_len += sizeof(*req);
req               124 drivers/infiniband/hw/hfi1/rc.c 	struct tid_rdma_request *req;
req               222 drivers/infiniband/hw/hfi1/rc.c 			req = ack_to_tid_req(e);
req               223 drivers/infiniband/hw/hfi1/rc.c 			if (req->state == TID_REQUEST_RESEND ||
req               224 drivers/infiniband/hw/hfi1/rc.c 			    req->state == TID_REQUEST_INIT_RESEND)
req               227 drivers/infiniband/hw/hfi1/rc.c 			qp->s_ack_rdma_psn = mask_psn(e->psn + req->cur_seg);
req               313 drivers/infiniband/hw/hfi1/rc.c 		req = ack_to_tid_req(e);
req               322 drivers/infiniband/hw/hfi1/rc.c 		    req->cur_seg == req->alloc_seg) {
req               338 drivers/infiniband/hw/hfi1/rc.c 						     e->lpsn, req);
req               339 drivers/infiniband/hw/hfi1/rc.c 		if (req->cur_seg != req->total_segs)
req               437 drivers/infiniband/hw/hfi1/rc.c 	struct tid_rdma_request *req = NULL;
req               743 drivers/infiniband/hw/hfi1/rc.c 			req = wqe_to_tid_req(wqe);
req               746 drivers/infiniband/hw/hfi1/rc.c 				priv->pending_tid_w_resp += req->total_segs;
req               750 drivers/infiniband/hw/hfi1/rc.c 				req->state = TID_REQUEST_RESEND;
req               751 drivers/infiniband/hw/hfi1/rc.c 				req->comp_seg = delta_psn(bth2, wqe->psn);
req               756 drivers/infiniband/hw/hfi1/rc.c 				req->setup_head = req->clear_tail;
req               765 drivers/infiniband/hw/hfi1/rc.c 							  req);
req               801 drivers/infiniband/hw/hfi1/rc.c 			req = wqe_to_tid_req(wqe);
req               805 drivers/infiniband/hw/hfi1/rc.c 							 req);
req               824 drivers/infiniband/hw/hfi1/rc.c 					&req->flows[req->setup_head];
req               838 drivers/infiniband/hw/hfi1/rc.c 					req->isge = 0;
req               839 drivers/infiniband/hw/hfi1/rc.c 					req->clear_tail = req->setup_head;
req               840 drivers/infiniband/hw/hfi1/rc.c 					req->flow_idx = req->setup_head;
req               841 drivers/infiniband/hw/hfi1/rc.c 					req->state = TID_REQUEST_ACTIVE;
req               845 drivers/infiniband/hw/hfi1/rc.c 				req->cur_seg = 0;
req               846 drivers/infiniband/hw/hfi1/rc.c 				req->comp_seg = 0;
req               847 drivers/infiniband/hw/hfi1/rc.c 				req->ack_pending = 0;
req               848 drivers/infiniband/hw/hfi1/rc.c 				req->flow_idx = req->clear_tail;
req               849 drivers/infiniband/hw/hfi1/rc.c 				req->state = TID_REQUEST_RESEND;
req               851 drivers/infiniband/hw/hfi1/rc.c 			req->s_next_psn = qp->s_psn;
req               853 drivers/infiniband/hw/hfi1/rc.c 			len = min_t(u32, req->seg_len,
req               854 drivers/infiniband/hw/hfi1/rc.c 				    wqe->length - req->seg_len * req->cur_seg);
req               867 drivers/infiniband/hw/hfi1/rc.c 			if (req->cur_seg >= req->total_segs &&
req               933 drivers/infiniband/hw/hfi1/rc.c 			qp->s_psn = req->s_next_psn;
req              1057 drivers/infiniband/hw/hfi1/rc.c 		req = wqe_to_tid_req(wqe);
req              1058 drivers/infiniband/hw/hfi1/rc.c 		req->state = TID_REQUEST_RESEND;
req              1061 drivers/infiniband/hw/hfi1/rc.c 		req->comp_seg = delta_psn(qp->s_psn, wqe->psn);
req              1062 drivers/infiniband/hw/hfi1/rc.c 		len = wqe->length - (req->comp_seg * remote->max_len);
req              1076 drivers/infiniband/hw/hfi1/rc.c 						  wqe->psn, wqe->lpsn, req);
req              1083 drivers/infiniband/hw/hfi1/rc.c 		req = wqe_to_tid_req(wqe);
req              1090 drivers/infiniband/hw/hfi1/rc.c 		req->cur_seg = delta_psn(qp->s_psn, wqe->psn) / priv->pkts_ps;
req              1098 drivers/infiniband/hw/hfi1/rc.c 		req->state = TID_REQUEST_RESEND;
req              1100 drivers/infiniband/hw/hfi1/rc.c 		if (req->state != TID_REQUEST_ACTIVE) {
req              1105 drivers/infiniband/hw/hfi1/rc.c 			hfi1_kern_exp_rcv_clear_all(req);
req              1111 drivers/infiniband/hw/hfi1/rc.c 		req->state = TID_REQUEST_RESEND;
req              1112 drivers/infiniband/hw/hfi1/rc.c 		len = min_t(u32, req->seg_len,
req              1113 drivers/infiniband/hw/hfi1/rc.c 			    wqe->length - req->seg_len * req->cur_seg);
req              1114 drivers/infiniband/hw/hfi1/rc.c 		flow = &req->flows[req->flow_idx];
req              1116 drivers/infiniband/hw/hfi1/rc.c 		req->s_next_psn = flow->flow_state.ib_lpsn + 1;
req              1126 drivers/infiniband/hw/hfi1/rc.c 		if (req->cur_seg >= req->total_segs &&
req              1129 drivers/infiniband/hw/hfi1/rc.c 		qp->s_psn = req->s_next_psn;
req              1131 drivers/infiniband/hw/hfi1/rc.c 						 wqe->psn, wqe->lpsn, req);
req              1134 drivers/infiniband/hw/hfi1/rc.c 		req = wqe_to_tid_req(wqe);
req              1158 drivers/infiniband/hw/hfi1/rc.c 		len = min_t(u32, req->seg_len,
req              1159 drivers/infiniband/hw/hfi1/rc.c 			    wqe->length - req->seg_len * req->cur_seg);
req              1169 drivers/infiniband/hw/hfi1/rc.c 		if (req->cur_seg >= req->total_segs &&
req              1172 drivers/infiniband/hw/hfi1/rc.c 		qp->s_psn = req->s_next_psn;
req              1174 drivers/infiniband/hw/hfi1/rc.c 						 wqe->psn, wqe->lpsn, req);
req              1470 drivers/infiniband/hw/hfi1/rc.c 		struct tid_rdma_request *req = wqe_to_tid_req(wqe);
req              1477 drivers/infiniband/hw/hfi1/rc.c 			req->ack_pending = cur_seg - req->comp_seg;
req              1478 drivers/infiniband/hw/hfi1/rc.c 			priv->pending_tid_r_segs += req->ack_pending;
req              1479 drivers/infiniband/hw/hfi1/rc.c 			qp->s_num_rd_atomic += req->ack_pending;
req              1484 drivers/infiniband/hw/hfi1/rc.c 								req);
req              1486 drivers/infiniband/hw/hfi1/rc.c 			priv->pending_tid_r_segs += req->total_segs;
req              1487 drivers/infiniband/hw/hfi1/rc.c 			qp->s_num_rd_atomic += req->total_segs;
req              1639 drivers/infiniband/hw/hfi1/rc.c 					struct tid_rdma_request *req;
req              1641 drivers/infiniband/hw/hfi1/rc.c 					req = wqe_to_tid_req(wqe);
req              1642 drivers/infiniband/hw/hfi1/rc.c 					hfi1_kern_exp_rcv_clear_all(req);
req              1743 drivers/infiniband/hw/hfi1/rc.c 	struct tid_rdma_request *req;
req              1784 drivers/infiniband/hw/hfi1/rc.c 		req = wqe_to_tid_req(wqe);
req              1785 drivers/infiniband/hw/hfi1/rc.c 		if (head == tail && req->comp_seg < req->total_segs) {
req              1825 drivers/infiniband/hw/hfi1/rc.c 		req = wqe_to_tid_req(wqe);
req              1827 drivers/infiniband/hw/hfi1/rc.c 		    req->ack_seg < req->cur_seg)
req               111 drivers/infiniband/hw/hfi1/tid_rdma.c static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req);
req               112 drivers/infiniband/hw/hfi1/tid_rdma.c static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
req               115 drivers/infiniband/hw/hfi1/tid_rdma.c 				struct tid_rdma_request *req);
req               892 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr);
req               895 drivers/infiniband/hw/hfi1/tid_rdma.c 		trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 0, 0,
req               929 drivers/infiniband/hw/hfi1/tid_rdma.c 				trace_hfi1_tid_pageset(flow->req->qp, setcount,
req              1027 drivers/infiniband/hw/hfi1/tid_rdma.c 		trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 0, v0);
req              1030 drivers/infiniband/hw/hfi1/tid_rdma.c 		trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 1, v1);
req              1084 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req = flow->req;
req              1086 drivers/infiniband/hw/hfi1/tid_rdma.c 	u32 length = flow->req->seg_len;
req              1090 drivers/infiniband/hw/hfi1/tid_rdma.c 	while (length && req->isge < ss->num_sge) {
req              1097 drivers/infiniband/hw/hfi1/tid_rdma.c 			if (++req->isge < ss->num_sge)
req              1098 drivers/infiniband/hw/hfi1/tid_rdma.c 				*sge = ss->sg_list[req->isge - 1];
req              1110 drivers/infiniband/hw/hfi1/tid_rdma.c 	flow->length = flow->req->seg_len - length;
req              1111 drivers/infiniband/hw/hfi1/tid_rdma.c 	*last = req->isge == ss->num_sge ? false : true;
req              1121 drivers/infiniband/hw/hfi1/tid_rdma.c 	dd = flow->req->rcd->dd;
req              1137 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_devdata *dd = flow->req->rcd->dd;
req              1176 drivers/infiniband/hw/hfi1/tid_rdma.c 		trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head,
req              1185 drivers/infiniband/hw/hfi1/tid_rdma.c 	if (flow->req->qp->pmtu == enum_to_mtu(OPA_MTU_4096))
req              1213 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_node_add(flow->req->qp, s, flow->tnode_cnt - 1,
req              1232 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_ctxtdata *rcd = flow->req->rcd;
req              1286 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_msg_alloc_tids(flow->req->qp, " insufficient tids: needed ",
req              1296 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_ctxtdata *rcd = flow->req->rcd;
req              1301 drivers/infiniband/hw/hfi1/tid_rdma.c 	u32 pmtu_pg = flow->req->qp->pmtu >> PAGE_SHIFT;
req              1340 drivers/infiniband/hw/hfi1/tid_rdma.c 			   flow->req->qp, flow->tidcnt - 1,
req              1363 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_ctxtdata *rcd = flow->req->rcd;
req              1392 drivers/infiniband/hw/hfi1/tid_rdma.c 		struct hfi1_ctxtdata *rcd = flow->req->rcd;
req              1409 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, flow);
req              1454 drivers/infiniband/hw/hfi1/tid_rdma.c int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req,
req              1456 drivers/infiniband/hw/hfi1/tid_rdma.c 	__must_hold(&req->qp->s_lock)
req              1458 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_flow *flow = &req->flows[req->setup_head];
req              1459 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_ctxtdata *rcd = req->rcd;
req              1460 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_qp_priv *qpriv = req->qp->priv;
req              1463 drivers/infiniband/hw/hfi1/tid_rdma.c 	u16 clear_tail = req->clear_tail;
req              1465 drivers/infiniband/hw/hfi1/tid_rdma.c 	lockdep_assert_held(&req->qp->s_lock);
req              1472 drivers/infiniband/hw/hfi1/tid_rdma.c 	if (!CIRC_SPACE(req->setup_head, clear_tail, MAX_FLOWS) ||
req              1473 drivers/infiniband/hw/hfi1/tid_rdma.c 	    CIRC_CNT(req->setup_head, clear_tail, MAX_FLOWS) >=
req              1474 drivers/infiniband/hw/hfi1/tid_rdma.c 	    req->n_flows)
req              1483 drivers/infiniband/hw/hfi1/tid_rdma.c 		hfi1_wait_kmem(flow->req->qp);
req              1488 drivers/infiniband/hw/hfi1/tid_rdma.c 	if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp))
req              1521 drivers/infiniband/hw/hfi1/tid_rdma.c 	dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp);
req              1527 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1);
req              1530 drivers/infiniband/hw/hfi1/tid_rdma.c 	queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp);
req              1546 drivers/infiniband/hw/hfi1/tid_rdma.c int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req)
req              1547 drivers/infiniband/hw/hfi1/tid_rdma.c 	__must_hold(&req->qp->s_lock)
req              1549 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
req              1550 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_ctxtdata *rcd = req->rcd;
req              1555 drivers/infiniband/hw/hfi1/tid_rdma.c 	lockdep_assert_held(&req->qp->s_lock);
req              1557 drivers/infiniband/hw/hfi1/tid_rdma.c 	if (!CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS))
req              1573 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->clear_tail = (req->clear_tail + 1) & (MAX_FLOWS - 1);
req              1575 drivers/infiniband/hw/hfi1/tid_rdma.c 	if (fqp == req->qp) {
req              1589 drivers/infiniband/hw/hfi1/tid_rdma.c void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req)
req              1590 drivers/infiniband/hw/hfi1/tid_rdma.c 	__must_hold(&req->qp->s_lock)
req              1593 drivers/infiniband/hw/hfi1/tid_rdma.c 	while (CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS)) {
req              1594 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (hfi1_kern_exp_rcv_clear(req))
req              1603 drivers/infiniband/hw/hfi1/tid_rdma.c static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req)
req              1605 drivers/infiniband/hw/hfi1/tid_rdma.c 	kfree(req->flows);
req              1606 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->flows = NULL;
req              1624 drivers/infiniband/hw/hfi1/tid_rdma.c static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
req              1630 drivers/infiniband/hw/hfi1/tid_rdma.c 	if (likely(req->flows))
req              1633 drivers/infiniband/hw/hfi1/tid_rdma.c 			     req->rcd->numa_id);
req              1638 drivers/infiniband/hw/hfi1/tid_rdma.c 		flows[i].req = req;
req              1643 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->flows = flows;
req              1648 drivers/infiniband/hw/hfi1/tid_rdma.c 				struct tid_rdma_request *req)
req              1662 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->qp = qp;
req              1663 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->rcd = qpriv->rcd;
req              1674 drivers/infiniband/hw/hfi1/tid_rdma.c static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req,
req              1680 drivers/infiniband/hw/hfi1/tid_rdma.c 	head = req->setup_head;
req              1681 drivers/infiniband/hw/hfi1/tid_rdma.c 	tail = req->clear_tail;
req              1684 drivers/infiniband/hw/hfi1/tid_rdma.c 		flow = &req->flows[tail];
req              1700 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req = wqe_to_tid_req(wqe);
req              1701 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_flow *flow = &req->flows[req->flow_idx];
req              1702 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct rvt_qp *qp = req->qp;
req              1712 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_flow_build_read_pkt(qp, req->flow_idx, flow);
req              1742 drivers/infiniband/hw/hfi1/tid_rdma.c 			   req->cur_seg * req->seg_len + flow->sent);
req              1763 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->cur_seg++;
req              1765 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->ack_pending++;
req              1766 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->flow_idx = (req->flow_idx + 1) & (MAX_FLOWS - 1);
req              1786 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req = wqe_to_tid_req(wqe);
req              1794 drivers/infiniband/hw/hfi1/tid_rdma.c 					  wqe->lpsn, req);
req              1800 drivers/infiniband/hw/hfi1/tid_rdma.c 	if (req->state == TID_REQUEST_SYNC) {
req              1804 drivers/infiniband/hw/hfi1/tid_rdma.c 		hfi1_kern_clear_hw_flow(req->rcd, qp);
req              1806 drivers/infiniband/hw/hfi1/tid_rdma.c 		req->state = TID_REQUEST_ACTIVE;
req              1814 drivers/infiniband/hw/hfi1/tid_rdma.c 	if (req->flow_idx == req->setup_head) {
req              1816 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (req->state == TID_REQUEST_RESEND) {
req              1822 drivers/infiniband/hw/hfi1/tid_rdma.c 			restart_sge(&qp->s_sge, wqe, req->s_next_psn,
req              1824 drivers/infiniband/hw/hfi1/tid_rdma.c 			req->isge = 0;
req              1825 drivers/infiniband/hw/hfi1/tid_rdma.c 			req->state = TID_REQUEST_ACTIVE;
req              1833 drivers/infiniband/hw/hfi1/tid_rdma.c 			req->state = TID_REQUEST_SYNC;
req              1845 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (hfi1_kern_exp_rcv_setup(req, &qp->s_sge, &last)) {
req              1846 drivers/infiniband/hw/hfi1/tid_rdma.c 			req->state = TID_REQUEST_QUEUED;
req              1857 drivers/infiniband/hw/hfi1/tid_rdma.c 	flow = &req->flows[req->flow_idx];
req              1863 drivers/infiniband/hw/hfi1/tid_rdma.c 		flow->flow_state.ib_spsn = req->s_next_psn;
req              1869 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->s_next_psn += flow->npkts;
req              1889 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req;
req              1893 drivers/infiniband/hw/hfi1/tid_rdma.c 	req = ack_to_tid_req(e);
req              1896 drivers/infiniband/hw/hfi1/tid_rdma.c 	flow = &req->flows[req->setup_head];
req              1929 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->clear_tail = req->setup_head;
req              1947 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_flow_rcv_read_req(qp, req->setup_head, flow);
req              1949 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->flow_idx = req->setup_head;
req              1952 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1);
req              1962 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->n_flows = qpriv->tid_rdma.local.max_read;
req              1963 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->state = TID_REQUEST_ACTIVE;
req              1964 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->cur_seg = 0;
req              1965 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->comp_seg = 0;
req              1966 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->ack_seg = 0;
req              1967 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->isge = 0;
req              1968 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->seg_len = qpriv->tid_rdma.local.max_len;
req              1969 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->total_len = len;
req              1970 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->total_segs = 1;
req              1971 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->r_flow_psn = e->psn;
req              1974 drivers/infiniband/hw/hfi1/tid_rdma.c 					req);
req              1987 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req;
req              2013 drivers/infiniband/hw/hfi1/tid_rdma.c 	req = ack_to_tid_req(e);
req              2014 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->r_flow_psn = psn;
req              2015 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn, e->lpsn, req);
req              2030 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (psn != e->psn || len != req->total_len)
req              2070 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (req->state == TID_REQUEST_RESEND) {
req              2071 drivers/infiniband/hw/hfi1/tid_rdma.c 			req->state = TID_REQUEST_RESEND_ACTIVE;
req              2072 drivers/infiniband/hw/hfi1/tid_rdma.c 		} else if (req->state == TID_REQUEST_INIT_RESEND) {
req              2073 drivers/infiniband/hw/hfi1/tid_rdma.c 			req->state = TID_REQUEST_INIT;
req              2084 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (old_req || req->state == TID_REQUEST_INIT ||
req              2085 drivers/infiniband/hw/hfi1/tid_rdma.c 		    (req->state == TID_REQUEST_SYNC && !req->cur_seg)) {
req              2092 drivers/infiniband/hw/hfi1/tid_rdma.c 				req = ack_to_tid_req(e);
req              2094 drivers/infiniband/hw/hfi1/tid_rdma.c 				    req->state == TID_REQUEST_INIT)
req              2095 drivers/infiniband/hw/hfi1/tid_rdma.c 					req->state = TID_REQUEST_INIT_RESEND;
req              2111 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (req->clear_tail == req->setup_head)
req              2119 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (CIRC_CNT(req->flow_idx, req->clear_tail, MAX_FLOWS)) {
req              2120 drivers/infiniband/hw/hfi1/tid_rdma.c 			fstate = &req->flows[req->clear_tail].flow_state;
req              2122 drivers/infiniband/hw/hfi1/tid_rdma.c 				CIRC_CNT(req->flow_idx, req->clear_tail,
req              2124 drivers/infiniband/hw/hfi1/tid_rdma.c 			req->flow_idx =
req              2125 drivers/infiniband/hw/hfi1/tid_rdma.c 				CIRC_ADD(req->clear_tail,
req              2137 drivers/infiniband/hw/hfi1/tid_rdma.c 			if (CIRC_CNT(req->setup_head, req->flow_idx,
req              2139 drivers/infiniband/hw/hfi1/tid_rdma.c 				req->cur_seg = delta_psn(psn, e->psn);
req              2140 drivers/infiniband/hw/hfi1/tid_rdma.c 				req->state = TID_REQUEST_RESEND_ACTIVE;
req              2154 drivers/infiniband/hw/hfi1/tid_rdma.c 			req = ack_to_tid_req(e);
req              2156 drivers/infiniband/hw/hfi1/tid_rdma.c 						   e->lpsn, req);
req              2158 drivers/infiniband/hw/hfi1/tid_rdma.c 			    req->cur_seg == req->comp_seg ||
req              2159 drivers/infiniband/hw/hfi1/tid_rdma.c 			    req->state == TID_REQUEST_INIT ||
req              2160 drivers/infiniband/hw/hfi1/tid_rdma.c 			    req->state == TID_REQUEST_INIT_RESEND) {
req              2161 drivers/infiniband/hw/hfi1/tid_rdma.c 				if (req->state == TID_REQUEST_INIT)
req              2162 drivers/infiniband/hw/hfi1/tid_rdma.c 					req->state = TID_REQUEST_INIT_RESEND;
req              2166 drivers/infiniband/hw/hfi1/tid_rdma.c 				CIRC_CNT(req->flow_idx,
req              2167 drivers/infiniband/hw/hfi1/tid_rdma.c 					 req->clear_tail,
req              2169 drivers/infiniband/hw/hfi1/tid_rdma.c 			req->flow_idx = req->clear_tail;
req              2170 drivers/infiniband/hw/hfi1/tid_rdma.c 			req->state = TID_REQUEST_RESEND;
req              2171 drivers/infiniband/hw/hfi1/tid_rdma.c 			req->cur_seg = req->comp_seg;
req              2342 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req = &epriv->tid_req;
req              2344 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
req              2359 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_flow_build_read_resp(qp, req->clear_tail, flow);
req              2391 drivers/infiniband/hw/hfi1/tid_rdma.c 		req->clear_tail = (req->clear_tail + 1) &
req              2412 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req = NULL;
req              2423 drivers/infiniband/hw/hfi1/tid_rdma.c 				req = wqe_to_tid_req(wqe);
req              2430 drivers/infiniband/hw/hfi1/tid_rdma.c 	return req;
req              2448 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req;
req              2463 drivers/infiniband/hw/hfi1/tid_rdma.c 	req = find_tid_request(qp, ipsn, IB_WR_TID_RDMA_READ);
req              2464 drivers/infiniband/hw/hfi1/tid_rdma.c 	if (unlikely(!req))
req              2467 drivers/infiniband/hw/hfi1/tid_rdma.c 	flow = &req->flows[req->clear_tail];
req              2494 drivers/infiniband/hw/hfi1/tid_rdma.c 			len = restart_sge(&ss, req->e.swqe, ipsn, pmtu);
req              2506 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->ack_pending--;
req              2521 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_req_rcv_read_resp(qp, 0, req->e.swqe->wr.opcode,
req              2522 drivers/infiniband/hw/hfi1/tid_rdma.c 					 req->e.swqe->psn, req->e.swqe->lpsn,
req              2523 drivers/infiniband/hw/hfi1/tid_rdma.c 					 req);
req              2524 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_flow_rcv_read_resp(qp, req->clear_tail, flow);
req              2527 drivers/infiniband/hw/hfi1/tid_rdma.c 	hfi1_kern_exp_rcv_clear(req);
req              2533 drivers/infiniband/hw/hfi1/tid_rdma.c 	if (++req->comp_seg >= req->total_segs) {
req              2535 drivers/infiniband/hw/hfi1/tid_rdma.c 		req->state = TID_REQUEST_COMPLETE;
req              2543 drivers/infiniband/hw/hfi1/tid_rdma.c 	if ((req->state == TID_REQUEST_SYNC &&
req              2544 drivers/infiniband/hw/hfi1/tid_rdma.c 	     req->comp_seg == req->cur_seg) ||
req              2548 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (req->state == TID_REQUEST_SYNC)
req              2549 drivers/infiniband/hw/hfi1/tid_rdma.c 			req->state = TID_REQUEST_ACTIVE;
req              2576 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req;
req              2584 drivers/infiniband/hw/hfi1/tid_rdma.c 			req = wqe_to_tid_req(wqe);
req              2585 drivers/infiniband/hw/hfi1/tid_rdma.c 			hfi1_kern_exp_rcv_clear_all(req);
req              2625 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req;
req              2630 drivers/infiniband/hw/hfi1/tid_rdma.c 	req = wqe_to_tid_req(wqe);
req              2631 drivers/infiniband/hw/hfi1/tid_rdma.c 	flow = &req->flows[req->clear_tail];
req              2657 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req;
req              2733 drivers/infiniband/hw/hfi1/tid_rdma.c 	req = wqe_to_tid_req(wqe);
req              2735 drivers/infiniband/hw/hfi1/tid_rdma.c 					     wqe->lpsn, req);
req              2749 drivers/infiniband/hw/hfi1/tid_rdma.c 			flow = &req->flows[req->clear_tail];
req              2751 drivers/infiniband/hw/hfi1/tid_rdma.c 							      req->clear_tail,
req              2857 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req;
req              2934 drivers/infiniband/hw/hfi1/tid_rdma.c 	req = ack_to_tid_req(e);
req              2935 drivers/infiniband/hw/hfi1/tid_rdma.c 	if (req->comp_seg == req->cur_seg)
req              2937 drivers/infiniband/hw/hfi1/tid_rdma.c 	flow = &req->flows[req->clear_tail];
req              2942 drivers/infiniband/hw/hfi1/tid_rdma.c 					       e->lpsn, req);
req              2943 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_flow_handle_kdeth_eflags(qp, req->clear_tail, flow);
req              3040 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req = wqe_to_tid_req(wqe);
req              3049 drivers/infiniband/hw/hfi1/tid_rdma.c 		flow = find_flow_ib(req, *bth2, &fidx);
req              3056 drivers/infiniband/hw/hfi1/tid_rdma.c 						       req);
req              3060 drivers/infiniband/hw/hfi1/tid_rdma.c 		fidx = req->acked_tail;
req              3061 drivers/infiniband/hw/hfi1/tid_rdma.c 		flow = &req->flows[fidx];
req              3062 drivers/infiniband/hw/hfi1/tid_rdma.c 		*bth2 = mask_psn(req->r_ack_psn);
req              3098 drivers/infiniband/hw/hfi1/tid_rdma.c 		rvt_skip_sge(&qpriv->tid_ss, (req->cur_seg * req->seg_len) +
req              3118 drivers/infiniband/hw/hfi1/tid_rdma.c 		req->flow_idx = fidx;
req              3120 drivers/infiniband/hw/hfi1/tid_rdma.c 		req->clear_tail = fidx;
req              3124 drivers/infiniband/hw/hfi1/tid_rdma.c 				       wqe->lpsn, req);
req              3125 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->state = TID_REQUEST_ACTIVE;
req              3131 drivers/infiniband/hw/hfi1/tid_rdma.c 			for (; CIRC_CNT(req->setup_head, fidx, MAX_FLOWS);
req              3133 drivers/infiniband/hw/hfi1/tid_rdma.c 				req->flows[fidx].sent = 0;
req              3134 drivers/infiniband/hw/hfi1/tid_rdma.c 				req->flows[fidx].pkt = 0;
req              3135 drivers/infiniband/hw/hfi1/tid_rdma.c 				req->flows[fidx].tid_idx = 0;
req              3136 drivers/infiniband/hw/hfi1/tid_rdma.c 				req->flows[fidx].tid_offset = 0;
req              3137 drivers/infiniband/hw/hfi1/tid_rdma.c 				req->flows[fidx].resync_npkts = 0;
req              3145 drivers/infiniband/hw/hfi1/tid_rdma.c 			req = wqe_to_tid_req(wqe);
req              3146 drivers/infiniband/hw/hfi1/tid_rdma.c 			req->cur_seg = req->ack_seg;
req              3147 drivers/infiniband/hw/hfi1/tid_rdma.c 			fidx = req->acked_tail;
req              3149 drivers/infiniband/hw/hfi1/tid_rdma.c 			req->clear_tail = fidx;
req              3206 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req;
req              3220 drivers/infiniband/hw/hfi1/tid_rdma.c 			req = wqe_to_tid_req(prev);
req              3221 drivers/infiniband/hw/hfi1/tid_rdma.c 			if (req->ack_seg != req->total_segs)
req              3238 drivers/infiniband/hw/hfi1/tid_rdma.c 			req = wqe_to_tid_req(prev);
req              3239 drivers/infiniband/hw/hfi1/tid_rdma.c 			if (req->ack_seg != req->total_segs)
req              3358 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req = wqe_to_tid_req(wqe);
req              3367 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->n_flows = remote->max_write;
req              3368 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->state = TID_REQUEST_ACTIVE;
req              3453 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req;
req              3496 drivers/infiniband/hw/hfi1/tid_rdma.c 		req = ack_to_tid_req(e);
req              3498 drivers/infiniband/hw/hfi1/tid_rdma.c 						   e->lpsn, req);
req              3500 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (req->alloc_seg >= req->total_segs)
req              3529 drivers/infiniband/hw/hfi1/tid_rdma.c 		npkts = rvt_div_round_up_mtu(qp, req->seg_len);
req              3547 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (!CIRC_SPACE(req->setup_head, req->acked_tail,
req              3556 drivers/infiniband/hw/hfi1/tid_rdma.c 		ret = hfi1_kern_exp_rcv_setup(req, &req->ss, &last);
req              3563 drivers/infiniband/hw/hfi1/tid_rdma.c 		req->alloc_seg++;
req              3589 drivers/infiniband/hw/hfi1/tid_rdma.c 	qp->r_psn = e->psn + req->alloc_seg;
req              3654 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req;
req              3707 drivers/infiniband/hw/hfi1/tid_rdma.c 	req = ack_to_tid_req(e);
req              3715 drivers/infiniband/hw/hfi1/tid_rdma.c 		req->state = TID_REQUEST_INIT;
req              3729 drivers/infiniband/hw/hfi1/tid_rdma.c 	    (req->setup_head != req->clear_tail ||
req              3730 drivers/infiniband/hw/hfi1/tid_rdma.c 	     req->clear_tail != req->acked_tail))
req              3744 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->n_flows = min_t(u16, num_segs, qpriv->tid_rdma.local.max_write);
req              3745 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->state = TID_REQUEST_INIT;
req              3746 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->cur_seg = 0;
req              3747 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->comp_seg = 0;
req              3748 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->ack_seg = 0;
req              3749 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->alloc_seg = 0;
req              3750 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->isge = 0;
req              3751 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->seg_len = qpriv->tid_rdma.local.max_len;
req              3752 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->total_len = len;
req              3753 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->total_segs = num_segs;
req              3754 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->r_flow_psn = e->psn;
req              3755 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->ss.sge = e->rdma_sge;
req              3756 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->ss.num_sge = 1;
req              3758 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->flow_idx = req->setup_head;
req              3759 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->clear_tail = req->setup_head;
req              3760 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->acked_tail = req->setup_head;
req              3773 drivers/infiniband/hw/hfi1/tid_rdma.c 					 req);
req              3828 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req = &epriv->tid_req;
req              3836 drivers/infiniband/hw/hfi1/tid_rdma.c 					    req);
req              3839 drivers/infiniband/hw/hfi1/tid_rdma.c 	flow = &req->flows[req->flow_idx];
req              3840 drivers/infiniband/hw/hfi1/tid_rdma.c 	switch (req->state) {
req              3849 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (req->cur_seg >= req->alloc_seg)
req              3859 drivers/infiniband/hw/hfi1/tid_rdma.c 		req->state = TID_REQUEST_ACTIVE;
req              3860 drivers/infiniband/hw/hfi1/tid_rdma.c 		trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
req              3861 drivers/infiniband/hw/hfi1/tid_rdma.c 		req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS);
req              3867 drivers/infiniband/hw/hfi1/tid_rdma.c 		trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
req              3868 drivers/infiniband/hw/hfi1/tid_rdma.c 		req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS);
req              3869 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (!CIRC_CNT(req->setup_head, req->flow_idx, MAX_FLOWS))
req              3870 drivers/infiniband/hw/hfi1/tid_rdma.c 			req->state = TID_REQUEST_ACTIVE;
req              3878 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->cur_seg++;
req              3992 drivers/infiniband/hw/hfi1/tid_rdma.c 			struct tid_rdma_request *req =
req              3995 drivers/infiniband/hw/hfi1/tid_rdma.c 			hfi1_kern_exp_rcv_clear_all(req);
req              4032 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req;
req              4072 drivers/infiniband/hw/hfi1/tid_rdma.c 	req = wqe_to_tid_req(wqe);
req              4078 drivers/infiniband/hw/hfi1/tid_rdma.c 	if (!CIRC_SPACE(req->setup_head, req->acked_tail, MAX_FLOWS))
req              4093 drivers/infiniband/hw/hfi1/tid_rdma.c 	flow = &req->flows[req->setup_head];
req              4106 drivers/infiniband/hw/hfi1/tid_rdma.c 	flow->length = min_t(u32, req->seg_len,
req              4107 drivers/infiniband/hw/hfi1/tid_rdma.c 			     (wqe->length - (req->comp_seg * req->seg_len)));
req              4120 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_flow_rcv_write_resp(qp, req->setup_head, flow);
req              4122 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->comp_seg++;
req              4143 drivers/infiniband/hw/hfi1/tid_rdma.c 					  wqe->lpsn, req);
req              4149 drivers/infiniband/hw/hfi1/tid_rdma.c 		req->r_last_acked = mask_psn(wqe->psn - 1);
req              4151 drivers/infiniband/hw/hfi1/tid_rdma.c 		req->acked_tail = req->setup_head;
req              4155 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->setup_head = CIRC_NEXT(req->setup_head, MAX_FLOWS);
req              4156 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->state = TID_REQUEST_ACTIVE;
req              4166 drivers/infiniband/hw/hfi1/tid_rdma.c 	    req->comp_seg == req->total_segs) {
req              4196 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req = wqe_to_tid_req(wqe);
req              4197 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
req              4199 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct rvt_qp *qp = req->qp;
req              4218 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_flow_build_write_data(qp, req->clear_tail, flow);
req              4241 drivers/infiniband/hw/hfi1/tid_rdma.c 		    rvt_div_round_up_mtu(qp, req->seg_len) >
req              4243 drivers/infiniband/hw/hfi1/tid_rdma.c 			req->state = TID_REQUEST_SYNC;
req              4263 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req;
req              4281 drivers/infiniband/hw/hfi1/tid_rdma.c 	req = ack_to_tid_req(e);
req              4282 drivers/infiniband/hw/hfi1/tid_rdma.c 	flow = &req->flows[req->clear_tail];
req              4309 drivers/infiniband/hw/hfi1/tid_rdma.c 			len = req->comp_seg * req->seg_len;
req              4313 drivers/infiniband/hw/hfi1/tid_rdma.c 			if (unlikely(req->total_len - len < pmtu))
req              4323 drivers/infiniband/hw/hfi1/tid_rdma.c 			ss.total_len = req->total_len;
req              4334 drivers/infiniband/hw/hfi1/tid_rdma.c 	hfi1_kern_exp_rcv_clear(req);
req              4337 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->comp_seg++;
req              4349 drivers/infiniband/hw/hfi1/tid_rdma.c 					  req);
req              4375 drivers/infiniband/hw/hfi1/tid_rdma.c 	if (req->cur_seg < req->total_segs ||
req              4384 drivers/infiniband/hw/hfi1/tid_rdma.c 			hfi1_mod_tid_reap_timer(req->qp);
req              4386 drivers/infiniband/hw/hfi1/tid_rdma.c 			hfi1_stop_tid_reap_timer(req->qp);
req              4419 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req = ack_to_tid_req(e);
req              4420 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_flow *flow = &req->flows[iflow];
req              4490 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req;
req              4529 drivers/infiniband/hw/hfi1/tid_rdma.c 	req = wqe_to_tid_req(wqe);
req              4531 drivers/infiniband/hw/hfi1/tid_rdma.c 				       wqe->lpsn, req);
req              4532 drivers/infiniband/hw/hfi1/tid_rdma.c 	flow = &req->flows[req->acked_tail];
req              4533 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
req              4542 drivers/infiniband/hw/hfi1/tid_rdma.c 	       req->ack_seg < req->cur_seg) {
req              4543 drivers/infiniband/hw/hfi1/tid_rdma.c 		req->ack_seg++;
req              4545 drivers/infiniband/hw/hfi1/tid_rdma.c 		req->acked_tail = CIRC_NEXT(req->acked_tail, MAX_FLOWS);
req              4546 drivers/infiniband/hw/hfi1/tid_rdma.c 		req->r_last_acked = flow->flow_state.resp_ib_psn;
req              4548 drivers/infiniband/hw/hfi1/tid_rdma.c 					       wqe->lpsn, req);
req              4549 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (req->ack_seg == req->total_segs) {
req              4550 drivers/infiniband/hw/hfi1/tid_rdma.c 			req->state = TID_REQUEST_COMPLETE;
req              4560 drivers/infiniband/hw/hfi1/tid_rdma.c 			req = wqe_to_tid_req(wqe);
req              4562 drivers/infiniband/hw/hfi1/tid_rdma.c 		flow = &req->flows[req->acked_tail];
req              4563 drivers/infiniband/hw/hfi1/tid_rdma.c 		trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
req              4567 drivers/infiniband/hw/hfi1/tid_rdma.c 				       wqe->lpsn, req);
req              4575 drivers/infiniband/hw/hfi1/tid_rdma.c 			    req->ack_seg < req->cur_seg)
req              4598 drivers/infiniband/hw/hfi1/tid_rdma.c 			     req->ack_seg == req->total_segs) ||
req              4604 drivers/infiniband/hw/hfi1/tid_rdma.c 			if (req->ack_seg == req->comp_seg) {
req              4623 drivers/infiniband/hw/hfi1/tid_rdma.c 			req = wqe_to_tid_req(wqe);
req              4624 drivers/infiniband/hw/hfi1/tid_rdma.c 			flow = &req->flows[req->acked_tail];
req              4635 drivers/infiniband/hw/hfi1/tid_rdma.c 			req->r_ack_psn = psn;
req              4652 drivers/infiniband/hw/hfi1/tid_rdma.c 			rptr = req;
req              4694 drivers/infiniband/hw/hfi1/tid_rdma.c 			req->cur_seg = req->ack_seg;
req              4708 drivers/infiniband/hw/hfi1/tid_rdma.c 			if (!req->flows)
req              4710 drivers/infiniband/hw/hfi1/tid_rdma.c 			flow = &req->flows[req->acked_tail];
req              4714 drivers/infiniband/hw/hfi1/tid_rdma.c 			trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
req              4716 drivers/infiniband/hw/hfi1/tid_rdma.c 			req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
req              4717 drivers/infiniband/hw/hfi1/tid_rdma.c 			req->cur_seg = req->ack_seg;
req              4791 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req;
req              4809 drivers/infiniband/hw/hfi1/tid_rdma.c 			req = wqe_to_tid_req(wqe);
req              4811 drivers/infiniband/hw/hfi1/tid_rdma.c 			   qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req);
req              4836 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req = wqe_to_tid_req(wqe);
req              4837 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_flow *flow = &req->flows[fidx];
req              4864 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req;
req              4922 drivers/infiniband/hw/hfi1/tid_rdma.c 			req = ack_to_tid_req(e);
req              4924 drivers/infiniband/hw/hfi1/tid_rdma.c 						      e->lpsn, req);
req              4927 drivers/infiniband/hw/hfi1/tid_rdma.c 			for (flow_idx = req->clear_tail;
req              4928 drivers/infiniband/hw/hfi1/tid_rdma.c 			     CIRC_CNT(req->setup_head, flow_idx,
req              4934 drivers/infiniband/hw/hfi1/tid_rdma.c 				flow = &req->flows[flow_idx];
req              5004 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req = ack_to_tid_req(e);
req              5018 drivers/infiniband/hw/hfi1/tid_rdma.c 	    (e->opcode == TID_OP(WRITE_REQ) && req->cur_seg < req->alloc_seg &&
req              5056 drivers/infiniband/hw/hfi1/tid_rdma.c 	req = wqe_to_tid_req(wqe);
req              5058 drivers/infiniband/hw/hfi1/tid_rdma.c 					wqe->lpsn, req);
req              5089 drivers/infiniband/hw/hfi1/tid_rdma.c 		req = wqe_to_tid_req(wqe);
req              5092 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (!req->comp_seg || req->cur_seg == req->comp_seg)
req              5096 drivers/infiniband/hw/hfi1/tid_rdma.c 						wqe->psn, wqe->lpsn, req);
req              5102 drivers/infiniband/hw/hfi1/tid_rdma.c 			req->clear_tail = CIRC_NEXT(req->clear_tail,
req              5104 drivers/infiniband/hw/hfi1/tid_rdma.c 			if (++req->cur_seg < req->total_segs) {
req              5105 drivers/infiniband/hw/hfi1/tid_rdma.c 				if (!CIRC_CNT(req->setup_head, req->clear_tail,
req              5124 drivers/infiniband/hw/hfi1/tid_rdma.c 		req = wqe_to_tid_req(wqe);
req              5126 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (!req->comp_seg) {
req              5130 drivers/infiniband/hw/hfi1/tid_rdma.c 			req = wqe_to_tid_req(wqe);
req              5134 drivers/infiniband/hw/hfi1/tid_rdma.c 						     CIRC_PREV(req->setup_head,
req              5185 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req, *nreq;
req              5196 drivers/infiniband/hw/hfi1/tid_rdma.c 	req = ack_to_tid_req(e);
req              5210 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (!req->ack_seg || req->ack_seg == req->total_segs)
req              5215 drivers/infiniband/hw/hfi1/tid_rdma.c 		req = ack_to_tid_req(e);
req              5220 drivers/infiniband/hw/hfi1/tid_rdma.c 					req);
req              5226 drivers/infiniband/hw/hfi1/tid_rdma.c 	    req->ack_seg == req->comp_seg)
req              5236 drivers/infiniband/hw/hfi1/tid_rdma.c 		req->ack_seg +=
req              5238 drivers/infiniband/hw/hfi1/tid_rdma.c 			CIRC_CNT(req->clear_tail, req->acked_tail,
req              5241 drivers/infiniband/hw/hfi1/tid_rdma.c 		req->acked_tail = req->clear_tail;
req              5248 drivers/infiniband/hw/hfi1/tid_rdma.c 		flow = CIRC_PREV(req->acked_tail, MAX_FLOWS);
req              5249 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (req->ack_seg != req->total_segs)
req              5251 drivers/infiniband/hw/hfi1/tid_rdma.c 		req->state = TID_REQUEST_COMPLETE;
req              5265 drivers/infiniband/hw/hfi1/tid_rdma.c 		req = ack_to_tid_req(e);
req              5276 drivers/infiniband/hw/hfi1/tid_rdma.c 		      full_flow_psn(&req->flows[flow],
req              5277 drivers/infiniband/hw/hfi1/tid_rdma.c 				    req->flows[flow].flow_state.lpsn)) > 0))) {
req              5286 drivers/infiniband/hw/hfi1/tid_rdma.c 		req = ack_to_tid_req(e);
req              5287 drivers/infiniband/hw/hfi1/tid_rdma.c 		flow = req->acked_tail;
req              5288 drivers/infiniband/hw/hfi1/tid_rdma.c 	} else if (req->ack_seg == req->total_segs &&
req              5294 drivers/infiniband/hw/hfi1/tid_rdma.c 					req);
req              5452 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req;
req              5464 drivers/infiniband/hw/hfi1/tid_rdma.c 		req = ack_to_tid_req(prev);
req              5465 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (req->ack_seg != req->total_segs) {
req               179 drivers/infiniband/hw/hfi1/tid_rdma.h 	struct tid_rdma_request *req;
req               210 drivers/infiniband/hw/hfi1/tid_rdma.h int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req,
req               212 drivers/infiniband/hw/hfi1/tid_rdma.h int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req);
req               213 drivers/infiniband/hw/hfi1/tid_rdma.h void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req);
req              1045 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1046 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req),
req              1076 drivers/infiniband/hw/hfi1/trace_tid.h 		__entry->cur_seg = req->cur_seg;
req              1077 drivers/infiniband/hw/hfi1/trace_tid.h 		__entry->comp_seg = req->comp_seg;
req              1078 drivers/infiniband/hw/hfi1/trace_tid.h 		__entry->ack_seg = req->ack_seg;
req              1079 drivers/infiniband/hw/hfi1/trace_tid.h 		__entry->alloc_seg = req->alloc_seg;
req              1080 drivers/infiniband/hw/hfi1/trace_tid.h 		__entry->total_segs = req->total_segs;
req              1081 drivers/infiniband/hw/hfi1/trace_tid.h 		__entry->setup_head = req->setup_head;
req              1082 drivers/infiniband/hw/hfi1/trace_tid.h 		__entry->clear_tail = req->clear_tail;
req              1083 drivers/infiniband/hw/hfi1/trace_tid.h 		__entry->flow_idx = req->flow_idx;
req              1084 drivers/infiniband/hw/hfi1/trace_tid.h 		__entry->acked_tail = req->acked_tail;
req              1085 drivers/infiniband/hw/hfi1/trace_tid.h 		__entry->state = req->state;
req              1086 drivers/infiniband/hw/hfi1/trace_tid.h 		__entry->r_ack_psn = req->r_ack_psn;
req              1087 drivers/infiniband/hw/hfi1/trace_tid.h 		__entry->r_flow_psn = req->r_flow_psn;
req              1088 drivers/infiniband/hw/hfi1/trace_tid.h 		__entry->r_last_acked = req->r_last_acked;
req              1089 drivers/infiniband/hw/hfi1/trace_tid.h 		__entry->s_next_psn = req->s_next_psn;
req              1119 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1120 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req              1126 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1127 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req              1133 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1134 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req              1140 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1141 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req              1147 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1148 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req              1154 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1155 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req              1161 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1162 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req              1168 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1169 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req              1175 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1176 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req              1182 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1183 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req              1189 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1190 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req              1196 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1197 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req              1203 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1204 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req              1210 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1211 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req              1217 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1218 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req              1224 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1225 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req              1231 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1232 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req              1238 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1239 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req              1245 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1246 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req              1252 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1253 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req              1259 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1260 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req              1266 drivers/infiniband/hw/hfi1/trace_tid.h 		 struct tid_rdma_request *req),
req              1267 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
req               506 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
req               508 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
req               513 drivers/infiniband/hw/hfi1/trace_tx.h 		    __field(u16, req)
req               541 drivers/infiniband/hw/hfi1/trace_tx.h 		    __entry->req = req;
req               564 drivers/infiniband/hw/hfi1/trace_tx.h 		      __entry->req,
req               660 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
req               662 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
req               667 drivers/infiniband/hw/hfi1/trace_tx.h 	    __field(u16, req)
req               678 drivers/infiniband/hw/hfi1/trace_tx.h 	    __entry->req = req;
req               689 drivers/infiniband/hw/hfi1/trace_tx.h 		      __entry->req,
req                79 drivers/infiniband/hw/hfi1/user_sdma.c static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
req                82 drivers/infiniband/hw/hfi1/user_sdma.c static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
req                83 drivers/infiniband/hw/hfi1/user_sdma.c static int pin_vector_pages(struct user_sdma_request *req,
req                87 drivers/infiniband/hw/hfi1/user_sdma.c static int check_header_template(struct user_sdma_request *req,
req                90 drivers/infiniband/hw/hfi1/user_sdma.c static int set_txreq_header(struct user_sdma_request *req,
req                92 drivers/infiniband/hw/hfi1/user_sdma.c static int set_txreq_header_ahg(struct user_sdma_request *req,
req               355 drivers/infiniband/hw/hfi1/user_sdma.c 	struct user_sdma_request *req;
req               362 drivers/infiniband/hw/hfi1/user_sdma.c 	if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
req               367 drivers/infiniband/hw/hfi1/user_sdma.c 		   iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
req               417 drivers/infiniband/hw/hfi1/user_sdma.c 	req = pq->reqs + info.comp_idx;
req               418 drivers/infiniband/hw/hfi1/user_sdma.c 	req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
req               419 drivers/infiniband/hw/hfi1/user_sdma.c 	req->data_len  = 0;
req               420 drivers/infiniband/hw/hfi1/user_sdma.c 	req->pq = pq;
req               421 drivers/infiniband/hw/hfi1/user_sdma.c 	req->cq = cq;
req               422 drivers/infiniband/hw/hfi1/user_sdma.c 	req->ahg_idx = -1;
req               423 drivers/infiniband/hw/hfi1/user_sdma.c 	req->iov_idx = 0;
req               424 drivers/infiniband/hw/hfi1/user_sdma.c 	req->sent = 0;
req               425 drivers/infiniband/hw/hfi1/user_sdma.c 	req->seqnum = 0;
req               426 drivers/infiniband/hw/hfi1/user_sdma.c 	req->seqcomp = 0;
req               427 drivers/infiniband/hw/hfi1/user_sdma.c 	req->seqsubmitted = 0;
req               428 drivers/infiniband/hw/hfi1/user_sdma.c 	req->tids = NULL;
req               429 drivers/infiniband/hw/hfi1/user_sdma.c 	req->has_error = 0;
req               430 drivers/infiniband/hw/hfi1/user_sdma.c 	INIT_LIST_HEAD(&req->txps);
req               432 drivers/infiniband/hw/hfi1/user_sdma.c 	memcpy(&req->info, &info, sizeof(info));
req               439 drivers/infiniband/hw/hfi1/user_sdma.c 		if (req->data_iovs < 2) {
req               440 drivers/infiniband/hw/hfi1/user_sdma.c 			SDMA_DBG(req,
req               445 drivers/infiniband/hw/hfi1/user_sdma.c 		req->data_iovs--;
req               448 drivers/infiniband/hw/hfi1/user_sdma.c 	if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
req               449 drivers/infiniband/hw/hfi1/user_sdma.c 		SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
req               455 drivers/infiniband/hw/hfi1/user_sdma.c 	ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
req               456 drivers/infiniband/hw/hfi1/user_sdma.c 			     sizeof(req->hdr));
req               458 drivers/infiniband/hw/hfi1/user_sdma.c 		SDMA_DBG(req, "Failed to copy header template (%d)", ret);
req               465 drivers/infiniband/hw/hfi1/user_sdma.c 		req->hdr.pbc[2] = 0;
req               468 drivers/infiniband/hw/hfi1/user_sdma.c 	opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff;
req               471 drivers/infiniband/hw/hfi1/user_sdma.c 		SDMA_DBG(req, "Invalid opcode (%d)", opcode);
req               480 drivers/infiniband/hw/hfi1/user_sdma.c 	vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF;
req               481 drivers/infiniband/hw/hfi1/user_sdma.c 	sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) |
req               482 drivers/infiniband/hw/hfi1/user_sdma.c 	      (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4));
req               485 drivers/infiniband/hw/hfi1/user_sdma.c 		SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl);
req               491 drivers/infiniband/hw/hfi1/user_sdma.c 	pkey = (u16)be32_to_cpu(req->hdr.bth[0]);
req               492 drivers/infiniband/hw/hfi1/user_sdma.c 	slid = be16_to_cpu(req->hdr.lrh[3]);
req               503 drivers/infiniband/hw/hfi1/user_sdma.c 	if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) {
req               504 drivers/infiniband/hw/hfi1/user_sdma.c 		SDMA_DBG(req, "User tried to pass in a GRH");
req               509 drivers/infiniband/hw/hfi1/user_sdma.c 	req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]);
req               514 drivers/infiniband/hw/hfi1/user_sdma.c 	req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
req               515 drivers/infiniband/hw/hfi1/user_sdma.c 		(KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
req               518 drivers/infiniband/hw/hfi1/user_sdma.c 					       info.comp_idx, req->tidoffset);
req               522 drivers/infiniband/hw/hfi1/user_sdma.c 	for (i = 0; i < req->data_iovs; i++) {
req               523 drivers/infiniband/hw/hfi1/user_sdma.c 		req->iovs[i].offset = 0;
req               524 drivers/infiniband/hw/hfi1/user_sdma.c 		INIT_LIST_HEAD(&req->iovs[i].list);
req               525 drivers/infiniband/hw/hfi1/user_sdma.c 		memcpy(&req->iovs[i].iov,
req               527 drivers/infiniband/hw/hfi1/user_sdma.c 		       sizeof(req->iovs[i].iov));
req               528 drivers/infiniband/hw/hfi1/user_sdma.c 		ret = pin_vector_pages(req, &req->iovs[i]);
req               530 drivers/infiniband/hw/hfi1/user_sdma.c 			req->data_iovs = i;
req               533 drivers/infiniband/hw/hfi1/user_sdma.c 		req->data_len += req->iovs[i].iov.iov_len;
req               536 drivers/infiniband/hw/hfi1/user_sdma.c 					 info.comp_idx, req->data_len);
req               537 drivers/infiniband/hw/hfi1/user_sdma.c 	if (pcount > req->info.npkts)
req               538 drivers/infiniband/hw/hfi1/user_sdma.c 		pcount = req->info.npkts;
req               547 drivers/infiniband/hw/hfi1/user_sdma.c 	if (req_opcode(req->info.ctrl) == EXPECTED) {
req               548 drivers/infiniband/hw/hfi1/user_sdma.c 		u16 ntids = iovec[idx].iov_len / sizeof(*req->tids);
req               563 drivers/infiniband/hw/hfi1/user_sdma.c 				  ntids * sizeof(*req->tids));
req               566 drivers/infiniband/hw/hfi1/user_sdma.c 			SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
req               570 drivers/infiniband/hw/hfi1/user_sdma.c 		req->tids = tmp;
req               571 drivers/infiniband/hw/hfi1/user_sdma.c 		req->n_tids = ntids;
req               572 drivers/infiniband/hw/hfi1/user_sdma.c 		req->tididx = 0;
req               576 drivers/infiniband/hw/hfi1/user_sdma.c 	dlid = be16_to_cpu(req->hdr.lrh[1]);
req               579 drivers/infiniband/hw/hfi1/user_sdma.c 	req->sde = sdma_select_user_engine(dd, selector, vl);
req               581 drivers/infiniband/hw/hfi1/user_sdma.c 	if (!req->sde || !sdma_running(req->sde)) {
req               587 drivers/infiniband/hw/hfi1/user_sdma.c 	if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG))
req               588 drivers/infiniband/hw/hfi1/user_sdma.c 		req->ahg_idx = sdma_ahg_alloc(req->sde);
req               599 drivers/infiniband/hw/hfi1/user_sdma.c 	while (req->seqsubmitted != req->info.npkts) {
req               600 drivers/infiniband/hw/hfi1/user_sdma.c 		ret = user_sdma_send_pkts(req, pcount);
req               620 drivers/infiniband/hw/hfi1/user_sdma.c 	if (req->seqsubmitted < req->info.npkts) {
req               621 drivers/infiniband/hw/hfi1/user_sdma.c 		if (req->seqsubmitted)
req               623 drivers/infiniband/hw/hfi1/user_sdma.c 				   (req->seqcomp == req->seqsubmitted - 1));
req               624 drivers/infiniband/hw/hfi1/user_sdma.c 		user_sdma_free_request(req, true);
req               631 drivers/infiniband/hw/hfi1/user_sdma.c static inline u32 compute_data_length(struct user_sdma_request *req,
req               648 drivers/infiniband/hw/hfi1/user_sdma.c 	if (!req->seqnum) {
req               649 drivers/infiniband/hw/hfi1/user_sdma.c 		if (req->data_len < sizeof(u32))
req               650 drivers/infiniband/hw/hfi1/user_sdma.c 			len = req->data_len;
req               652 drivers/infiniband/hw/hfi1/user_sdma.c 			len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
req               654 drivers/infiniband/hw/hfi1/user_sdma.c 	} else if (req_opcode(req->info.ctrl) == EXPECTED) {
req               655 drivers/infiniband/hw/hfi1/user_sdma.c 		u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
req               661 drivers/infiniband/hw/hfi1/user_sdma.c 		len = min(tidlen - req->tidoffset, (u32)req->info.fragsize);
req               663 drivers/infiniband/hw/hfi1/user_sdma.c 		if (unlikely(!len) && ++req->tididx < req->n_tids &&
req               664 drivers/infiniband/hw/hfi1/user_sdma.c 		    req->tids[req->tididx]) {
req               665 drivers/infiniband/hw/hfi1/user_sdma.c 			tidlen = EXP_TID_GET(req->tids[req->tididx],
req               667 drivers/infiniband/hw/hfi1/user_sdma.c 			req->tidoffset = 0;
req               668 drivers/infiniband/hw/hfi1/user_sdma.c 			len = min_t(u32, tidlen, req->info.fragsize);
req               675 drivers/infiniband/hw/hfi1/user_sdma.c 		len = min(len, req->data_len - req->sent);
req               677 drivers/infiniband/hw/hfi1/user_sdma.c 		len = min(req->data_len - req->sent, (u32)req->info.fragsize);
req               679 drivers/infiniband/hw/hfi1/user_sdma.c 	trace_hfi1_sdma_user_compute_length(req->pq->dd,
req               680 drivers/infiniband/hw/hfi1/user_sdma.c 					    req->pq->ctxt,
req               681 drivers/infiniband/hw/hfi1/user_sdma.c 					    req->pq->subctxt,
req               682 drivers/infiniband/hw/hfi1/user_sdma.c 					    req->info.comp_idx,
req               700 drivers/infiniband/hw/hfi1/user_sdma.c static int user_sdma_txadd_ahg(struct user_sdma_request *req,
req               705 drivers/infiniband/hw/hfi1/user_sdma.c 	u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
req               706 drivers/infiniband/hw/hfi1/user_sdma.c 	u32 lrhlen = get_lrh_len(req->hdr, pad_len(datalen));
req               707 drivers/infiniband/hw/hfi1/user_sdma.c 	struct hfi1_user_sdma_pkt_q *pq = req->pq;
req               717 drivers/infiniband/hw/hfi1/user_sdma.c 	memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr));
req               722 drivers/infiniband/hw/hfi1/user_sdma.c 	ret = check_header_template(req, &tx->hdr, lrhlen, datalen);
req               726 drivers/infiniband/hw/hfi1/user_sdma.c 			      sizeof(tx->hdr) + datalen, req->ahg_idx,
req               736 drivers/infiniband/hw/hfi1/user_sdma.c static int user_sdma_txadd(struct user_sdma_request *req,
req               747 drivers/infiniband/hw/hfi1/user_sdma.c 	struct hfi1_user_sdma_pkt_q *pq = req->pq;
req               753 drivers/infiniband/hw/hfi1/user_sdma.c 	len = offset + req->info.fragsize > PAGE_SIZE ?
req               754 drivers/infiniband/hw/hfi1/user_sdma.c 		PAGE_SIZE - offset : req->info.fragsize;
req               759 drivers/infiniband/hw/hfi1/user_sdma.c 		SDMA_DBG(req, "SDMA txreq add page failed %d\n", ret);
req               766 drivers/infiniband/hw/hfi1/user_sdma.c 		     req->iov_idx < req->data_iovs - 1)) {
req               768 drivers/infiniband/hw/hfi1/user_sdma.c 		iovec = &req->iovs[++req->iov_idx];
req               778 drivers/infiniband/hw/hfi1/user_sdma.c static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
req               787 drivers/infiniband/hw/hfi1/user_sdma.c 	if (!req->pq)
req               790 drivers/infiniband/hw/hfi1/user_sdma.c 	pq = req->pq;
req               793 drivers/infiniband/hw/hfi1/user_sdma.c 	if (READ_ONCE(req->has_error))
req               799 drivers/infiniband/hw/hfi1/user_sdma.c 	if (unlikely(req->seqnum == req->info.npkts)) {
req               800 drivers/infiniband/hw/hfi1/user_sdma.c 		if (!list_empty(&req->txps))
req               805 drivers/infiniband/hw/hfi1/user_sdma.c 	if (!maxpkts || maxpkts > req->info.npkts - req->seqnum)
req               806 drivers/infiniband/hw/hfi1/user_sdma.c 		maxpkts = req->info.npkts - req->seqnum;
req               817 drivers/infiniband/hw/hfi1/user_sdma.c 		if (READ_ONCE(req->has_error))
req               825 drivers/infiniband/hw/hfi1/user_sdma.c 		tx->req = req;
req               832 drivers/infiniband/hw/hfi1/user_sdma.c 		if (req->seqnum == req->info.npkts - 1)
req               841 drivers/infiniband/hw/hfi1/user_sdma.c 		if (req->data_len) {
req               842 drivers/infiniband/hw/hfi1/user_sdma.c 			iovec = &req->iovs[req->iov_idx];
req               844 drivers/infiniband/hw/hfi1/user_sdma.c 				if (++req->iov_idx == req->data_iovs) {
req               848 drivers/infiniband/hw/hfi1/user_sdma.c 				iovec = &req->iovs[req->iov_idx];
req               852 drivers/infiniband/hw/hfi1/user_sdma.c 			datalen = compute_data_length(req, tx);
req               863 drivers/infiniband/hw/hfi1/user_sdma.c 				SDMA_DBG(req,
req               872 drivers/infiniband/hw/hfi1/user_sdma.c 		if (req->ahg_idx >= 0) {
req               873 drivers/infiniband/hw/hfi1/user_sdma.c 			if (!req->seqnum) {
req               874 drivers/infiniband/hw/hfi1/user_sdma.c 				ret = user_sdma_txadd_ahg(req, tx, datalen);
req               880 drivers/infiniband/hw/hfi1/user_sdma.c 				changes = set_txreq_header_ahg(req, tx,
req               888 drivers/infiniband/hw/hfi1/user_sdma.c 			ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
req               898 drivers/infiniband/hw/hfi1/user_sdma.c 			ret = set_txreq_header(req, tx, datalen);
req               908 drivers/infiniband/hw/hfi1/user_sdma.c 		       (req->sent + data_sent) < req->data_len) {
req               909 drivers/infiniband/hw/hfi1/user_sdma.c 			ret = user_sdma_txadd(req, tx, iovec, datalen,
req               918 drivers/infiniband/hw/hfi1/user_sdma.c 		req->koffset += datalen;
req               919 drivers/infiniband/hw/hfi1/user_sdma.c 		if (req_opcode(req->info.ctrl) == EXPECTED)
req               920 drivers/infiniband/hw/hfi1/user_sdma.c 			req->tidoffset += datalen;
req               921 drivers/infiniband/hw/hfi1/user_sdma.c 		req->sent += data_sent;
req               922 drivers/infiniband/hw/hfi1/user_sdma.c 		if (req->data_len)
req               924 drivers/infiniband/hw/hfi1/user_sdma.c 		list_add_tail(&tx->txreq.list, &req->txps);
req               930 drivers/infiniband/hw/hfi1/user_sdma.c 		tx->seqnum = req->seqnum++;
req               934 drivers/infiniband/hw/hfi1/user_sdma.c 	ret = sdma_send_txlist(req->sde,
req               936 drivers/infiniband/hw/hfi1/user_sdma.c 			       &req->txps, &count);
req               937 drivers/infiniband/hw/hfi1/user_sdma.c 	req->seqsubmitted += count;
req               938 drivers/infiniband/hw/hfi1/user_sdma.c 	if (req->seqsubmitted == req->info.npkts) {
req               945 drivers/infiniband/hw/hfi1/user_sdma.c 		if (req->ahg_idx >= 0)
req               946 drivers/infiniband/hw/hfi1/user_sdma.c 			sdma_ahg_free(req->sde, req->ahg_idx);
req               967 drivers/infiniband/hw/hfi1/user_sdma.c static int pin_sdma_pages(struct user_sdma_request *req,
req               974 drivers/infiniband/hw/hfi1/user_sdma.c 	struct hfi1_user_sdma_pkt_q *pq = req->pq;
req              1016 drivers/infiniband/hw/hfi1/user_sdma.c static int pin_vector_pages(struct user_sdma_request *req,
req              1020 drivers/infiniband/hw/hfi1/user_sdma.c 	struct hfi1_user_sdma_pkt_q *pq = req->pq;
req              1055 drivers/infiniband/hw/hfi1/user_sdma.c 		pinned = pin_sdma_pages(req, iovec, node, npages);
req              1067 drivers/infiniband/hw/hfi1/user_sdma.c 	ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
req              1086 drivers/infiniband/hw/hfi1/user_sdma.c static int check_header_template(struct user_sdma_request *req,
req              1100 drivers/infiniband/hw/hfi1/user_sdma.c 	if (req->info.fragsize % PIO_BLOCK_SIZE || lrhlen & 0x3 ||
req              1101 drivers/infiniband/hw/hfi1/user_sdma.c 	    lrhlen > get_lrh_len(*hdr, req->info.fragsize))
req              1104 drivers/infiniband/hw/hfi1/user_sdma.c 	if (req_opcode(req->info.ctrl) == EXPECTED) {
req              1111 drivers/infiniband/hw/hfi1/user_sdma.c 		u32 tidval = req->tids[req->tididx],
req              1119 drivers/infiniband/hw/hfi1/user_sdma.c 			  (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
req              1156 drivers/infiniband/hw/hfi1/user_sdma.c static int set_txreq_header(struct user_sdma_request *req,
req              1159 drivers/infiniband/hw/hfi1/user_sdma.c 	struct hfi1_user_sdma_pkt_q *pq = req->pq;
req              1167 drivers/infiniband/hw/hfi1/user_sdma.c 	memcpy(hdr, &req->hdr, sizeof(*hdr));
req              1184 drivers/infiniband/hw/hfi1/user_sdma.c 		if (unlikely(req->seqnum == 2)) {
req              1192 drivers/infiniband/hw/hfi1/user_sdma.c 			req->hdr.pbc[0] = hdr->pbc[0];
req              1193 drivers/infiniband/hw/hfi1/user_sdma.c 			req->hdr.lrh[2] = hdr->lrh[2];
req              1201 drivers/infiniband/hw/hfi1/user_sdma.c 	if (unlikely(!req->seqnum)) {
req              1202 drivers/infiniband/hw/hfi1/user_sdma.c 		ret = check_header_template(req, hdr, lrhlen, datalen);
req              1210 drivers/infiniband/hw/hfi1/user_sdma.c 				(req_opcode(req->info.ctrl) == EXPECTED),
req              1211 drivers/infiniband/hw/hfi1/user_sdma.c 				req->seqnum));
req              1218 drivers/infiniband/hw/hfi1/user_sdma.c 	hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset);
req              1220 drivers/infiniband/hw/hfi1/user_sdma.c 	if (req_opcode(req->info.ctrl) == EXPECTED) {
req              1221 drivers/infiniband/hw/hfi1/user_sdma.c 		tidval = req->tids[req->tididx];
req              1226 drivers/infiniband/hw/hfi1/user_sdma.c 		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
req              1228 drivers/infiniband/hw/hfi1/user_sdma.c 			req->tidoffset = 0;
req              1233 drivers/infiniband/hw/hfi1/user_sdma.c 			if (++req->tididx > req->n_tids - 1 ||
req              1234 drivers/infiniband/hw/hfi1/user_sdma.c 			    !req->tids[req->tididx]) {
req              1237 drivers/infiniband/hw/hfi1/user_sdma.c 			tidval = req->tids[req->tididx];
req              1256 drivers/infiniband/hw/hfi1/user_sdma.c 			pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx,
req              1257 drivers/infiniband/hw/hfi1/user_sdma.c 			req->tidoffset, req->tidoffset >> omfactor,
req              1260 drivers/infiniband/hw/hfi1/user_sdma.c 			  req->tidoffset >> omfactor);
req              1266 drivers/infiniband/hw/hfi1/user_sdma.c 				    req->info.comp_idx, hdr, tidval);
req              1270 drivers/infiniband/hw/hfi1/user_sdma.c static int set_txreq_header_ahg(struct user_sdma_request *req,
req              1276 drivers/infiniband/hw/hfi1/user_sdma.c 	struct hfi1_user_sdma_pkt_q *pq = req->pq;
req              1277 drivers/infiniband/hw/hfi1/user_sdma.c 	struct hfi1_pkt_header *hdr = &req->hdr;
req              1299 drivers/infiniband/hw/hfi1/user_sdma.c 	val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
req              1313 drivers/infiniband/hw/hfi1/user_sdma.c 			     (__force u16)cpu_to_le16(req->koffset & 0xffff));
req              1317 drivers/infiniband/hw/hfi1/user_sdma.c 			     (__force u16)cpu_to_le16(req->koffset >> 16));
req              1320 drivers/infiniband/hw/hfi1/user_sdma.c 	if (req_opcode(req->info.ctrl) == EXPECTED) {
req              1323 drivers/infiniband/hw/hfi1/user_sdma.c 		tidval = req->tids[req->tididx];
req              1329 drivers/infiniband/hw/hfi1/user_sdma.c 		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
req              1331 drivers/infiniband/hw/hfi1/user_sdma.c 			req->tidoffset = 0;
req              1336 drivers/infiniband/hw/hfi1/user_sdma.c 			if (++req->tididx > req->n_tids - 1 ||
req              1337 drivers/infiniband/hw/hfi1/user_sdma.c 			    !req->tids[req->tididx])
req              1339 drivers/infiniband/hw/hfi1/user_sdma.c 			tidval = req->tids[req->tididx];
req              1349 drivers/infiniband/hw/hfi1/user_sdma.c 				((req->tidoffset >> omfactor)
req              1376 drivers/infiniband/hw/hfi1/user_sdma.c 					req->info.comp_idx, req->sde->this_idx,
req              1377 drivers/infiniband/hw/hfi1/user_sdma.c 					req->ahg_idx, ahg, idx, tidval);
req              1380 drivers/infiniband/hw/hfi1/user_sdma.c 			datalen, req->ahg_idx, idx,
req              1381 drivers/infiniband/hw/hfi1/user_sdma.c 			ahg, sizeof(req->hdr),
req              1401 drivers/infiniband/hw/hfi1/user_sdma.c 	struct user_sdma_request *req;
req              1406 drivers/infiniband/hw/hfi1/user_sdma.c 	if (!tx->req)
req              1409 drivers/infiniband/hw/hfi1/user_sdma.c 	req = tx->req;
req              1410 drivers/infiniband/hw/hfi1/user_sdma.c 	pq = req->pq;
req              1411 drivers/infiniband/hw/hfi1/user_sdma.c 	cq = req->cq;
req              1414 drivers/infiniband/hw/hfi1/user_sdma.c 		SDMA_DBG(req, "SDMA completion with error %d",
req              1416 drivers/infiniband/hw/hfi1/user_sdma.c 		WRITE_ONCE(req->has_error, 1);
req              1420 drivers/infiniband/hw/hfi1/user_sdma.c 	req->seqcomp = tx->seqnum;
req              1424 drivers/infiniband/hw/hfi1/user_sdma.c 	if (req->seqcomp != req->info.npkts - 1)
req              1427 drivers/infiniband/hw/hfi1/user_sdma.c 	user_sdma_free_request(req, false);
req              1428 drivers/infiniband/hw/hfi1/user_sdma.c 	set_comp_state(pq, cq, req->info.comp_idx, state, status);
req              1438 drivers/infiniband/hw/hfi1/user_sdma.c static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
req              1442 drivers/infiniband/hw/hfi1/user_sdma.c 	if (!list_empty(&req->txps)) {
req              1445 drivers/infiniband/hw/hfi1/user_sdma.c 		list_for_each_entry_safe(t, p, &req->txps, list) {
req              1449 drivers/infiniband/hw/hfi1/user_sdma.c 			sdma_txclean(req->pq->dd, t);
req              1450 drivers/infiniband/hw/hfi1/user_sdma.c 			kmem_cache_free(req->pq->txreq_cache, tx);
req              1454 drivers/infiniband/hw/hfi1/user_sdma.c 	for (i = 0; i < req->data_iovs; i++) {
req              1455 drivers/infiniband/hw/hfi1/user_sdma.c 		struct sdma_mmu_node *node = req->iovs[i].node;
req              1460 drivers/infiniband/hw/hfi1/user_sdma.c 		req->iovs[i].node = NULL;
req              1463 drivers/infiniband/hw/hfi1/user_sdma.c 			hfi1_mmu_rb_remove(req->pq->handler,
req              1469 drivers/infiniband/hw/hfi1/user_sdma.c 	kfree(req->tids);
req              1470 drivers/infiniband/hw/hfi1/user_sdma.c 	clear_bit(req->info.comp_idx, req->pq->req_in_use);
req               115 drivers/infiniband/hw/hfi1/user_sdma.h #define SDMA_DBG(req, fmt, ...)				     \
req               116 drivers/infiniband/hw/hfi1/user_sdma.h 	hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \
req               117 drivers/infiniband/hw/hfi1/user_sdma.h 		 (req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
req               240 drivers/infiniband/hw/hfi1/user_sdma.h 	struct user_sdma_request *req;
req              1283 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	struct hns_roce_cfg_global_param *req;
req              1289 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	req = (struct hns_roce_cfg_global_param *)desc.data;
req              1290 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	memset(req, 0, sizeof(*req));
req              1291 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	roce_set_field(req->time_cfg_udp_port,
req              1294 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	roce_set_field(req->time_cfg_udp_port,
req              1516 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	struct hns_roce_cfg_bt_attr *req;
req              1520 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	req = (struct hns_roce_cfg_bt_attr *)desc.data;
req              1521 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	memset(req, 0, sizeof(*req));
req              1523 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
req              1526 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
req              1529 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
req              1533 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
req              1536 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
req              1539 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
req              1543 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
req              1546 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
req              1549 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
req              1553 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
req              1556 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
req              1559 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
req              1563 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	roce_set_field(req->vf_sccc_cfg,
req              1567 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	roce_set_field(req->vf_sccc_cfg,
req              1571 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	roce_set_field(req->vf_sccc_cfg,
req               124 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	struct i40iw_alloc_ucontext_req req;
req               128 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	if (ib_copy_from_udata(&req, udata, sizeof(req)))
req               131 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) {
req               132 drivers/infiniband/hw/i40iw/i40iw_verbs.c 		i40iw_pr_err("Unsupported provider library version %u.\n", req.userspace_ver);
req               139 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	uresp.kernel_ver = req.userspace_ver;
req               142 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	ucontext->abi_ver = req.userspace_ver;
req               527 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	struct i40iw_create_qp_req req;
req               628 drivers/infiniband/hw/i40iw/i40iw_verbs.c 		err_code = ib_copy_from_udata(&req, udata, sizeof(req));
req               633 drivers/infiniband/hw/i40iw/i40iw_verbs.c 		iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
req               636 drivers/infiniband/hw/i40iw/i40iw_verbs.c 		if (req.user_wqe_buffers) {
req               642 drivers/infiniband/hw/i40iw/i40iw_verbs.c 			    (unsigned long)req.user_wqe_buffers,
req              1127 drivers/infiniband/hw/i40iw/i40iw_verbs.c 		struct i40iw_create_cq_req req;
req              1130 drivers/infiniband/hw/i40iw/i40iw_verbs.c 		memset(&req, 0, sizeof(req));
req              1132 drivers/infiniband/hw/i40iw/i40iw_verbs.c 		if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
req              1138 drivers/infiniband/hw/i40iw/i40iw_verbs.c 		iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
req              1433 drivers/infiniband/hw/i40iw/i40iw_verbs.c 			      struct i40iw_mem_reg_req *req,
req              1448 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	total = req->sq_pages + req->rq_pages + req->cq_pages;
req              1469 drivers/infiniband/hw/i40iw/i40iw_verbs.c 			ret = i40iw_check_mem_contiguous(arr, req->sq_pages, pg_size);
req              1471 drivers/infiniband/hw/i40iw/i40iw_verbs.c 				ret = i40iw_check_mem_contiguous(&arr[req->sq_pages], req->rq_pages, pg_size);
req              1477 drivers/infiniband/hw/i40iw/i40iw_verbs.c 			hmc_p->idx = palloc->level1.idx + req->sq_pages;
req              1481 drivers/infiniband/hw/i40iw/i40iw_verbs.c 			hmc_p->addr = arr[req->sq_pages];
req              1488 drivers/infiniband/hw/i40iw/i40iw_verbs.c 			ret = i40iw_check_mem_contiguous(arr, req->cq_pages, pg_size);
req              1750 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	struct i40iw_mem_reg_req req;
req              1770 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	if (ib_copy_from_udata(&req, udata, sizeof(req))) {
req              1788 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	if (req.reg_type == IW_MEMREG_TYPE_MEM)
req              1801 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	iwmr->type = req.reg_type;
req              1804 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	switch (req.reg_type) {
req              1806 drivers/infiniband/hw/i40iw/i40iw_verbs.c 		use_pbles = ((req.sq_pages + req.rq_pages) > 2);
req              1807 drivers/infiniband/hw/i40iw/i40iw_verbs.c 		err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
req              1816 drivers/infiniband/hw/i40iw/i40iw_verbs.c 		use_pbles = (req.cq_pages > 1);
req              1817 drivers/infiniband/hw/i40iw/i40iw_verbs.c 		err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
req              1864 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	iwmr->type = req.reg_type;
req              1865 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	if (req.reg_type == IW_MEMREG_TYPE_MEM)
req               543 drivers/infiniband/hw/mlx4/mcg.c 	struct mcast_req *req = NULL;
req               550 drivers/infiniband/hw/mlx4/mcg.c 			req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
req               551 drivers/infiniband/hw/mlx4/mcg.c 			list_del(&req->group_list);
req               552 drivers/infiniband/hw/mlx4/mcg.c 			list_del(&req->func_list);
req               553 drivers/infiniband/hw/mlx4/mcg.c 			--group->func[req->func].num_pend_reqs;
req               555 drivers/infiniband/hw/mlx4/mcg.c 			kfree(req);
req               585 drivers/infiniband/hw/mlx4/mcg.c 			    struct mcast_req *req)
req               589 drivers/infiniband/hw/mlx4/mcg.c 	if (req->clean)
req               590 drivers/infiniband/hw/mlx4/mcg.c 		leave_mask = group->func[req->func].join_state;
req               592 drivers/infiniband/hw/mlx4/mcg.c 	status = check_leave(group, req->func, leave_mask);
req               594 drivers/infiniband/hw/mlx4/mcg.c 		leave_group(group, req->func, leave_mask);
req               596 drivers/infiniband/hw/mlx4/mcg.c 	if (!req->clean)
req               597 drivers/infiniband/hw/mlx4/mcg.c 		send_reply_to_slave(req->func, group, &req->sa_mad, status);
req               598 drivers/infiniband/hw/mlx4/mcg.c 	--group->func[req->func].num_pend_reqs;
req               599 drivers/infiniband/hw/mlx4/mcg.c 	list_del(&req->group_list);
req               600 drivers/infiniband/hw/mlx4/mcg.c 	list_del(&req->func_list);
req               601 drivers/infiniband/hw/mlx4/mcg.c 	kfree(req);
req               606 drivers/infiniband/hw/mlx4/mcg.c 			   struct mcast_req *req)
req               611 drivers/infiniband/hw/mlx4/mcg.c 	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
req               615 drivers/infiniband/hw/mlx4/mcg.c 		status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask);
req               617 drivers/infiniband/hw/mlx4/mcg.c 			join_group(group, req->func, join_mask);
req               619 drivers/infiniband/hw/mlx4/mcg.c 		--group->func[req->func].num_pend_reqs;
req               620 drivers/infiniband/hw/mlx4/mcg.c 		send_reply_to_slave(req->func, group, &req->sa_mad, status);
req               621 drivers/infiniband/hw/mlx4/mcg.c 		list_del(&req->group_list);
req               622 drivers/infiniband/hw/mlx4/mcg.c 		list_del(&req->func_list);
req               623 drivers/infiniband/hw/mlx4/mcg.c 		kfree(req);
req               628 drivers/infiniband/hw/mlx4/mcg.c 		if (send_join_to_wire(group, &req->sa_mad)) {
req               629 drivers/infiniband/hw/mlx4/mcg.c 			--group->func[req->func].num_pend_reqs;
req               630 drivers/infiniband/hw/mlx4/mcg.c 			list_del(&req->group_list);
req               631 drivers/infiniband/hw/mlx4/mcg.c 			list_del(&req->func_list);
req               632 drivers/infiniband/hw/mlx4/mcg.c 			kfree(req);
req               645 drivers/infiniband/hw/mlx4/mcg.c 	struct mcast_req *req = NULL;
req               674 drivers/infiniband/hw/mlx4/mcg.c 				req = list_first_entry(&group->pending_list,
req               677 drivers/infiniband/hw/mlx4/mcg.c 					if (req) {
req               678 drivers/infiniband/hw/mlx4/mcg.c 						send_reply_to_slave(req->func, group, &req->sa_mad, status);
req               679 drivers/infiniband/hw/mlx4/mcg.c 						--group->func[req->func].num_pend_reqs;
req               680 drivers/infiniband/hw/mlx4/mcg.c 						list_del(&req->group_list);
req               681 drivers/infiniband/hw/mlx4/mcg.c 						list_del(&req->func_list);
req               682 drivers/infiniband/hw/mlx4/mcg.c 						kfree(req);
req               710 drivers/infiniband/hw/mlx4/mcg.c 		req = list_first_entry(&group->pending_list, struct mcast_req,
req               712 drivers/infiniband/hw/mlx4/mcg.c 		sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
req               718 drivers/infiniband/hw/mlx4/mcg.c 		if (req->sa_mad.mad_hdr.method == IB_SA_METHOD_DELETE)
req               719 drivers/infiniband/hw/mlx4/mcg.c 			rc += handle_leave_req(group, req_join_state, req);
req               721 drivers/infiniband/hw/mlx4/mcg.c 			rc += handle_join_req(group, req_join_state, req);
req               751 drivers/infiniband/hw/mlx4/mcg.c 	struct mcast_req *req;
req               766 drivers/infiniband/hw/mlx4/mcg.c 					req = list_first_entry(&group->pending_list,
req               768 drivers/infiniband/hw/mlx4/mcg.c 					--group->func[req->func].num_pend_reqs;
req               769 drivers/infiniband/hw/mlx4/mcg.c 					list_del(&req->group_list);
req               770 drivers/infiniband/hw/mlx4/mcg.c 					list_del(&req->func_list);
req               771 drivers/infiniband/hw/mlx4/mcg.c 					kfree(req);
req               869 drivers/infiniband/hw/mlx4/mcg.c static void queue_req(struct mcast_req *req)
req               871 drivers/infiniband/hw/mlx4/mcg.c 	struct mcast_group *group = req->group;
req               875 drivers/infiniband/hw/mlx4/mcg.c 	list_add_tail(&req->group_list, &group->pending_list);
req               876 drivers/infiniband/hw/mlx4/mcg.c 	list_add_tail(&req->func_list, &group->func[req->func].pending);
req               938 drivers/infiniband/hw/mlx4/mcg.c 	struct mcast_req *req;
req               949 drivers/infiniband/hw/mlx4/mcg.c 		req = kzalloc(sizeof *req, GFP_KERNEL);
req               950 drivers/infiniband/hw/mlx4/mcg.c 		if (!req)
req               953 drivers/infiniband/hw/mlx4/mcg.c 		req->func = slave;
req               954 drivers/infiniband/hw/mlx4/mcg.c 		req->sa_mad = *sa_mad;
req               960 drivers/infiniband/hw/mlx4/mcg.c 			kfree(req);
req               969 drivers/infiniband/hw/mlx4/mcg.c 			kfree(req);
req               973 drivers/infiniband/hw/mlx4/mcg.c 		req->group = group;
req               974 drivers/infiniband/hw/mlx4/mcg.c 		queue_req(req);
req               995 drivers/infiniband/hw/mlx4/mcg.c 	struct mcast_req *req = NULL;
req              1010 drivers/infiniband/hw/mlx4/mcg.c 		req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
req              1012 drivers/infiniband/hw/mlx4/mcg.c 				be64_to_cpu(req->sa_mad.mad_hdr.tid));
req              1062 drivers/infiniband/hw/mlx4/mcg.c 	struct mcast_req *req, *tmp;
req              1064 drivers/infiniband/hw/mlx4/mcg.c 	list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) {
req              1065 drivers/infiniband/hw/mlx4/mcg.c 		list_del(&req->group_list);
req              1066 drivers/infiniband/hw/mlx4/mcg.c 		kfree(req);
req              1155 drivers/infiniband/hw/mlx4/mcg.c static void build_leave_mad(struct mcast_req *req)
req              1157 drivers/infiniband/hw/mlx4/mcg.c 	struct ib_sa_mad *mad = &req->sa_mad;
req              1165 drivers/infiniband/hw/mlx4/mcg.c 	struct mcast_req *req, *tmp, *group_first = NULL;
req              1172 drivers/infiniband/hw/mlx4/mcg.c 	list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) {
req              1174 drivers/infiniband/hw/mlx4/mcg.c 		if (group_first == req &&
req              1183 drivers/infiniband/hw/mlx4/mcg.c 			list_del(&req->group_list);
req              1184 drivers/infiniband/hw/mlx4/mcg.c 			list_del(&req->func_list);
req              1185 drivers/infiniband/hw/mlx4/mcg.c 			kfree(req);
req              1198 drivers/infiniband/hw/mlx4/mcg.c 	struct mcast_req *req;
req              1204 drivers/infiniband/hw/mlx4/mcg.c 	req = kzalloc(sizeof *req, GFP_KERNEL);
req              1205 drivers/infiniband/hw/mlx4/mcg.c 	if (!req)
req              1211 drivers/infiniband/hw/mlx4/mcg.c 			kfree(req);
req              1216 drivers/infiniband/hw/mlx4/mcg.c 	req->clean = 1;
req              1217 drivers/infiniband/hw/mlx4/mcg.c 	req->func = slave;
req              1218 drivers/infiniband/hw/mlx4/mcg.c 	req->group = group;
req              1220 drivers/infiniband/hw/mlx4/mcg.c 	build_leave_mad(req);
req              1221 drivers/infiniband/hw/mlx4/mcg.c 	queue_req(req);
req              1609 drivers/infiniband/hw/mlx5/main.c 			     struct mlx5_ib_alloc_ucontext_req_v2 *req,
req              1614 drivers/infiniband/hw/mlx5/main.c 	int ref_bfregs = req->total_num_bfregs;
req              1616 drivers/infiniband/hw/mlx5/main.c 	if (req->total_num_bfregs == 0)
req              1622 drivers/infiniband/hw/mlx5/main.c 	if (req->total_num_bfregs > MLX5_MAX_BFREGS)
req              1628 drivers/infiniband/hw/mlx5/main.c 	req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
req              1629 drivers/infiniband/hw/mlx5/main.c 	if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
req              1632 drivers/infiniband/hw/mlx5/main.c 	bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
req              1634 drivers/infiniband/hw/mlx5/main.c 	bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
req              1640 drivers/infiniband/hw/mlx5/main.c 		    req->total_num_bfregs, bfregi->total_num_bfregs,
req              1770 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
req              1792 drivers/infiniband/hw/mlx5/main.c 	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
req              1796 drivers/infiniband/hw/mlx5/main.c 	if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
req              1799 drivers/infiniband/hw/mlx5/main.c 	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
req              1802 drivers/infiniband/hw/mlx5/main.c 	req.total_num_bfregs = ALIGN(req.total_num_bfregs,
req              1804 drivers/infiniband/hw/mlx5/main.c 	if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
req              1818 drivers/infiniband/hw/mlx5/main.c 				 req.max_cqe_version);
req              1838 drivers/infiniband/hw/mlx5/main.c 	lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
req              1842 drivers/infiniband/hw/mlx5/main.c 	err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
req              1867 drivers/infiniband/hw/mlx5/main.c 	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
req              1888 drivers/infiniband/hw/mlx5/main.c 	resp.tot_bfregs = req.total_num_bfregs;
req              1955 drivers/infiniband/hw/mlx5/main.c 	bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
req              1957 drivers/infiniband/hw/mlx5/main.c 	context->lib_caps = req.lib_caps;
req              1973 drivers/infiniband/hw/mlx5/main.c 	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
req              1898 drivers/infiniband/hw/mlx5/mr.c 	struct mlx5_ib_alloc_mw req = {};
req              1904 drivers/infiniband/hw/mlx5/mr.c 	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
req              1908 drivers/infiniband/hw/mlx5/mr.c 	if (req.comp_mask || req.reserved1 || req.reserved2)
req              1911 drivers/infiniband/hw/mlx5/mr.c 	if (udata->inlen > sizeof(req) &&
req              1912 drivers/infiniband/hw/mlx5/mr.c 	    !ib_is_udata_cleared(udata, sizeof(req),
req              1913 drivers/infiniband/hw/mlx5/mr.c 				 udata->inlen - sizeof(req)))
req              1916 drivers/infiniband/hw/mlx5/mr.c 	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
req              3744 drivers/infiniband/hw/mlx5/qp.c static inline bool is_valid_mask(int mask, int req, int opt)
req              3746 drivers/infiniband/hw/mlx5/qp.c 	if ((mask & req) != req)
req              3749 drivers/infiniband/hw/mlx5/qp.c 	if (mask & ~(req | opt))
req              3761 drivers/infiniband/hw/mlx5/qp.c 	int req = IB_QP_STATE;
req              3765 drivers/infiniband/hw/mlx5/qp.c 		return is_valid_mask(attr_mask, req, opt);
req              3767 drivers/infiniband/hw/mlx5/qp.c 		req |= IB_QP_PKEY_INDEX | IB_QP_PORT;
req              3768 drivers/infiniband/hw/mlx5/qp.c 		return is_valid_mask(attr_mask, req, opt);
req              3771 drivers/infiniband/hw/mlx5/qp.c 		return is_valid_mask(attr_mask, req, opt);
req              3773 drivers/infiniband/hw/mlx5/qp.c 		req |= IB_QP_PATH_MTU;
req              3775 drivers/infiniband/hw/mlx5/qp.c 		return is_valid_mask(attr_mask, req, opt);
req              3777 drivers/infiniband/hw/mlx5/qp.c 		req |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
req              3780 drivers/infiniband/hw/mlx5/qp.c 		return is_valid_mask(attr_mask, req, opt);
req              3783 drivers/infiniband/hw/mlx5/qp.c 		return is_valid_mask(attr_mask, req, opt);
req              3785 drivers/infiniband/hw/mlx5/qp.c 		return is_valid_mask(attr_mask, req, opt);
req               423 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	ocrdma_init_mch(&cmd->req, opcode, OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
req               440 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON,
req               443 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	cmd->req.rsvd_version = 2;
req               532 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ,
req               535 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	cmd->req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
req               576 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT,
req               578 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	cmd->req.rsvd_version = 1;
req              1288 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	struct ocrdma_rdma_stats_req *req = dev->stats_mem.va;
req              1307 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	memcpy(old_stats, req, sizeof(struct ocrdma_rdma_stats_resp));
req              1308 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	memset(req, 0, dev->stats_mem.size);
req              1310 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	ocrdma_init_mch((struct ocrdma_mbx_hdr *)req,
req              1315 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		req->reset_stats = reset;
req              1320 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		memcpy(req, old_stats, sizeof(struct ocrdma_rdma_stats_resp));
req              1322 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		ocrdma_le32_to_cpu(req, dev->stats_mem.size);
req              1818 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
req              1832 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER3;
req              1896 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_DELETE_CQ,
req              2884 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	struct ocrdma_get_dcbx_cfg_req *req = NULL;
req              2892 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	req = dma_alloc_coherent(&pdev->dev, cmd.hdr.pyld_len, &pa, GFP_KERNEL);
req              2893 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	if (!req) {
req              2904 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	ocrdma_init_mch(&req->hdr, OCRDMA_CMD_GET_DCBX_CONFIG,
req              2906 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	req->param_type = ptype;
req              2912 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	rsp = (struct ocrdma_get_dcbx_cfg_rsp *)req;
req              2917 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	dma_free_coherent(&pdev->dev, cmd.hdr.pyld_len, req, pa);
req              3127 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY,
req               308 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req               319 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req               348 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req               791 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req               832 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req               857 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req               974 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req              1047 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req              1205 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req              1237 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req              1273 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req              1326 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_rsp req;
req              1347 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_rsp req;
req              1360 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_rsp req;
req              1372 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req              1390 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req              1401 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req              1419 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req              1426 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req              1460 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req              1476 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req              1609 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req              1628 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req              1642 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req              1672 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req              1686 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	struct ocrdma_mbx_hdr req;
req               100 drivers/infiniband/hw/qedr/qedr_hsi_rdma.h 	struct rdma_cqe_requester req;
req              3654 drivers/infiniband/hw/qedr/verbs.c 	struct rdma_cqe_requester *resp_cqe = &cqe->req;
req              3662 drivers/infiniband/hw/qedr/verbs.c 	struct rdma_cqe_requester *resp_cqe = &cqe->req;
req              3673 drivers/infiniband/hw/qedr/verbs.c 	struct rdma_cqe_requester *resp_cqe = &cqe->req;
req              3755 drivers/infiniband/hw/qedr/verbs.c 			    struct rdma_cqe_requester *req)
req              3759 drivers/infiniband/hw/qedr/verbs.c 	switch (req->status) {
req              3761 drivers/infiniband/hw/qedr/verbs.c 		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
req              3769 drivers/infiniband/hw/qedr/verbs.c 		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
req              3776 drivers/infiniband/hw/qedr/verbs.c 				  req->sq_cons - 1, IB_WC_SUCCESS, 0);
req              3782 drivers/infiniband/hw/qedr/verbs.c 			switch (req->status) {
req              3849 drivers/infiniband/hw/qedr/verbs.c 			cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
req              4046 drivers/infiniband/hw/qedr/verbs.c 				struct rdma_cqe_requester *req, int *update)
req              4048 drivers/infiniband/hw/qedr/verbs.c 	if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
req              4095 drivers/infiniband/hw/qedr/verbs.c 					       &cqe->req);
req              4096 drivers/infiniband/hw/qedr/verbs.c 			try_consume_req_cqe(cq, qp, &cqe->req, &update);
req               862 drivers/infiniband/hw/qib/qib_sd7220.c 	int cnt, sofar, req;
req               866 drivers/infiniband/hw/qib/qib_sd7220.c 		req = len - sofar;
req               867 drivers/infiniband/hw/qib/qib_sd7220.c 		if (req > PROG_CHUNK)
req               868 drivers/infiniband/hw/qib/qib_sd7220.c 			req = PROG_CHUNK;
req               870 drivers/infiniband/hw/qib/qib_sd7220.c 					  (u8 *)img + sofar, req, 0);
req               871 drivers/infiniband/hw/qib/qib_sd7220.c 		if (cnt < req) {
req               875 drivers/infiniband/hw/qib/qib_sd7220.c 		sofar += req;
req               886 drivers/infiniband/hw/qib/qib_sd7220.c 	int cnt, sofar, req, idx, errors;
req               892 drivers/infiniband/hw/qib/qib_sd7220.c 		req = len - sofar;
req               893 drivers/infiniband/hw/qib/qib_sd7220.c 		if (req > VFY_CHUNK)
req               894 drivers/infiniband/hw/qib/qib_sd7220.c 			req = VFY_CHUNK;
req               896 drivers/infiniband/hw/qib/qib_sd7220.c 					  readback, req, 1);
req               897 drivers/infiniband/hw/qib/qib_sd7220.c 		if (cnt < req) {
req               548 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h int pvrdma_cmd_post(struct pvrdma_dev *dev, union pvrdma_cmd_req *req,
req                83 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c pvrdma_cmd_post(struct pvrdma_dev *dev, union pvrdma_cmd_req *req,
req                97 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c 	memcpy(dev->cmd_slot, req, sizeof(*req));
req               111 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	union pvrdma_cmd_req req;
req               113 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
req               185 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP);
req               241 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	union pvrdma_cmd_req req;
req               242 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	struct pvrdma_cmd_destroy_cq *cmd = &req.destroy_cq;
req               251 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
req               616 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 	union pvrdma_cmd_req req;
req               617 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 	struct pvrdma_cmd_create_bind *cmd_bind = &req.create_bind;
req               632 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
req               654 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 	union pvrdma_cmd_req req;
req               655 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 	struct pvrdma_cmd_destroy_bind *cmd_dest = &req.destroy_bind;
req               668 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
req                62 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	union pvrdma_cmd_req req;
req                64 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
req                85 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
req               118 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	union pvrdma_cmd_req req;
req               120 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
req               174 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
req               209 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	union pvrdma_cmd_req req;
req               211 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
req               245 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
req               280 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	union pvrdma_cmd_req req;
req               281 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	struct pvrdma_cmd_destroy_mr *cmd = &req.destroy_mr;
req               287 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
req               194 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	union pvrdma_cmd_req req;
req               196 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	struct pvrdma_cmd_create_qp *cmd = &req.create_qp;
req               374 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_QP_RESP);
req               448 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	union pvrdma_cmd_req req;
req               449 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	struct pvrdma_cmd_destroy_qp *cmd = &req.destroy_qp;
req               456 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	ret = pvrdma_cmd_post(to_vdev(qp->device), &req, NULL, 0);
req               480 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	union pvrdma_cmd_req req;
req               482 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	struct pvrdma_cmd_modify_qp *cmd = &req.modify_qp;
req               562 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_MODIFY_QP_RESP);
req               926 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	union pvrdma_cmd_req req;
req               928 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	struct pvrdma_cmd_query_qp *cmd = &req.query_qp;
req               944 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_QP_RESP);
req                66 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c 	union pvrdma_cmd_req req;
req                68 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c 	struct pvrdma_cmd_query_srq *cmd = &req.query_srq;
req                76 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c 	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_SRQ_RESP);
req               104 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c 	union pvrdma_cmd_req req;
req               106 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c 	struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
req               183 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c 	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_SRQ_RESP);
req               246 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c 	union pvrdma_cmd_req req;
req               247 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c 	struct pvrdma_cmd_destroy_srq *cmd = &req.destroy_srq;
req               255 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c 	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
req               277 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c 	union pvrdma_cmd_req req;
req               278 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c 	struct pvrdma_cmd_modify_srq *cmd = &req.modify_srq;
req               292 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c 	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
req               132 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	union pvrdma_cmd_req req;
req               134 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	struct pvrdma_cmd_query_port *cmd = &req.query_port;
req               142 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	err = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_PORT_RESP);
req               212 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	union pvrdma_cmd_req req;
req               214 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	struct pvrdma_cmd_query_pkey *cmd = &req.query_pkey;
req               221 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	err = pvrdma_cmd_post(to_vdev(ibdev), &req, &rsp,
req               318 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	union pvrdma_cmd_req req = {};
req               320 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
req               340 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	ret = pvrdma_cmd_post(vdev, &req, &rsp, PVRDMA_CMD_CREATE_UC_RESP);
req               372 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	union pvrdma_cmd_req req = {};
req               373 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	struct pvrdma_cmd_destroy_uc *cmd = &req.destroy_uc;
req               379 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	ret = pvrdma_cmd_post(context->dev, &req, NULL, 0);
req               432 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	union pvrdma_cmd_req req = {};
req               434 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	struct pvrdma_cmd_create_pd *cmd = &req.create_pd;
req               447 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP);
req               487 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	union pvrdma_cmd_req req = {};
req               488 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	struct pvrdma_cmd_destroy_pd *cmd = &req.destroy_pd;
req               494 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
req               330 drivers/infiniband/sw/rxe/rxe_comp.c 					if (qp->req.wait_psn) {
req               331 drivers/infiniband/sw/rxe/rxe_comp.c 						qp->req.wait_psn = 0;
req               332 drivers/infiniband/sw/rxe/rxe_comp.c 						rxe_run_task(&qp->req.task, 0);
req               464 drivers/infiniband/sw/rxe/rxe_comp.c 	if (qp->req.wait_fence) {
req               465 drivers/infiniband/sw/rxe/rxe_comp.c 		qp->req.wait_fence = 0;
req               466 drivers/infiniband/sw/rxe/rxe_comp.c 		rxe_run_task(&qp->req.task, 0);
req               478 drivers/infiniband/sw/rxe/rxe_comp.c 		atomic_inc(&qp->req.rd_atomic);
req               479 drivers/infiniband/sw/rxe/rxe_comp.c 		if (qp->req.need_rd_atomic) {
req               481 drivers/infiniband/sw/rxe/rxe_comp.c 			qp->req.need_rd_atomic = 0;
req               482 drivers/infiniband/sw/rxe/rxe_comp.c 			rxe_run_task(&qp->req.task, 0);
req               486 drivers/infiniband/sw/rxe/rxe_comp.c 	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
req               489 drivers/infiniband/sw/rxe/rxe_comp.c 		if ((qp->req.state == QP_STATE_DRAIN) &&
req               490 drivers/infiniband/sw/rxe/rxe_comp.c 		    (qp->comp.psn == qp->req.psn)) {
req               491 drivers/infiniband/sw/rxe/rxe_comp.c 			qp->req.state = QP_STATE_DRAINED;
req               526 drivers/infiniband/sw/rxe/rxe_comp.c 		if (qp->req.wait_psn) {
req               527 drivers/infiniband/sw/rxe/rxe_comp.c 			qp->req.wait_psn = 0;
req               528 drivers/infiniband/sw/rxe/rxe_comp.c 			rxe_run_task(&qp->req.task, 1);
req               568 drivers/infiniband/sw/rxe/rxe_comp.c 	if (!qp->valid || qp->req.state == QP_STATE_ERROR ||
req               569 drivers/infiniband/sw/rxe/rxe_comp.c 	    qp->req.state == QP_STATE_RESET) {
req               571 drivers/infiniband/sw/rxe/rxe_comp.c 				    qp->req.state == QP_STATE_ERROR);
req               582 drivers/infiniband/sw/rxe/rxe_comp.c 	if (qp->req.need_retry)
req               645 drivers/infiniband/sw/rxe/rxe_comp.c 			if (qp->req.wait_psn) {
req               646 drivers/infiniband/sw/rxe/rxe_comp.c 				qp->req.wait_psn = 0;
req               647 drivers/infiniband/sw/rxe/rxe_comp.c 				rxe_run_task(&qp->req.task, 1);
req               676 drivers/infiniband/sw/rxe/rxe_comp.c 			    (qp->req.state == QP_STATE_READY) &&
req               677 drivers/infiniband/sw/rxe/rxe_comp.c 			    (psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
req               719 drivers/infiniband/sw/rxe/rxe_comp.c 				if (psn_compare(qp->req.psn,
req               726 drivers/infiniband/sw/rxe/rxe_comp.c 					qp->req.need_retry = 1;
req               728 drivers/infiniband/sw/rxe/rxe_comp.c 					rxe_run_task(&qp->req.task, 0);
req               751 drivers/infiniband/sw/rxe/rxe_comp.c 				qp->req.need_retry = 1;
req               257 drivers/infiniband/sw/rxe/rxe_loc.h 	if ((is_request && (qp->req.state != QP_STATE_READY)) ||
req               419 drivers/infiniband/sw/rxe/rxe_net.c 		rxe_run_task(&qp->req.task, 1);
req               266 drivers/infiniband/sw/rxe/rxe_qp.c 	qp->req.wqe_index	= producer_index(qp->sq.queue);
req               267 drivers/infiniband/sw/rxe/rxe_qp.c 	qp->req.state		= QP_STATE_RESET;
req               268 drivers/infiniband/sw/rxe/rxe_qp.c 	qp->req.opcode		= -1;
req               274 drivers/infiniband/sw/rxe/rxe_qp.c 	rxe_init_task(rxe, &qp->req.task, qp,
req               429 drivers/infiniband/sw/rxe/rxe_qp.c 			if (qp->req.state == QP_STATE_DRAIN &&
req               509 drivers/infiniband/sw/rxe/rxe_qp.c 		rxe_disable_task(&qp->req.task);
req               513 drivers/infiniband/sw/rxe/rxe_qp.c 	qp->req.state = QP_STATE_RESET;
req               523 drivers/infiniband/sw/rxe/rxe_qp.c 		__rxe_do_task(&qp->req.task);
req               529 drivers/infiniband/sw/rxe/rxe_qp.c 	qp->req.opcode = -1;
req               530 drivers/infiniband/sw/rxe/rxe_qp.c 	qp->req.need_retry = 0;
req               531 drivers/infiniband/sw/rxe/rxe_qp.c 	qp->req.noack_pkts = 0;
req               552 drivers/infiniband/sw/rxe/rxe_qp.c 		rxe_enable_task(&qp->req.task);
req               560 drivers/infiniband/sw/rxe/rxe_qp.c 		if (qp->req.state != QP_STATE_DRAINED) {
req               561 drivers/infiniband/sw/rxe/rxe_qp.c 			qp->req.state = QP_STATE_DRAIN;
req               566 drivers/infiniband/sw/rxe/rxe_qp.c 			rxe_run_task(&qp->req.task, 1);
req               574 drivers/infiniband/sw/rxe/rxe_qp.c 	qp->req.state = QP_STATE_ERROR;
req               585 drivers/infiniband/sw/rxe/rxe_qp.c 	rxe_run_task(&qp->req.task, 1);
req               598 drivers/infiniband/sw/rxe/rxe_qp.c 		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
req               689 drivers/infiniband/sw/rxe/rxe_qp.c 		qp->req.psn = qp->attr.sq_psn;
req               691 drivers/infiniband/sw/rxe/rxe_qp.c 		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
req               711 drivers/infiniband/sw/rxe/rxe_qp.c 			qp->req.state = QP_STATE_INIT;
req               722 drivers/infiniband/sw/rxe/rxe_qp.c 			qp->req.state = QP_STATE_READY;
req               751 drivers/infiniband/sw/rxe/rxe_qp.c 	attr->sq_psn				= qp->req.psn;
req               765 drivers/infiniband/sw/rxe/rxe_qp.c 	if (qp->req.state == QP_STATE_DRAIN) {
req               793 drivers/infiniband/sw/rxe/rxe_qp.c 	rxe_cleanup_task(&qp->req.task);
req               797 drivers/infiniband/sw/rxe/rxe_qp.c 	__rxe_do_task(&qp->req.task);
req               800 drivers/infiniband/sw/rxe/rxe_qp.c 		__rxe_do_task(&qp->req.task);
req                74 drivers/infiniband/sw/rxe/rxe_recv.c 	} else if (unlikely(qp->req.state < QP_STATE_READY ||
req                75 drivers/infiniband/sw/rxe/rxe_recv.c 				qp->req.state > QP_STATE_DRAINED)) {
req                54 drivers/infiniband/sw/rxe/rxe_req.c 		qp->req.opcode = next_opcode(qp, wqe,
req                76 drivers/infiniband/sw/rxe/rxe_req.c 	qp->req.wqe_index	= consumer_index(qp->sq.queue);
req                77 drivers/infiniband/sw/rxe/rxe_req.c 	qp->req.psn		= qp->comp.psn;
req                78 drivers/infiniband/sw/rxe/rxe_req.c 	qp->req.opcode		= -1;
req               129 drivers/infiniband/sw/rxe/rxe_req.c 	rxe_run_task(&qp->req.task, 1);
req               137 drivers/infiniband/sw/rxe/rxe_req.c 	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
req               143 drivers/infiniband/sw/rxe/rxe_req.c 			if (qp->req.state != QP_STATE_DRAIN) {
req               150 drivers/infiniband/sw/rxe/rxe_req.c 			if (wqe && ((qp->req.wqe_index !=
req               159 drivers/infiniband/sw/rxe/rxe_req.c 			qp->req.state = QP_STATE_DRAINED;
req               174 drivers/infiniband/sw/rxe/rxe_req.c 	if (qp->req.wqe_index == producer_index(qp->sq.queue))
req               177 drivers/infiniband/sw/rxe/rxe_req.c 	wqe = addr_from_index(qp->sq.queue, qp->req.wqe_index);
req               179 drivers/infiniband/sw/rxe/rxe_req.c 	if (unlikely((qp->req.state == QP_STATE_DRAIN ||
req               180 drivers/infiniband/sw/rxe/rxe_req.c 		      qp->req.state == QP_STATE_DRAINED) &&
req               185 drivers/infiniband/sw/rxe/rxe_req.c 		     (qp->req.wqe_index != consumer_index(qp->sq.queue)))) {
req               186 drivers/infiniband/sw/rxe/rxe_req.c 		qp->req.wait_fence = 1;
req               198 drivers/infiniband/sw/rxe/rxe_req.c 		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
req               199 drivers/infiniband/sw/rxe/rxe_req.c 		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
req               209 drivers/infiniband/sw/rxe/rxe_req.c 		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
req               210 drivers/infiniband/sw/rxe/rxe_req.c 		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
req               220 drivers/infiniband/sw/rxe/rxe_req.c 		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
req               221 drivers/infiniband/sw/rxe/rxe_req.c 		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
req               231 drivers/infiniband/sw/rxe/rxe_req.c 		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
req               232 drivers/infiniband/sw/rxe/rxe_req.c 		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
req               251 drivers/infiniband/sw/rxe/rxe_req.c 		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
req               252 drivers/infiniband/sw/rxe/rxe_req.c 		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
req               270 drivers/infiniband/sw/rxe/rxe_req.c 		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
req               271 drivers/infiniband/sw/rxe/rxe_req.c 		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
req               281 drivers/infiniband/sw/rxe/rxe_req.c 		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
req               282 drivers/infiniband/sw/rxe/rxe_req.c 		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
req               292 drivers/infiniband/sw/rxe/rxe_req.c 		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
req               293 drivers/infiniband/sw/rxe/rxe_req.c 		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
req               303 drivers/infiniband/sw/rxe/rxe_req.c 		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
req               304 drivers/infiniband/sw/rxe/rxe_req.c 		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
req               355 drivers/infiniband/sw/rxe/rxe_req.c 	qp->req.need_rd_atomic = 1;
req               356 drivers/infiniband/sw/rxe/rxe_req.c 	depth = atomic_dec_return(&qp->req.rd_atomic);
req               359 drivers/infiniband/sw/rxe/rxe_req.c 		qp->req.need_rd_atomic = 0;
req               364 drivers/infiniband/sw/rxe/rxe_req.c 	atomic_inc(&qp->req.rd_atomic);
req               403 drivers/infiniband/sw/rxe/rxe_req.c 	pkt->psn	= qp->req.psn;
req               430 drivers/infiniband/sw/rxe/rxe_req.c 		(qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
req               432 drivers/infiniband/sw/rxe/rxe_req.c 		qp->req.noack_pkts = 0;
req               542 drivers/infiniband/sw/rxe/rxe_req.c 		wqe->first_psn = qp->req.psn;
req               543 drivers/infiniband/sw/rxe/rxe_req.c 		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
req               547 drivers/infiniband/sw/rxe/rxe_req.c 		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
req               549 drivers/infiniband/sw/rxe/rxe_req.c 		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
req               560 drivers/infiniband/sw/rxe/rxe_req.c 	*rollback_psn		= qp->req.psn;
req               571 drivers/infiniband/sw/rxe/rxe_req.c 	qp->req.psn    = rollback_psn;
req               577 drivers/infiniband/sw/rxe/rxe_req.c 	qp->req.opcode = pkt->opcode;
req               580 drivers/infiniband/sw/rxe/rxe_req.c 		qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
req               606 drivers/infiniband/sw/rxe/rxe_req.c 	if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
req               609 drivers/infiniband/sw/rxe/rxe_req.c 	if (unlikely(qp->req.state == QP_STATE_RESET)) {
req               610 drivers/infiniband/sw/rxe/rxe_req.c 		qp->req.wqe_index = consumer_index(qp->sq.queue);
req               611 drivers/infiniband/sw/rxe/rxe_req.c 		qp->req.opcode = -1;
req               612 drivers/infiniband/sw/rxe/rxe_req.c 		qp->req.need_rd_atomic = 0;
req               613 drivers/infiniband/sw/rxe/rxe_req.c 		qp->req.wait_psn = 0;
req               614 drivers/infiniband/sw/rxe/rxe_req.c 		qp->req.need_retry = 0;
req               618 drivers/infiniband/sw/rxe/rxe_req.c 	if (unlikely(qp->req.need_retry)) {
req               620 drivers/infiniband/sw/rxe/rxe_req.c 		qp->req.need_retry = 0;
req               661 drivers/infiniband/sw/rxe/rxe_req.c 		qp->req.wqe_index = next_index(qp->sq.queue,
req               662 drivers/infiniband/sw/rxe/rxe_req.c 						qp->req.wqe_index);
req               667 drivers/infiniband/sw/rxe/rxe_req.c 		     qp->req.psn > (qp->comp.psn + RXE_MAX_UNACKED_PSNS))) {
req               668 drivers/infiniband/sw/rxe/rxe_req.c 		qp->req.wait_psn = 1;
req               702 drivers/infiniband/sw/rxe/rxe_req.c 			wqe->first_psn = qp->req.psn;
req               703 drivers/infiniband/sw/rxe/rxe_req.c 			wqe->last_psn = qp->req.psn;
req               704 drivers/infiniband/sw/rxe/rxe_req.c 			qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
req               705 drivers/infiniband/sw/rxe/rxe_req.c 			qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
req               706 drivers/infiniband/sw/rxe/rxe_req.c 			qp->req.wqe_index = next_index(qp->sq.queue,
req               707 drivers/infiniband/sw/rxe/rxe_req.c 						       qp->req.wqe_index);
req               745 drivers/infiniband/sw/rxe/rxe_req.c 			rxe_run_task(&qp->req.task, 1);
req               711 drivers/infiniband/sw/rxe/rxe_verbs.c 	rxe_run_task(&qp->req.task, 1);
req               712 drivers/infiniband/sw/rxe/rxe_verbs.c 	if (unlikely(qp->req.state == QP_STATE_ERROR))
req               728 drivers/infiniband/sw/rxe/rxe_verbs.c 	if (unlikely(qp->req.state < QP_STATE_READY)) {
req               735 drivers/infiniband/sw/rxe/rxe_verbs.c 		rxe_run_task(&qp->req.task, 0);
req               268 drivers/infiniband/sw/rxe/rxe_verbs.h 	struct rxe_req_info	req;
req               592 drivers/infiniband/sw/siw/siw_cm.c 	struct mpa_rr *req;
req               600 drivers/infiniband/sw/siw/siw_cm.c 	req = &cep->mpa.hdr;
req               602 drivers/infiniband/sw/siw/siw_cm.c 	version = __mpa_rr_revision(req->params.bits);
req               603 drivers/infiniband/sw/siw/siw_cm.c 	pd_len = be16_to_cpu(req->params.pd_len);
req               609 drivers/infiniband/sw/siw/siw_cm.c 	if (memcmp(req->key, MPA_KEY_REQ, 16))
req               613 drivers/infiniband/sw/siw/siw_cm.c 	memcpy(req->key, MPA_KEY_REP, 16);
req               616 drivers/infiniband/sw/siw/siw_cm.c 	    (req->params.bits & MPA_RR_FLAG_ENHANCED)) {
req               629 drivers/infiniband/sw/siw/siw_cm.c 	if (req->params.bits & MPA_RR_FLAG_MARKERS)
req               632 drivers/infiniband/sw/siw/siw_cm.c 	if (req->params.bits & MPA_RR_FLAG_CRC) {
req               644 drivers/infiniband/sw/siw/siw_cm.c 			req->params.bits |= MPA_RR_FLAG_CRC;
req               695 drivers/infiniband/sw/siw/siw_cm.c 		    req->params.bits & MPA_RR_FLAG_CRC ? 1 : 0,
req               697 drivers/infiniband/sw/siw/siw_cm.c 		    req->params.bits & MPA_RR_FLAG_MARKERS ? 1 : 0, 0);
req               699 drivers/infiniband/sw/siw/siw_cm.c 	req->params.bits &= ~MPA_RR_FLAG_MARKERS;
req               700 drivers/infiniband/sw/siw/siw_cm.c 	req->params.bits |= MPA_RR_FLAG_REJECT;
req               703 drivers/infiniband/sw/siw/siw_cm.c 		req->params.bits &= ~MPA_RR_FLAG_CRC;
req               422 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			     const struct ib_cm_req_event_param *req,
req               435 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	rep.rnr_retry_count = req->rnr_retry_count;
req              1089 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	struct ib_cm_req_param req = {};
req              1094 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	req.primary_path		= pathrec;
req              1095 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	req.alternate_path		= NULL;
req              1096 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	req.service_id			= cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
req              1097 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	req.qp_num			= qp->qp_num;
req              1098 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	req.qp_type			= qp->qp_type;
req              1099 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	req.private_data		= &data;
req              1100 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	req.private_data_len		= sizeof(data);
req              1101 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	req.flow_control		= 0;
req              1103 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	req.starting_psn		= 0; /* FIXME */
req              1109 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	req.responder_resources		= 4;
req              1110 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	req.remote_cm_response_timeout	= 20;
req              1111 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	req.local_cm_response_timeout	= 20;
req              1112 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	req.retry_count			= 0; /* RFC draft warns against retries */
req              1113 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	req.rnr_retry_count		= 0; /* RFC draft warns against retries */
req              1114 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	req.max_cm_retries		= 15;
req              1115 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	req.srq				= ipoib_cm_has_srq(dev);
req              1116 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	return ib_send_cm_req(id, &req);
req               293 drivers/infiniband/ulp/iser/iscsi_iser.h 	void                         *req;
req               181 drivers/infiniband/ulp/iser/iser_initiator.c 	if (!desc->req)
req               190 drivers/infiniband/ulp/iser/iser_initiator.c 	kfree(desc->req);
req               194 drivers/infiniband/ulp/iser/iser_initiator.c 	desc->req = NULL;
req               203 drivers/infiniband/ulp/iser/iser_initiator.c 	desc->req = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
req               204 drivers/infiniband/ulp/iser/iser_initiator.c 	if (!desc->req)
req               207 drivers/infiniband/ulp/iser/iser_initiator.c 	desc->req_dma = ib_dma_map_single(device->ib_device, desc->req,
req               234 drivers/infiniband/ulp/iser/iser_initiator.c 	kfree(desc->req);
req               323 drivers/infiniband/ulp/iser/iser_initiator.c static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
req               329 drivers/infiniband/ulp/iser/iser_initiator.c 	iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
req               331 drivers/infiniband/ulp/iser/iser_initiator.c 	if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
req               527 drivers/infiniband/ulp/iser/iser_initiator.c 		memcpy(desc->req, task->data, task->data_count);
req              1085 drivers/infiniband/ulp/isert/ib_isert.c 	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
req              2440 drivers/infiniband/ulp/isert/ib_isert.c 	isert_info("processing login->req: %p\n", login->req);
req               845 drivers/infiniband/ulp/srp/ib_srp.c 	} *req = NULL;
req               849 drivers/infiniband/ulp/srp/ib_srp.c 	req = kzalloc(sizeof *req, GFP_KERNEL);
req               850 drivers/infiniband/ulp/srp/ib_srp.c 	if (!req)
req               853 drivers/infiniband/ulp/srp/ib_srp.c 	req->ib_param.flow_control = 1;
req               854 drivers/infiniband/ulp/srp/ib_srp.c 	req->ib_param.retry_count = target->tl_retry_count;
req               860 drivers/infiniband/ulp/srp/ib_srp.c 	req->ib_param.responder_resources = 4;
req               861 drivers/infiniband/ulp/srp/ib_srp.c 	req->ib_param.rnr_retry_count = 7;
req               862 drivers/infiniband/ulp/srp/ib_srp.c 	req->ib_param.max_cm_retries = 15;
req               864 drivers/infiniband/ulp/srp/ib_srp.c 	req->ib_req.opcode = SRP_LOGIN_REQ;
req               865 drivers/infiniband/ulp/srp/ib_srp.c 	req->ib_req.tag = 0;
req               866 drivers/infiniband/ulp/srp/ib_srp.c 	req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
req               867 drivers/infiniband/ulp/srp/ib_srp.c 	req->ib_req.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
req               869 drivers/infiniband/ulp/srp/ib_srp.c 	req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
req               872 drivers/infiniband/ulp/srp/ib_srp.c 		req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
req               873 drivers/infiniband/ulp/srp/ib_srp.c 		req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
req               877 drivers/infiniband/ulp/srp/ib_srp.c 		req->rdma_param.flow_control = req->ib_param.flow_control;
req               878 drivers/infiniband/ulp/srp/ib_srp.c 		req->rdma_param.responder_resources =
req               879 drivers/infiniband/ulp/srp/ib_srp.c 			req->ib_param.responder_resources;
req               880 drivers/infiniband/ulp/srp/ib_srp.c 		req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
req               881 drivers/infiniband/ulp/srp/ib_srp.c 		req->rdma_param.retry_count = req->ib_param.retry_count;
req               882 drivers/infiniband/ulp/srp/ib_srp.c 		req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
req               883 drivers/infiniband/ulp/srp/ib_srp.c 		req->rdma_param.private_data = &req->rdma_req;
req               884 drivers/infiniband/ulp/srp/ib_srp.c 		req->rdma_param.private_data_len = sizeof(req->rdma_req);
req               886 drivers/infiniband/ulp/srp/ib_srp.c 		req->rdma_req.opcode = req->ib_req.opcode;
req               887 drivers/infiniband/ulp/srp/ib_srp.c 		req->rdma_req.tag = req->ib_req.tag;
req               888 drivers/infiniband/ulp/srp/ib_srp.c 		req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
req               889 drivers/infiniband/ulp/srp/ib_srp.c 		req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
req               890 drivers/infiniband/ulp/srp/ib_srp.c 		req->rdma_req.req_flags	= req->ib_req.req_flags;
req               891 drivers/infiniband/ulp/srp/ib_srp.c 		req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;
req               893 drivers/infiniband/ulp/srp/ib_srp.c 		ipi = req->rdma_req.initiator_port_id;
req               894 drivers/infiniband/ulp/srp/ib_srp.c 		tpi = req->rdma_req.target_port_id;
req               900 drivers/infiniband/ulp/srp/ib_srp.c 		req->ib_param.primary_path = &ch->ib_cm.path;
req               901 drivers/infiniband/ulp/srp/ib_srp.c 		req->ib_param.alternate_path = NULL;
req               902 drivers/infiniband/ulp/srp/ib_srp.c 		req->ib_param.service_id = target->ib_cm.service_id;
req               903 drivers/infiniband/ulp/srp/ib_srp.c 		get_random_bytes(&req->ib_param.starting_psn, 4);
req               904 drivers/infiniband/ulp/srp/ib_srp.c 		req->ib_param.starting_psn &= 0xffffff;
req               905 drivers/infiniband/ulp/srp/ib_srp.c 		req->ib_param.qp_num = ch->qp->qp_num;
req               906 drivers/infiniband/ulp/srp/ib_srp.c 		req->ib_param.qp_type = ch->qp->qp_type;
req               907 drivers/infiniband/ulp/srp/ib_srp.c 		req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
req               908 drivers/infiniband/ulp/srp/ib_srp.c 		req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
req               909 drivers/infiniband/ulp/srp/ib_srp.c 		req->ib_param.private_data = &req->ib_req;
req               910 drivers/infiniband/ulp/srp/ib_srp.c 		req->ib_param.private_data_len = sizeof(req->ib_req);
req               912 drivers/infiniband/ulp/srp/ib_srp.c 		ipi = req->ib_req.initiator_port_id;
req               913 drivers/infiniband/ulp/srp/ib_srp.c 		tpi = req->ib_req.target_port_id;
req               952 drivers/infiniband/ulp/srp/ib_srp.c 		status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
req               954 drivers/infiniband/ulp/srp/ib_srp.c 		status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);
req               956 drivers/infiniband/ulp/srp/ib_srp.c 	kfree(req);
req              1009 drivers/infiniband/ulp/srp/ib_srp.c 	struct srp_request *req;
req              1016 drivers/infiniband/ulp/srp/ib_srp.c 		req = &ch->req_ring[i];
req              1018 drivers/infiniband/ulp/srp/ib_srp.c 			kfree(req->fr_list);
req              1020 drivers/infiniband/ulp/srp/ib_srp.c 			kfree(req->fmr_list);
req              1021 drivers/infiniband/ulp/srp/ib_srp.c 			kfree(req->map_page);
req              1023 drivers/infiniband/ulp/srp/ib_srp.c 		if (req->indirect_dma_addr) {
req              1024 drivers/infiniband/ulp/srp/ib_srp.c 			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
req              1028 drivers/infiniband/ulp/srp/ib_srp.c 		kfree(req->indirect_desc);
req              1040 drivers/infiniband/ulp/srp/ib_srp.c 	struct srp_request *req;
req              1051 drivers/infiniband/ulp/srp/ib_srp.c 		req = &ch->req_ring[i];
req              1057 drivers/infiniband/ulp/srp/ib_srp.c 			req->fr_list = mr_list;
req              1059 drivers/infiniband/ulp/srp/ib_srp.c 			req->fmr_list = mr_list;
req              1060 drivers/infiniband/ulp/srp/ib_srp.c 			req->map_page = kmalloc_array(srp_dev->max_pages_per_mr,
req              1063 drivers/infiniband/ulp/srp/ib_srp.c 			if (!req->map_page)
req              1066 drivers/infiniband/ulp/srp/ib_srp.c 		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
req              1067 drivers/infiniband/ulp/srp/ib_srp.c 		if (!req->indirect_desc)
req              1070 drivers/infiniband/ulp/srp/ib_srp.c 		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
req              1076 drivers/infiniband/ulp/srp/ib_srp.c 		req->indirect_dma_addr = dma_addr;
req              1226 drivers/infiniband/ulp/srp/ib_srp.c static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
req              1237 drivers/infiniband/ulp/srp/ib_srp.c 	wr.wr_cqe = &req->reg_cqe;
req              1238 drivers/infiniband/ulp/srp/ib_srp.c 	req->reg_cqe.done = srp_inv_rkey_err_done;
req              1244 drivers/infiniband/ulp/srp/ib_srp.c 			   struct srp_request *req)
req              1259 drivers/infiniband/ulp/srp/ib_srp.c 		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
req              1260 drivers/infiniband/ulp/srp/ib_srp.c 			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
req              1269 drivers/infiniband/ulp/srp/ib_srp.c 		if (req->nmdesc)
req              1270 drivers/infiniband/ulp/srp/ib_srp.c 			srp_fr_pool_put(ch->fr_pool, req->fr_list,
req              1271 drivers/infiniband/ulp/srp/ib_srp.c 					req->nmdesc);
req              1275 drivers/infiniband/ulp/srp/ib_srp.c 		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
req              1295 drivers/infiniband/ulp/srp/ib_srp.c 				       struct srp_request *req,
req              1302 drivers/infiniband/ulp/srp/ib_srp.c 	if (req->scmnd &&
req              1303 drivers/infiniband/ulp/srp/ib_srp.c 	    (!sdev || req->scmnd->device == sdev) &&
req              1304 drivers/infiniband/ulp/srp/ib_srp.c 	    (!scmnd || req->scmnd == scmnd)) {
req              1305 drivers/infiniband/ulp/srp/ib_srp.c 		scmnd = req->scmnd;
req              1306 drivers/infiniband/ulp/srp/ib_srp.c 		req->scmnd = NULL;
req              1322 drivers/infiniband/ulp/srp/ib_srp.c static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
req              1327 drivers/infiniband/ulp/srp/ib_srp.c 	srp_unmap_data(scmnd, ch, req);
req              1334 drivers/infiniband/ulp/srp/ib_srp.c static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
req              1337 drivers/infiniband/ulp/srp/ib_srp.c 	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
req              1340 drivers/infiniband/ulp/srp/ib_srp.c 		srp_free_req(ch, req, scmnd, 0);
req              1356 drivers/infiniband/ulp/srp/ib_srp.c 			struct srp_request *req = &ch->req_ring[j];
req              1358 drivers/infiniband/ulp/srp/ib_srp.c 			srp_finish_req(ch, req, NULL,
req              1413 drivers/infiniband/ulp/srp/ib_srp.c 			struct srp_request *req = &ch->req_ring[j];
req              1415 drivers/infiniband/ulp/srp/ib_srp.c 			srp_finish_req(ch, req, NULL, DID_RESET << 16);
req              1521 drivers/infiniband/ulp/srp/ib_srp.c 			     struct srp_request *req,
req              1564 drivers/infiniband/ulp/srp/ib_srp.c 			 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
req              1571 drivers/infiniband/ulp/srp/ib_srp.c 	req->reg_cqe.done = srp_reg_mr_err_done;
req              1575 drivers/infiniband/ulp/srp/ib_srp.c 	wr.wr.wr_cqe = &req->reg_cqe;
req              1644 drivers/infiniband/ulp/srp/ib_srp.c 			  struct srp_request *req, struct scatterlist *scat,
req              1650 drivers/infiniband/ulp/srp/ib_srp.c 	state->pages = req->map_page;
req              1651 drivers/infiniband/ulp/srp/ib_srp.c 	state->fmr.next = req->fmr_list;
req              1652 drivers/infiniband/ulp/srp/ib_srp.c 	state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;
req              1668 drivers/infiniband/ulp/srp/ib_srp.c 			 struct srp_request *req, struct scatterlist *scat,
req              1673 drivers/infiniband/ulp/srp/ib_srp.c 	state->fr.next = req->fr_list;
req              1674 drivers/infiniband/ulp/srp/ib_srp.c 	state->fr.end = req->fr_list + ch->target->mr_per_cmd;
req              1683 drivers/infiniband/ulp/srp/ib_srp.c 		n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
req              1696 drivers/infiniband/ulp/srp/ib_srp.c 			  struct srp_request *req, struct scatterlist *scat,
req              1718 drivers/infiniband/ulp/srp/ib_srp.c static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
req              1735 drivers/infiniband/ulp/srp/ib_srp.c 	state.base_dma_addr = req->indirect_dma_addr;
req              1740 drivers/infiniband/ulp/srp/ib_srp.c 		sg_init_one(idb_sg, req->indirect_desc, idb_len);
req              1741 drivers/infiniband/ulp/srp/ib_srp.c 		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
req              1745 drivers/infiniband/ulp/srp/ib_srp.c 		ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
req              1751 drivers/infiniband/ulp/srp/ib_srp.c 		state.pages[0] = (req->indirect_dma_addr &
req              1767 drivers/infiniband/ulp/srp/ib_srp.c 			      struct srp_rdma_ch *ch, struct srp_request *req,
req              1776 drivers/infiniband/ulp/srp/ib_srp.c 		desc_len += be32_to_cpu(req->indirect_desc[i].len);
req              1778 drivers/infiniband/ulp/srp/ib_srp.c 		for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
req              1782 drivers/infiniband/ulp/srp/ib_srp.c 			mr_len += be32_to_cpu(req->indirect_desc[i].len);
req              1783 drivers/infiniband/ulp/srp/ib_srp.c 	if (desc_len != scsi_bufflen(req->scmnd) ||
req              1784 drivers/infiniband/ulp/srp/ib_srp.c 	    mr_len > scsi_bufflen(req->scmnd))
req              1786 drivers/infiniband/ulp/srp/ib_srp.c 		       scsi_bufflen(req->scmnd), desc_len, mr_len,
req              1801 drivers/infiniband/ulp/srp/ib_srp.c 			struct srp_request *req)
req              1805 drivers/infiniband/ulp/srp/ib_srp.c 	struct srp_cmd *cmd = req->cmd->buf;
req              1816 drivers/infiniband/ulp/srp/ib_srp.c 	req->cmd->num_sge = 1;
req              1845 drivers/infiniband/ulp/srp/ib_srp.c 		struct ib_sge *sge = &req->cmd->sge[1];
req              1849 drivers/infiniband/ulp/srp/ib_srp.c 		req->nmdesc = 0;
req              1858 drivers/infiniband/ulp/srp/ib_srp.c 		req->cmd->num_sge += count;
req              1880 drivers/infiniband/ulp/srp/ib_srp.c 		req->nmdesc = 0;
req              1890 drivers/infiniband/ulp/srp/ib_srp.c 	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
req              1894 drivers/infiniband/ulp/srp/ib_srp.c 	state.desc = req->indirect_desc;
req              1896 drivers/infiniband/ulp/srp/ib_srp.c 		ret = srp_map_sg_fr(&state, ch, req, scat, count);
req              1898 drivers/infiniband/ulp/srp/ib_srp.c 		ret = srp_map_sg_fmr(&state, ch, req, scat, count);
req              1900 drivers/infiniband/ulp/srp/ib_srp.c 		ret = srp_map_sg_dma(&state, ch, req, scat, count);
req              1901 drivers/infiniband/ulp/srp/ib_srp.c 	req->nmdesc = state.nmdesc;
req              1909 drivers/infiniband/ulp/srp/ib_srp.c 			srp_check_mapping(&state, ch, req, scat, count);
req              1926 drivers/infiniband/ulp/srp/ib_srp.c 		*buf = req->indirect_desc[0];
req              1947 drivers/infiniband/ulp/srp/ib_srp.c 	memcpy(indirect_hdr->desc_list, req->indirect_desc,
req              1951 drivers/infiniband/ulp/srp/ib_srp.c 		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
req              1955 drivers/infiniband/ulp/srp/ib_srp.c 		req->nmdesc++;
req              1960 drivers/infiniband/ulp/srp/ib_srp.c 	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
req              1970 drivers/infiniband/ulp/srp/ib_srp.c 	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
req              1982 drivers/infiniband/ulp/srp/ib_srp.c 	srp_unmap_data(scmnd, ch, req);
req              1983 drivers/infiniband/ulp/srp/ib_srp.c 	if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
req              2118 drivers/infiniband/ulp/srp/ib_srp.c 	struct srp_request *req;
req              2139 drivers/infiniband/ulp/srp/ib_srp.c 			req = (void *)scmnd->host_scribble;
req              2140 drivers/infiniband/ulp/srp/ib_srp.c 			scmnd = srp_claim_req(ch, req, NULL, scmnd);
req              2173 drivers/infiniband/ulp/srp/ib_srp.c 		srp_free_req(ch, req, scmnd,
req              2217 drivers/infiniband/ulp/srp/ib_srp.c 				 struct srp_cred_req *req)
req              2221 drivers/infiniband/ulp/srp/ib_srp.c 		.tag = req->tag,
req              2223 drivers/infiniband/ulp/srp/ib_srp.c 	s32 delta = be32_to_cpu(req->req_lim_delta);
req              2231 drivers/infiniband/ulp/srp/ib_srp.c 				struct srp_aer_req *req)
req              2236 drivers/infiniband/ulp/srp/ib_srp.c 		.tag = req->tag,
req              2238 drivers/infiniband/ulp/srp/ib_srp.c 	s32 delta = be32_to_cpu(req->req_lim_delta);
req              2241 drivers/infiniband/ulp/srp/ib_srp.c 		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
req              2344 drivers/infiniband/ulp/srp/ib_srp.c 	struct srp_request *req;
req              2372 drivers/infiniband/ulp/srp/ib_srp.c 	req = &ch->req_ring[idx];
req              2377 drivers/infiniband/ulp/srp/ib_srp.c 	scmnd->host_scribble = (void *) req;
req              2393 drivers/infiniband/ulp/srp/ib_srp.c 	req->scmnd    = scmnd;
req              2394 drivers/infiniband/ulp/srp/ib_srp.c 	req->cmd      = iu;
req              2396 drivers/infiniband/ulp/srp/ib_srp.c 	len = srp_map_data(scmnd, ch, req);
req              2423 drivers/infiniband/ulp/srp/ib_srp.c 	srp_unmap_data(scmnd, ch, req);
req              2432 drivers/infiniband/ulp/srp/ib_srp.c 	req->scmnd = NULL;
req              2978 drivers/infiniband/ulp/srp/ib_srp.c 	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
req              2986 drivers/infiniband/ulp/srp/ib_srp.c 	if (!req)
req              2993 drivers/infiniband/ulp/srp/ib_srp.c 	if (!srp_claim_req(ch, req, NULL, scmnd))
req              3005 drivers/infiniband/ulp/srp/ib_srp.c 		srp_free_req(ch, req, scmnd, 0);
req              2156 drivers/infiniband/ulp/srpt/ib_srpt.c 			    const struct srp_login_req *req,
req              2174 drivers/infiniband/ulp/srpt/ib_srpt.c 	if (WARN_ON(!sdev || !req))
req              2177 drivers/infiniband/ulp/srpt/ib_srpt.c 	it_iu_len = be32_to_cpu(req->req_it_iu_len);
req              2180 drivers/infiniband/ulp/srpt/ib_srpt.c 		req->initiator_port_id, req->target_port_id, it_iu_len,
req              2183 drivers/infiniband/ulp/srpt/ib_srpt.c 	nexus = srpt_get_nexus(sport, req->initiator_port_id,
req              2184 drivers/infiniband/ulp/srpt/ib_srpt.c 			       req->target_port_id);
req              2213 drivers/infiniband/ulp/srpt/ib_srpt.c 	if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
req              2214 drivers/infiniband/ulp/srpt/ib_srpt.c 	    || *(__be64 *)(req->target_port_id + 8) !=
req              2273 drivers/infiniband/ulp/srpt/ib_srpt.c 		u16 imm_data_offset = req->req_flags & SRP_IMMED_REQUESTED ?
req              2274 drivers/infiniband/ulp/srpt/ib_srpt.c 			be16_to_cpu(req->imm_data_offset) : 0;
req              2278 drivers/infiniband/ulp/srpt/ib_srpt.c 		if (req->req_flags & SRP_IMMED_REQUESTED)
req              2280 drivers/infiniband/ulp/srpt/ib_srpt.c 				 be16_to_cpu(req->imm_data_offset));
req              2354 drivers/infiniband/ulp/srpt/ib_srpt.c 	if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
req              2394 drivers/infiniband/ulp/srpt/ib_srpt.c 	rsp->tag = req->tag;
req              2396 drivers/infiniband/ulp/srpt/ib_srpt.c 	rsp->max_ti_iu_len = req->req_it_iu_len;
req              2485 drivers/infiniband/ulp/srpt/ib_srpt.c 	rej->tag = req->tag;
req              2530 drivers/infiniband/ulp/srpt/ib_srpt.c 	struct srp_login_req req;
req              2543 drivers/infiniband/ulp/srpt/ib_srpt.c 	memset(&req, 0, sizeof(req));
req              2544 drivers/infiniband/ulp/srpt/ib_srpt.c 	req.opcode		= req_rdma->opcode;
req              2545 drivers/infiniband/ulp/srpt/ib_srpt.c 	req.tag			= req_rdma->tag;
req              2546 drivers/infiniband/ulp/srpt/ib_srpt.c 	req.req_it_iu_len	= req_rdma->req_it_iu_len;
req              2547 drivers/infiniband/ulp/srpt/ib_srpt.c 	req.req_buf_fmt		= req_rdma->req_buf_fmt;
req              2548 drivers/infiniband/ulp/srpt/ib_srpt.c 	req.req_flags		= req_rdma->req_flags;
req              2549 drivers/infiniband/ulp/srpt/ib_srpt.c 	memcpy(req.initiator_port_id, req_rdma->initiator_port_id, 16);
req              2550 drivers/infiniband/ulp/srpt/ib_srpt.c 	memcpy(req.target_port_id, req_rdma->target_port_id, 16);
req              2551 drivers/infiniband/ulp/srpt/ib_srpt.c 	req.imm_data_offset	= req_rdma->imm_data_offset;
req              2557 drivers/infiniband/ulp/srpt/ib_srpt.c 				cm_id->route.path_rec->pkey, &req, src_addr);
req               845 drivers/input/misc/uinput.c 	struct uinput_request   *req;
req               943 drivers/input/misc/uinput.c 		req = uinput_request_find(udev, ff_up.request_id);
req               944 drivers/input/misc/uinput.c 		if (!req || req->code != UI_FF_UPLOAD ||
req               945 drivers/input/misc/uinput.c 		    !req->u.upload.effect) {
req               951 drivers/input/misc/uinput.c 		ff_up.effect = *req->u.upload.effect;
req               952 drivers/input/misc/uinput.c 		if (req->u.upload.old)
req               953 drivers/input/misc/uinput.c 			ff_up.old = *req->u.upload.old;
req               966 drivers/input/misc/uinput.c 		req = uinput_request_find(udev, ff_erase.request_id);
req               967 drivers/input/misc/uinput.c 		if (!req || req->code != UI_FF_ERASE) {
req               973 drivers/input/misc/uinput.c 		ff_erase.effect_id = req->u.effect_id;
req               986 drivers/input/misc/uinput.c 		req = uinput_request_find(udev, ff_up.request_id);
req               987 drivers/input/misc/uinput.c 		if (!req || req->code != UI_FF_UPLOAD ||
req               988 drivers/input/misc/uinput.c 		    !req->u.upload.effect) {
req               993 drivers/input/misc/uinput.c 		req->retval = ff_up.retval;
req               994 drivers/input/misc/uinput.c 		complete(&req->done);
req              1003 drivers/input/misc/uinput.c 		req = uinput_request_find(udev, ff_erase.request_id);
req              1004 drivers/input/misc/uinput.c 		if (!req || req->code != UI_FF_ERASE) {
req              1009 drivers/input/misc/uinput.c 		req->retval = ff_erase.retval;
req              1010 drivers/input/misc/uinput.c 		complete(&req->done);
req               201 drivers/input/touchscreen/ad7877.c 	struct ser_req *req;
req               204 drivers/input/touchscreen/ad7877.c 	req = kzalloc(sizeof *req, GFP_KERNEL);
req               205 drivers/input/touchscreen/ad7877.c 	if (!req)
req               208 drivers/input/touchscreen/ad7877.c 	spi_message_init(&req->msg);
req               210 drivers/input/touchscreen/ad7877.c 	req->command = (u16) (AD7877_WRITEADD(AD7877_REG_CTRL1) |
req               212 drivers/input/touchscreen/ad7877.c 	req->xfer[0].tx_buf = &req->command;
req               213 drivers/input/touchscreen/ad7877.c 	req->xfer[0].len = 2;
req               214 drivers/input/touchscreen/ad7877.c 	req->xfer[0].cs_change = 1;
req               216 drivers/input/touchscreen/ad7877.c 	req->xfer[1].rx_buf = &req->sample;
req               217 drivers/input/touchscreen/ad7877.c 	req->xfer[1].len = 2;
req               219 drivers/input/touchscreen/ad7877.c 	spi_message_add_tail(&req->xfer[0], &req->msg);
req               220 drivers/input/touchscreen/ad7877.c 	spi_message_add_tail(&req->xfer[1], &req->msg);
req               222 drivers/input/touchscreen/ad7877.c 	status = spi_sync(spi, &req->msg);
req               223 drivers/input/touchscreen/ad7877.c 	ret = status ? : req->sample;
req               225 drivers/input/touchscreen/ad7877.c 	kfree(req);
req               232 drivers/input/touchscreen/ad7877.c 	struct ser_req *req;
req               235 drivers/input/touchscreen/ad7877.c 	req = kzalloc(sizeof *req, GFP_KERNEL);
req               236 drivers/input/touchscreen/ad7877.c 	if (!req)
req               239 drivers/input/touchscreen/ad7877.c 	spi_message_init(&req->msg);
req               241 drivers/input/touchscreen/ad7877.c 	req->command = (u16) (AD7877_WRITEADD(reg) | (val & MAX_12BIT));
req               242 drivers/input/touchscreen/ad7877.c 	req->xfer[0].tx_buf = &req->command;
req               243 drivers/input/touchscreen/ad7877.c 	req->xfer[0].len = 2;
req               245 drivers/input/touchscreen/ad7877.c 	spi_message_add_tail(&req->xfer[0], &req->msg);
req               247 drivers/input/touchscreen/ad7877.c 	status = spi_sync(spi, &req->msg);
req               249 drivers/input/touchscreen/ad7877.c 	kfree(req);
req               257 drivers/input/touchscreen/ad7877.c 	struct ser_req *req;
req               262 drivers/input/touchscreen/ad7877.c 	req = kzalloc(sizeof *req, GFP_KERNEL);
req               263 drivers/input/touchscreen/ad7877.c 	if (!req)
req               266 drivers/input/touchscreen/ad7877.c 	spi_message_init(&req->msg);
req               269 drivers/input/touchscreen/ad7877.c 	req->ref_on = AD7877_WRITEADD(AD7877_REG_CTRL2) |
req               274 drivers/input/touchscreen/ad7877.c 	req->reset = AD7877_WRITEADD(AD7877_REG_CTRL1) | AD7877_MODE_NOC;
req               276 drivers/input/touchscreen/ad7877.c 	req->command = (u16) command;
req               278 drivers/input/touchscreen/ad7877.c 	req->xfer[0].tx_buf = &req->reset;
req               279 drivers/input/touchscreen/ad7877.c 	req->xfer[0].len = 2;
req               280 drivers/input/touchscreen/ad7877.c 	req->xfer[0].cs_change = 1;
req               282 drivers/input/touchscreen/ad7877.c 	req->xfer[1].tx_buf = &req->ref_on;
req               283 drivers/input/touchscreen/ad7877.c 	req->xfer[1].len = 2;
req               284 drivers/input/touchscreen/ad7877.c 	req->xfer[1].delay_usecs = ts->vref_delay_usecs;
req               285 drivers/input/touchscreen/ad7877.c 	req->xfer[1].cs_change = 1;
req               287 drivers/input/touchscreen/ad7877.c 	req->xfer[2].tx_buf = &req->command;
req               288 drivers/input/touchscreen/ad7877.c 	req->xfer[2].len = 2;
req               289 drivers/input/touchscreen/ad7877.c 	req->xfer[2].delay_usecs = ts->vref_delay_usecs;
req               290 drivers/input/touchscreen/ad7877.c 	req->xfer[2].cs_change = 1;
req               292 drivers/input/touchscreen/ad7877.c 	req->xfer[3].rx_buf = &req->sample;
req               293 drivers/input/touchscreen/ad7877.c 	req->xfer[3].len = 2;
req               294 drivers/input/touchscreen/ad7877.c 	req->xfer[3].cs_change = 1;
req               296 drivers/input/touchscreen/ad7877.c 	req->xfer[4].tx_buf = &ts->cmd_crtl2;	/*REF OFF*/
req               297 drivers/input/touchscreen/ad7877.c 	req->xfer[4].len = 2;
req               298 drivers/input/touchscreen/ad7877.c 	req->xfer[4].cs_change = 1;
req               300 drivers/input/touchscreen/ad7877.c 	req->xfer[5].tx_buf = &ts->cmd_crtl1;	/*DEFAULT*/
req               301 drivers/input/touchscreen/ad7877.c 	req->xfer[5].len = 2;
req               307 drivers/input/touchscreen/ad7877.c 		spi_message_add_tail(&req->xfer[i], &req->msg);
req               309 drivers/input/touchscreen/ad7877.c 	status = spi_sync(spi, &req->msg);
req               310 drivers/input/touchscreen/ad7877.c 	sample = req->sample;
req               312 drivers/input/touchscreen/ad7877.c 	kfree(req);
req               316 drivers/input/touchscreen/ads7846.c 	struct ser_req *req;
req               319 drivers/input/touchscreen/ads7846.c 	req = kzalloc(sizeof *req, GFP_KERNEL);
req               320 drivers/input/touchscreen/ads7846.c 	if (!req)
req               323 drivers/input/touchscreen/ads7846.c 	spi_message_init(&req->msg);
req               327 drivers/input/touchscreen/ads7846.c 		req->ref_on = REF_ON;
req               328 drivers/input/touchscreen/ads7846.c 		req->xfer[0].tx_buf = &req->ref_on;
req               329 drivers/input/touchscreen/ads7846.c 		req->xfer[0].len = 1;
req               330 drivers/input/touchscreen/ads7846.c 		spi_message_add_tail(&req->xfer[0], &req->msg);
req               332 drivers/input/touchscreen/ads7846.c 		req->xfer[1].rx_buf = &req->scratch;
req               333 drivers/input/touchscreen/ads7846.c 		req->xfer[1].len = 2;
req               336 drivers/input/touchscreen/ads7846.c 		req->xfer[1].delay_usecs = ts->vref_delay_usecs;
req               337 drivers/input/touchscreen/ads7846.c 		spi_message_add_tail(&req->xfer[1], &req->msg);
req               347 drivers/input/touchscreen/ads7846.c 	req->command = (u8) command;
req               348 drivers/input/touchscreen/ads7846.c 	req->xfer[2].tx_buf = &req->command;
req               349 drivers/input/touchscreen/ads7846.c 	req->xfer[2].len = 1;
req               350 drivers/input/touchscreen/ads7846.c 	spi_message_add_tail(&req->xfer[2], &req->msg);
req               352 drivers/input/touchscreen/ads7846.c 	req->xfer[3].rx_buf = &req->sample;
req               353 drivers/input/touchscreen/ads7846.c 	req->xfer[3].len = 2;
req               354 drivers/input/touchscreen/ads7846.c 	spi_message_add_tail(&req->xfer[3], &req->msg);
req               359 drivers/input/touchscreen/ads7846.c 	req->ref_off = PWRDOWN;
req               360 drivers/input/touchscreen/ads7846.c 	req->xfer[4].tx_buf = &req->ref_off;
req               361 drivers/input/touchscreen/ads7846.c 	req->xfer[4].len = 1;
req               362 drivers/input/touchscreen/ads7846.c 	spi_message_add_tail(&req->xfer[4], &req->msg);
req               364 drivers/input/touchscreen/ads7846.c 	req->xfer[5].rx_buf = &req->scratch;
req               365 drivers/input/touchscreen/ads7846.c 	req->xfer[5].len = 2;
req               366 drivers/input/touchscreen/ads7846.c 	CS_CHANGE(req->xfer[5]);
req               367 drivers/input/touchscreen/ads7846.c 	spi_message_add_tail(&req->xfer[5], &req->msg);
req               371 drivers/input/touchscreen/ads7846.c 	status = spi_sync(spi, &req->msg);
req               377 drivers/input/touchscreen/ads7846.c 		status = be16_to_cpu(req->sample);
req               382 drivers/input/touchscreen/ads7846.c 	kfree(req);
req               390 drivers/input/touchscreen/ads7846.c 	struct ads7845_ser_req *req;
req               393 drivers/input/touchscreen/ads7846.c 	req = kzalloc(sizeof *req, GFP_KERNEL);
req               394 drivers/input/touchscreen/ads7846.c 	if (!req)
req               397 drivers/input/touchscreen/ads7846.c 	spi_message_init(&req->msg);
req               399 drivers/input/touchscreen/ads7846.c 	req->command[0] = (u8) command;
req               400 drivers/input/touchscreen/ads7846.c 	req->xfer[0].tx_buf = req->command;
req               401 drivers/input/touchscreen/ads7846.c 	req->xfer[0].rx_buf = req->sample;
req               402 drivers/input/touchscreen/ads7846.c 	req->xfer[0].len = 3;
req               403 drivers/input/touchscreen/ads7846.c 	spi_message_add_tail(&req->xfer[0], &req->msg);
req               407 drivers/input/touchscreen/ads7846.c 	status = spi_sync(spi, &req->msg);
req               413 drivers/input/touchscreen/ads7846.c 		status = be16_to_cpu(*((u16 *)&req->sample[1]));
req               418 drivers/input/touchscreen/ads7846.c 	kfree(req);
req                36 drivers/interconnect/qcom/smd-rpm.c 	struct icc_rpm_smd_req req = {
req                42 drivers/interconnect/qcom/smd-rpm.c 	return qcom_rpm_smd_write(icc_smd_rpm, ctx, rsc_type, id, &req,
req                43 drivers/interconnect/qcom/smd-rpm.c 				  sizeof(req));
req               507 drivers/iommu/intel-svm.c static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
req               511 drivers/iommu/intel-svm.c 	if (req->exe_req)
req               514 drivers/iommu/intel-svm.c 	if (req->rd_req)
req               517 drivers/iommu/intel-svm.c 	if (req->wr_req)
req               546 drivers/iommu/intel-svm.c 		struct page_req_dsc *req;
req               554 drivers/iommu/intel-svm.c 		req = &iommu->prq[head / sizeof(*req)];
req               557 drivers/iommu/intel-svm.c 		address = (u64)req->addr << VTD_PAGE_SHIFT;
req               558 drivers/iommu/intel-svm.c 		if (!req->pasid_present) {
req               560 drivers/iommu/intel-svm.c 			       iommu->name, ((unsigned long long *)req)[0],
req               561 drivers/iommu/intel-svm.c 			       ((unsigned long long *)req)[1]);
req               565 drivers/iommu/intel-svm.c 		if (!svm || svm->pasid != req->pasid) {
req               567 drivers/iommu/intel-svm.c 			svm = intel_pasid_lookup_id(req->pasid);
req               575 drivers/iommu/intel-svm.c 				       iommu->name, req->pasid, ((unsigned long long *)req)[0],
req               576 drivers/iommu/intel-svm.c 				       ((unsigned long long *)req)[1]);
req               600 drivers/iommu/intel-svm.c 		if (access_error(vma, req))
req               604 drivers/iommu/intel-svm.c 				      req->wr_req ? FAULT_FLAG_WRITE : 0);
req               616 drivers/iommu/intel-svm.c 			if (sdev->sid == req->rid)
req               628 drivers/iommu/intel-svm.c 			int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
req               629 drivers/iommu/intel-svm.c 				(req->exe_req << 1) | (req->pm_req);
req               630 drivers/iommu/intel-svm.c 			sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr,
req               631 drivers/iommu/intel-svm.c 					    req->priv_data, rwxp, result);
req               638 drivers/iommu/intel-svm.c 		if (req->lpig || req->priv_data_present) {
req               646 drivers/iommu/intel-svm.c 			resp.qw0 = QI_PGRP_PASID(req->pasid) |
req               647 drivers/iommu/intel-svm.c 				QI_PGRP_DID(req->rid) |
req               648 drivers/iommu/intel-svm.c 				QI_PGRP_PASID_P(req->pasid_present) |
req               649 drivers/iommu/intel-svm.c 				QI_PGRP_PDP(req->pasid_present) |
req               652 drivers/iommu/intel-svm.c 			resp.qw1 = QI_PGRP_IDX(req->prg_index) |
req               653 drivers/iommu/intel-svm.c 				QI_PGRP_LPIG(req->lpig);
req               655 drivers/iommu/intel-svm.c 			if (req->priv_data_present)
req               656 drivers/iommu/intel-svm.c 				memcpy(&resp.qw2, req->priv_data,
req               657 drivers/iommu/intel-svm.c 				       sizeof(req->priv_data));
req               662 drivers/iommu/intel-svm.c 		head = (head + sizeof(*req)) & PRQ_RING_MASK;
req               137 drivers/iommu/virtio-iommu.c 					  struct virtio_iommu_req_head *req,
req               142 drivers/iommu/virtio-iommu.c 	if (req->type == VIRTIO_IOMMU_T_PROBE)
req               159 drivers/iommu/virtio-iommu.c 	struct viommu_request *req;
req               168 drivers/iommu/virtio-iommu.c 		req = virtqueue_get_buf(vq, &len);
req               169 drivers/iommu/virtio-iommu.c 		if (!req)
req               173 drivers/iommu/virtio-iommu.c 			viommu_set_req_status(req->buf, req->len,
req               176 drivers/iommu/virtio-iommu.c 		write_len = req->len - req->write_offset;
req               177 drivers/iommu/virtio-iommu.c 		if (req->writeback && len == write_len)
req               178 drivers/iommu/virtio-iommu.c 			memcpy(req->writeback, req->buf + req->write_offset,
req               181 drivers/iommu/virtio-iommu.c 		list_del(&req->list);
req               182 drivers/iommu/virtio-iommu.c 		kfree(req);
req               223 drivers/iommu/virtio-iommu.c 	struct viommu_request *req;
req               234 drivers/iommu/virtio-iommu.c 	req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
req               235 drivers/iommu/virtio-iommu.c 	if (!req)
req               238 drivers/iommu/virtio-iommu.c 	req->len = len;
req               240 drivers/iommu/virtio-iommu.c 		req->writeback = buf + write_offset;
req               241 drivers/iommu/virtio-iommu.c 		req->write_offset = write_offset;
req               243 drivers/iommu/virtio-iommu.c 	memcpy(&req->buf, buf, write_offset);
req               245 drivers/iommu/virtio-iommu.c 	sg_init_one(&top_sg, req->buf, write_offset);
req               246 drivers/iommu/virtio-iommu.c 	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);
req               248 drivers/iommu/virtio-iommu.c 	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
req               252 drivers/iommu/virtio-iommu.c 			ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
req               257 drivers/iommu/virtio-iommu.c 	list_add_tail(&req->list, &viommu->requests);
req               261 drivers/iommu/virtio-iommu.c 	kfree(req);
req               652 drivers/iommu/virtio-iommu.c 	struct virtio_iommu_req_attach req;
req               688 drivers/iommu/virtio-iommu.c 	req = (struct virtio_iommu_req_attach) {
req               694 drivers/iommu/virtio-iommu.c 		req.endpoint = cpu_to_le32(fwspec->ids[i]);
req               696 drivers/iommu/virtio-iommu.c 		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
req                83 drivers/irqchip/irq-davinci-aintc.c 	void __iomem *req;
req                87 drivers/irqchip/irq-davinci-aintc.c 	req = request_mem_region(config->reg.start,
req                90 drivers/irqchip/irq-davinci-aintc.c 	if (!req) {
req               164 drivers/irqchip/irq-davinci-cp-intc.c 	void __iomem *req;
req               166 drivers/irqchip/irq-davinci-cp-intc.c 	req = request_mem_region(config->reg.start,
req               169 drivers/irqchip/irq-davinci-cp-intc.c 	if (!req) {
req                65 drivers/macintosh/adb-iop.c static void adb_iop_end_req(struct adb_request *req, int state)
req                67 drivers/macintosh/adb-iop.c 	req->complete = 1;
req                68 drivers/macintosh/adb-iop.c 	current_req = req->next;
req                69 drivers/macintosh/adb-iop.c 	if (req->done)
req                70 drivers/macintosh/adb-iop.c 		(*req->done)(req);
req                82 drivers/macintosh/adb-iop.c 	struct adb_request *req;
req                87 drivers/macintosh/adb-iop.c 	req = current_req;
req                88 drivers/macintosh/adb-iop.c 	if ((adb_iop_state == sending) && req && req->reply_expected) {
req               105 drivers/macintosh/adb-iop.c 	struct adb_request *req;
req               113 drivers/macintosh/adb-iop.c 	req = current_req;
req               116 drivers/macintosh/adb-iop.c 	printk("adb_iop_listen %p: rcvd packet, %d bytes: %02X %02X", req,
req               133 drivers/macintosh/adb-iop.c 		if (req && (adb_iop_state != idle)) {
req               134 drivers/macintosh/adb-iop.c 			adb_iop_end_req(req, idle);
req               142 drivers/macintosh/adb-iop.c 			req->reply_len = amsg->count + 1;
req               143 drivers/macintosh/adb-iop.c 			memcpy(req->reply, &amsg->cmd, req->reply_len);
req               164 drivers/macintosh/adb-iop.c 	struct adb_request *req;
req               171 drivers/macintosh/adb-iop.c 	req = current_req;
req               172 drivers/macintosh/adb-iop.c 	if (!req)
req               178 drivers/macintosh/adb-iop.c 	printk("adb_iop_start %p: sending packet, %d bytes:", req, req->nbytes);
req               179 drivers/macintosh/adb-iop.c 	for (i = 0; i < req->nbytes; i++)
req               180 drivers/macintosh/adb-iop.c 		printk(" %02X", (uint)req->data[i]);
req               188 drivers/macintosh/adb-iop.c 	amsg.count = req->nbytes - 2;
req               192 drivers/macintosh/adb-iop.c 	memcpy(&amsg.cmd, req->data + 1, req->nbytes - 1);
req               194 drivers/macintosh/adb-iop.c 	req->sent = 1;
req               201 drivers/macintosh/adb-iop.c 	iop_send_message(ADB_IOP, ADB_CHAN, req, sizeof(amsg), (__u8 *)&amsg,
req               219 drivers/macintosh/adb-iop.c int adb_iop_send_request(struct adb_request *req, int sync)
req               223 drivers/macintosh/adb-iop.c 	err = adb_iop_write(req);
req               228 drivers/macintosh/adb-iop.c 		while (!req->complete)
req               234 drivers/macintosh/adb-iop.c static int adb_iop_write(struct adb_request *req)
req               238 drivers/macintosh/adb-iop.c 	if ((req->nbytes < 2) || (req->data[0] != ADB_PACKET)) {
req               239 drivers/macintosh/adb-iop.c 		req->complete = 1;
req               245 drivers/macintosh/adb-iop.c 	req->next = NULL;
req               246 drivers/macintosh/adb-iop.c 	req->sent = 0;
req               247 drivers/macintosh/adb-iop.c 	req->complete = 0;
req               248 drivers/macintosh/adb-iop.c 	req->reply_len = 0;
req               251 drivers/macintosh/adb-iop.c 		last_req->next = req;
req               252 drivers/macintosh/adb-iop.c 		last_req = req;
req               254 drivers/macintosh/adb-iop.c 		current_req = req;
req               255 drivers/macintosh/adb-iop.c 		last_req = req;
req               280 drivers/macintosh/adb-iop.c 	struct adb_request req = {
req               286 drivers/macintosh/adb-iop.c 	adb_iop_write(&req);
req               287 drivers/macintosh/adb-iop.c 	while (!req.complete) {
req               113 drivers/macintosh/adb.c static void printADBreply(struct adb_request *req)
req               117 drivers/macintosh/adb.c         printk("adb reply (%d)", req->reply_len);
req               118 drivers/macintosh/adb.c         for(i = 0; i < req->reply_len; i++)
req               119 drivers/macintosh/adb.c                 printk(" %x", req->reply[i]);
req               129 drivers/macintosh/adb.c 	struct adb_request req;
req               134 drivers/macintosh/adb.c 		adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
req               136 drivers/macintosh/adb.c 		if (req.reply_len > 1)
req               153 drivers/macintosh/adb.c 			adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
req               160 drivers/macintosh/adb.c 			adb_request(&req, NULL, ADBREQ_SYNC, 3,
req               168 drivers/macintosh/adb.c 			adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
req               170 drivers/macintosh/adb.c 			if (req.reply_len <= 1) continue;
req               175 drivers/macintosh/adb.c 			adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
req               177 drivers/macintosh/adb.c 			if (req.reply_len > 1) {
req               198 drivers/macintosh/adb.c 				adb_request(&req, NULL, ADBREQ_SYNC, 3,
req               209 drivers/macintosh/adb.c 		adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
req               211 drivers/macintosh/adb.c 		adb_handler[i].handler_id = req.reply[2];
req               412 drivers/macintosh/adb.c static void adb_sync_req_done(struct adb_request *req)
req               414 drivers/macintosh/adb.c 	struct completion *comp = req->arg;
req               420 drivers/macintosh/adb.c adb_request(struct adb_request *req, void (*done)(struct adb_request *),
req               433 drivers/macintosh/adb.c 	req->nbytes = nbytes+1;
req               434 drivers/macintosh/adb.c 	req->done = done;
req               435 drivers/macintosh/adb.c 	req->reply_expected = flags & ADBREQ_REPLY;
req               436 drivers/macintosh/adb.c 	req->data[0] = ADB_PACKET;
req               439 drivers/macintosh/adb.c 		req->data[i+1] = va_arg(list, int);
req               448 drivers/macintosh/adb.c 		req->done = adb_sync_req_done;
req               449 drivers/macintosh/adb.c 		req->arg = &comp;
req               453 drivers/macintosh/adb.c 	rc = adb_controller->send_request(req, 0);
req               455 drivers/macintosh/adb.c 	if ((flags & ADBREQ_SYNC) && !rc && !req->complete)
req               557 drivers/macintosh/adb.c 	struct adb_request req;
req               561 drivers/macintosh/adb.c 	adb_request(&req, NULL, ADBREQ_SYNC, 3,
req               563 drivers/macintosh/adb.c 	adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
req               565 drivers/macintosh/adb.c 	if (req.reply_len < 2)
req               567 drivers/macintosh/adb.c 	if (req.reply[2] != new_id)
req               569 drivers/macintosh/adb.c 	adb_handler[address].handler_id = req.reply[2];
req               614 drivers/macintosh/adb.c static void adb_write_done(struct adb_request *req)
req               616 drivers/macintosh/adb.c 	struct adbdev_state *state = (struct adbdev_state *) req->arg;
req               619 drivers/macintosh/adb.c 	if (!req->complete) {
req               620 drivers/macintosh/adb.c 		req->reply_len = 0;
req               621 drivers/macintosh/adb.c 		req->complete = 1;
req               626 drivers/macintosh/adb.c 		kfree(req);
req               636 drivers/macintosh/adb.c 		req->next = NULL;
req               637 drivers/macintosh/adb.c 		*ap = req;
req               644 drivers/macintosh/adb.c do_adb_query(struct adb_request *req)
req               648 drivers/macintosh/adb.c 	switch(req->data[1]) {
req               650 drivers/macintosh/adb.c 		if (req->nbytes < 3)
req               653 drivers/macintosh/adb.c 		req->reply[0] = adb_handler[req->data[2]].original_address;
req               654 drivers/macintosh/adb.c 		req->reply[1] = adb_handler[req->data[2]].handler_id;
req               656 drivers/macintosh/adb.c 		req->complete = 1;
req               657 drivers/macintosh/adb.c 		req->reply_len = 2;
req               658 drivers/macintosh/adb.c 		adb_write_done(req);
req               719 drivers/macintosh/adb.c 	struct adb_request *req;
req               725 drivers/macintosh/adb.c 	if (count > sizeof(req->reply))
req               726 drivers/macintosh/adb.c 		count = sizeof(req->reply);
req               728 drivers/macintosh/adb.c 	req = NULL;
req               734 drivers/macintosh/adb.c 		req = state->completed;
req               735 drivers/macintosh/adb.c 		if (req != NULL)
req               736 drivers/macintosh/adb.c 			state->completed = req->next;
req               739 drivers/macintosh/adb.c 		if (req != NULL || ret != 0)
req               762 drivers/macintosh/adb.c 	ret = req->reply_len;
req               765 drivers/macintosh/adb.c 	if (ret > 0 && copy_to_user(buf, req->reply, ret))
req               768 drivers/macintosh/adb.c 	kfree(req);
req               777 drivers/macintosh/adb.c 	struct adb_request *req;
req               779 drivers/macintosh/adb.c 	if (count < 2 || count > sizeof(req->data))
req               784 drivers/macintosh/adb.c 	req = kmalloc(sizeof(struct adb_request),
req               786 drivers/macintosh/adb.c 	if (req == NULL)
req               789 drivers/macintosh/adb.c 	req->nbytes = count;
req               790 drivers/macintosh/adb.c 	req->done = adb_write_done;
req               791 drivers/macintosh/adb.c 	req->arg = (void *) state;
req               792 drivers/macintosh/adb.c 	req->complete = 0;
req               795 drivers/macintosh/adb.c 	if (copy_from_user(req->data, buf, count))
req               804 drivers/macintosh/adb.c 	if (req->data[0] == ADB_QUERY) {
req               806 drivers/macintosh/adb.c 			ret = do_adb_query(req);
req               813 drivers/macintosh/adb.c 	else if ((req->data[0] == ADB_PACKET) && (count > 1)
req               814 drivers/macintosh/adb.c 		&& (req->data[1] == ADB_BUSRESET)) {
req               822 drivers/macintosh/adb.c 		req->reply_expected = ((req->data[1] & 0xc) == 0xc);
req               824 drivers/macintosh/adb.c 			ret = adb_controller->send_request(req, 0);
req               837 drivers/macintosh/adb.c 	kfree(req);
req               636 drivers/macintosh/adbhid.c static void leds_done(struct adb_request *req)
req               955 drivers/macintosh/adbhid.c 	struct adb_request req;
req               969 drivers/macintosh/adbhid.c 		adb_request(&req, NULL, ADBREQ_SYNC, 3,
req              1044 drivers/macintosh/adbhid.c 			adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
req              1047 drivers/macintosh/adbhid.c 			if ((req.reply_len) &&
req              1048 drivers/macintosh/adbhid.c 			    (req.reply[1] == 0x9a) && ((req.reply[2] == 0x21)
req              1049 drivers/macintosh/adbhid.c 			    	|| (req.reply[2] == 0x20))) {
req              1054 drivers/macintosh/adbhid.c 			else if ((req.reply_len >= 4) &&
req              1055 drivers/macintosh/adbhid.c 			    (req.reply[1] == 0x74) && (req.reply[2] == 0x70) &&
req              1056 drivers/macintosh/adbhid.c 			    (req.reply[3] == 0x61) && (req.reply[4] == 0x64)) {
req              1061 drivers/macintosh/adbhid.c 			else if ((req.reply_len >= 4) &&
req              1062 drivers/macintosh/adbhid.c 			    (req.reply[1] == 0x4b) && (req.reply[2] == 0x4d) &&
req              1063 drivers/macintosh/adbhid.c 			    (req.reply[3] == 0x4c) && (req.reply[4] == 0x31)) {
req              1068 drivers/macintosh/adbhid.c 			else if ((req.reply_len == 9) &&
req              1069 drivers/macintosh/adbhid.c 			    (req.reply[1] == 0x4b) && (req.reply[2] == 0x4f) &&
req              1070 drivers/macintosh/adbhid.c 			    (req.reply[3] == 0x49) && (req.reply[4] == 0x54)) {
req              1090 drivers/macintosh/adbhid.c 	struct adb_request req;
req              1093 drivers/macintosh/adbhid.c 	adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
req              1095 drivers/macintosh/adbhid.c 	if (req.reply_len < 8)
req              1099 drivers/macintosh/adbhid.c 	    memcpy(r1_buffer, &req.reply[1], 8);
req              1101 drivers/macintosh/adbhid.c 	    adb_request(&req, NULL, ADBREQ_SYNC, 9,
req              1112 drivers/macintosh/adbhid.c             adb_request(&req, NULL, ADBREQ_SYNC, 9,
req              1123 drivers/macintosh/adbhid.c 	    adb_request(&req, NULL, ADBREQ_SYNC, 9,
req              1135 drivers/macintosh/adbhid.c 	    adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(id));
req              1142 drivers/macintosh/adbhid.c 	struct adb_request req;
req              1144 drivers/macintosh/adbhid.c 	adb_request(&req, NULL, ADBREQ_SYNC, 3,
req              1147 drivers/macintosh/adbhid.c 	adb_request(&req, NULL, ADBREQ_SYNC, 3,
req              1150 drivers/macintosh/adbhid.c 	adb_request(&req, NULL, ADBREQ_SYNC, 3,
req              1153 drivers/macintosh/adbhid.c 	adb_request(&req, NULL, ADBREQ_SYNC, 3,
req              1156 drivers/macintosh/adbhid.c 	adb_request(&req, NULL, ADBREQ_SYNC, 3,
req              1159 drivers/macintosh/adbhid.c 	adb_request(&req, NULL, ADBREQ_SYNC, 3,
req              1162 drivers/macintosh/adbhid.c 	adb_request(&req, NULL, ADBREQ_SYNC, 3,
req              1165 drivers/macintosh/adbhid.c 	adb_request(&req, NULL, ADBREQ_SYNC, 3,
req              1172 drivers/macintosh/adbhid.c 	struct adb_request req;
req              1174 drivers/macintosh/adbhid.c 	adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(id));
req              1176 drivers/macintosh/adbhid.c 	adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(3));
req              1178 drivers/macintosh/adbhid.c 	adb_request(&req, NULL, ADBREQ_SYNC, 9,
req              1189 drivers/macintosh/adbhid.c 	adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(3));
req              1191 drivers/macintosh/adbhid.c 	adb_request(&req, NULL, ADBREQ_SYNC, 9,
req              1206 drivers/macintosh/adbhid.c 	struct adb_request req;
req              1208 drivers/macintosh/adbhid.c 	adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(id));
req              1230 drivers/macintosh/adbhid.c 	adb_request(&req, NULL, ADBREQ_SYNC, 5,
req              1238 drivers/macintosh/adbhid.c 	adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(id));
req              1244 drivers/macintosh/adbhid.c 	struct adb_request req;
req              1246 drivers/macintosh/adbhid.c 	adb_request(&req, NULL, ADBREQ_SYNC, 3,
req              1251 drivers/macintosh/adbhid.c  	adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(id));
req                42 drivers/macintosh/ams/ams-pmu.c static void ams_pmu_req_complete(struct adb_request *req)
req                44 drivers/macintosh/ams/ams-pmu.c 	complete((struct completion *)req->arg);
req                50 drivers/macintosh/ams/ams-pmu.c 	static struct adb_request req;
req                53 drivers/macintosh/ams/ams-pmu.c 	req.arg = &req_complete;
req                54 drivers/macintosh/ams/ams-pmu.c 	if (pmu_request(&req, ams_pmu_req_complete, 4, ams_pmu_cmd, 0x00, reg, value))
req                63 drivers/macintosh/ams/ams-pmu.c 	static struct adb_request req;
req                66 drivers/macintosh/ams/ams-pmu.c 	req.arg = &req_complete;
req                67 drivers/macintosh/ams/ams-pmu.c 	if (pmu_request(&req, ams_pmu_req_complete, 3, ams_pmu_cmd, 0x01, reg))
req                72 drivers/macintosh/ams/ams-pmu.c 	if (req.reply_len > 0)
req                73 drivers/macintosh/ams/ams-pmu.c 		return req.reply[0];
req                67 drivers/macintosh/macio-adb.c static int macio_send_request(struct adb_request *req, int sync);
req               166 drivers/macintosh/macio-adb.c static int macio_send_request(struct adb_request *req, int sync)
req               171 drivers/macintosh/macio-adb.c 	if (req->data[0] != ADB_PACKET)
req               174 drivers/macintosh/macio-adb.c 	for (i = 0; i < req->nbytes - 1; ++i)
req               175 drivers/macintosh/macio-adb.c 		req->data[i] = req->data[i+1];
req               176 drivers/macintosh/macio-adb.c 	--req->nbytes;
req               178 drivers/macintosh/macio-adb.c 	req->next = NULL;
req               179 drivers/macintosh/macio-adb.c 	req->sent = 0;
req               180 drivers/macintosh/macio-adb.c 	req->complete = 0;
req               181 drivers/macintosh/macio-adb.c 	req->reply_len = 0;
req               185 drivers/macintosh/macio-adb.c 		last_req->next = req;
req               186 drivers/macintosh/macio-adb.c 		last_req = req;
req               188 drivers/macintosh/macio-adb.c 		current_req = last_req = req;
req               194 drivers/macintosh/macio-adb.c 		while (!req->complete)
req               204 drivers/macintosh/macio-adb.c 	struct adb_request *req = NULL;
req               214 drivers/macintosh/macio-adb.c 		if ((req = current_req) != 0) {
req               216 drivers/macintosh/macio-adb.c 			for (i = 0; i < req->nbytes; ++i)
req               217 drivers/macintosh/macio-adb.c 				out_8(&adb->data[i].r, req->data[i]);
req               218 drivers/macintosh/macio-adb.c 			out_8(&adb->dcount.r, req->nbytes & HMB);
req               219 drivers/macintosh/macio-adb.c 			req->sent = 1;
req               220 drivers/macintosh/macio-adb.c 			if (req->reply_expected) {
req               224 drivers/macintosh/macio-adb.c 				current_req = req->next;
req               238 drivers/macintosh/macio-adb.c 			req = current_req;
req               240 drivers/macintosh/macio-adb.c 				req->reply_len = in_8(&adb->dcount.r) & HMB;
req               241 drivers/macintosh/macio-adb.c 				for (i = 0; i < req->reply_len; ++i)
req               242 drivers/macintosh/macio-adb.c 					req->reply[i] = in_8(&adb->data[i].r);
req               244 drivers/macintosh/macio-adb.c 			current_req = req->next;
req               260 drivers/macintosh/macio-adb.c 	if (complete && req) {
req               261 drivers/macintosh/macio-adb.c 	    void (*done)(struct adb_request *) = req->done;
req               263 drivers/macintosh/macio-adb.c 	    req->complete = 1;
req               268 drivers/macintosh/macio-adb.c 		(*done)(req);
req               177 drivers/macintosh/via-cuda.c static int cuda_send_request(struct adb_request *req, int sync);
req               187 drivers/macintosh/via-cuda.c static int cuda_write(struct adb_request *req);
req               189 drivers/macintosh/via-cuda.c int cuda_request(struct adb_request *req,
req               206 drivers/macintosh/via-cuda.c     struct adb_request req;
req               225 drivers/macintosh/via-cuda.c     cuda_request(&req, NULL, 3, CUDA_PACKET, CUDA_AUTOPOLL, 1);
req               226 drivers/macintosh/via-cuda.c     while (!req.complete)
req               234 drivers/macintosh/via-cuda.c     struct adb_request req;
req               278 drivers/macintosh/via-cuda.c     cuda_request(&req, NULL, 3, CUDA_PACKET, CUDA_AUTOPOLL, 1);
req               279 drivers/macintosh/via-cuda.c     while (!req.complete)
req               427 drivers/macintosh/via-cuda.c cuda_send_request(struct adb_request *req, int sync)
req               432 drivers/macintosh/via-cuda.c 	req->complete = 1;
req               436 drivers/macintosh/via-cuda.c     req->reply_expected = 1;
req               438 drivers/macintosh/via-cuda.c     i = cuda_write(req);
req               443 drivers/macintosh/via-cuda.c 	while (!req->complete)
req               454 drivers/macintosh/via-cuda.c     struct adb_request req;
req               459 drivers/macintosh/via-cuda.c     cuda_request(&req, NULL, 3, CUDA_PACKET, CUDA_AUTOPOLL, (devs? 1: 0));
req               460 drivers/macintosh/via-cuda.c     while (!req.complete)
req               469 drivers/macintosh/via-cuda.c     struct adb_request req;
req               474 drivers/macintosh/via-cuda.c     cuda_request(&req, NULL, 2, ADB_PACKET, 0);		/* maybe? */
req               475 drivers/macintosh/via-cuda.c     while (!req.complete)
req               483 drivers/macintosh/via-cuda.c cuda_request(struct adb_request *req, void (*done)(struct adb_request *),
req               490 drivers/macintosh/via-cuda.c 	req->complete = 1;
req               494 drivers/macintosh/via-cuda.c     req->nbytes = nbytes;
req               495 drivers/macintosh/via-cuda.c     req->done = done;
req               498 drivers/macintosh/via-cuda.c 	req->data[i] = va_arg(list, int);
req               500 drivers/macintosh/via-cuda.c     req->reply_expected = 1;
req               501 drivers/macintosh/via-cuda.c     return cuda_write(req);
req               506 drivers/macintosh/via-cuda.c cuda_write(struct adb_request *req)
req               510 drivers/macintosh/via-cuda.c     if (req->nbytes < 2 || req->data[0] > CUDA_PACKET) {
req               511 drivers/macintosh/via-cuda.c 	req->complete = 1;
req               514 drivers/macintosh/via-cuda.c     req->next = NULL;
req               515 drivers/macintosh/via-cuda.c     req->sent = 0;
req               516 drivers/macintosh/via-cuda.c     req->complete = 0;
req               517 drivers/macintosh/via-cuda.c     req->reply_len = 0;
req               521 drivers/macintosh/via-cuda.c 	last_req->next = req;
req               522 drivers/macintosh/via-cuda.c 	last_req = req;
req               524 drivers/macintosh/via-cuda.c 	current_req = req;
req               525 drivers/macintosh/via-cuda.c 	last_req = req;
req               568 drivers/macintosh/via-cuda.c     struct adb_request *req = NULL;
req               636 drivers/macintosh/via-cuda.c 	req = current_req;
req               637 drivers/macintosh/via-cuda.c 	if (data_index >= req->nbytes) {
req               641 drivers/macintosh/via-cuda.c 	    req->sent = 1;
req               642 drivers/macintosh/via-cuda.c 	    if (req->reply_expected) {
req               645 drivers/macintosh/via-cuda.c 		current_req = req->next;
req               652 drivers/macintosh/via-cuda.c 	    out_8(&via[SR], req->data[data_index++]);
req               686 drivers/macintosh/via-cuda.c 	    req = current_req;
req               687 drivers/macintosh/via-cuda.c 	    req->reply_len = reply_ptr - req->reply;
req               688 drivers/macintosh/via-cuda.c 	    if (req->data[0] == ADB_PACKET) {
req               690 drivers/macintosh/via-cuda.c 		if (req->reply_len <= 2 || (req->reply[1] & 2) != 0) {
req               692 drivers/macintosh/via-cuda.c 		    req->reply_len = 0;
req               695 drivers/macintosh/via-cuda.c 		    req->reply_len -= 2;
req               696 drivers/macintosh/via-cuda.c 		    memmove(req->reply, req->reply + 2, req->reply_len);
req               699 drivers/macintosh/via-cuda.c 	    current_req = req->next;
req               726 drivers/macintosh/via-cuda.c     if (complete && req) {
req               727 drivers/macintosh/via-cuda.c     	void (*done)(struct adb_request *) = req->done;
req               729 drivers/macintosh/via-cuda.c     	req->complete = 1;
req               734 drivers/macintosh/via-cuda.c 		(*done)(req);
req               777 drivers/macintosh/via-cuda.c 	struct adb_request req;
req               780 drivers/macintosh/via-cuda.c 	if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
req               782 drivers/macintosh/via-cuda.c 	while (!req.complete)
req               784 drivers/macintosh/via-cuda.c 	if (req.reply_len != 7)
req               785 drivers/macintosh/via-cuda.c 		pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
req               786 drivers/macintosh/via-cuda.c 	now = (req.reply[3] << 24) + (req.reply[4] << 16) +
req               787 drivers/macintosh/via-cuda.c 	      (req.reply[5] << 8) + req.reply[6];
req               794 drivers/macintosh/via-cuda.c 	struct adb_request req;
req               797 drivers/macintosh/via-cuda.c 	if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
req               800 drivers/macintosh/via-cuda.c 	while (!req.complete)
req               802 drivers/macintosh/via-cuda.c 	if ((req.reply_len != 3) && (req.reply_len != 7))
req               803 drivers/macintosh/via-cuda.c 		pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
req                87 drivers/macintosh/via-macii.c static int macii_send_request(struct adb_request *req, int sync);
req                88 drivers/macintosh/via-macii.c static int macii_write(struct adb_request *req);
req               192 drivers/macintosh/via-macii.c 	static struct adb_request req;
req               203 drivers/macintosh/via-macii.c 	adb_request(&req, NULL, ADBREQ_NOSEND, 1, ADB_READREG(next_device, 0));
req               205 drivers/macintosh/via-macii.c 	req.sent = 0;
req               206 drivers/macintosh/via-macii.c 	req.complete = 0;
req               207 drivers/macintosh/via-macii.c 	req.reply_len = 0;
req               208 drivers/macintosh/via-macii.c 	req.next = current_req;
req               211 drivers/macintosh/via-macii.c 		current_req = &req;
req               213 drivers/macintosh/via-macii.c 		current_req = &req;
req               214 drivers/macintosh/via-macii.c 		last_req = &req;
req               219 drivers/macintosh/via-macii.c static int macii_send_request(struct adb_request *req, int sync)
req               223 drivers/macintosh/via-macii.c 	err = macii_write(req);
req               228 drivers/macintosh/via-macii.c 		while (!req->complete)
req               235 drivers/macintosh/via-macii.c static int macii_write(struct adb_request *req)
req               239 drivers/macintosh/via-macii.c 	if (req->nbytes < 2 || req->data[0] != ADB_PACKET || req->nbytes > 15) {
req               240 drivers/macintosh/via-macii.c 		req->complete = 1;
req               244 drivers/macintosh/via-macii.c 	req->next = NULL;
req               245 drivers/macintosh/via-macii.c 	req->sent = 0;
req               246 drivers/macintosh/via-macii.c 	req->complete = 0;
req               247 drivers/macintosh/via-macii.c 	req->reply_len = 0;
req               252 drivers/macintosh/via-macii.c 		last_req->next = req;
req               253 drivers/macintosh/via-macii.c 		last_req = req;
req               255 drivers/macintosh/via-macii.c 		current_req = req;
req               256 drivers/macintosh/via-macii.c 		last_req = req;
req               269 drivers/macintosh/via-macii.c 	static struct adb_request req;
req               285 drivers/macintosh/via-macii.c 		adb_request(&req, NULL, ADBREQ_NOSEND, 1,
req               287 drivers/macintosh/via-macii.c 		err = macii_write(&req);
req               314 drivers/macintosh/via-macii.c 	static struct adb_request req;
req               317 drivers/macintosh/via-macii.c 	adb_request(&req, NULL, ADBREQ_NOSEND, 1, ADB_BUSRESET);
req               318 drivers/macintosh/via-macii.c 	macii_send_request(&req, 1);
req               329 drivers/macintosh/via-macii.c 	struct adb_request *req;
req               331 drivers/macintosh/via-macii.c 	req = current_req;
req               339 drivers/macintosh/via-macii.c 	command_byte = req->data[1];
req               343 drivers/macintosh/via-macii.c 	via[SR] = req->data[1];
req               370 drivers/macintosh/via-macii.c 	struct adb_request *req;
req               417 drivers/macintosh/via-macii.c 		req = current_req;
req               418 drivers/macintosh/via-macii.c 		if (data_index >= req->nbytes) {
req               419 drivers/macintosh/via-macii.c 			req->sent = 1;
req               422 drivers/macintosh/via-macii.c 			if (req->reply_expected) {
req               425 drivers/macintosh/via-macii.c 				req->complete = 1;
req               426 drivers/macintosh/via-macii.c 				current_req = req->next;
req               427 drivers/macintosh/via-macii.c 				if (req->done)
req               428 drivers/macintosh/via-macii.c 					(*req->done)(req);
req               444 drivers/macintosh/via-macii.c 			via[SR] = req->data[data_index++];
req               502 drivers/macintosh/via-macii.c 			req = current_req;
req               503 drivers/macintosh/via-macii.c 			req->reply_len = reply_len;
req               504 drivers/macintosh/via-macii.c 			req->complete = 1;
req               505 drivers/macintosh/via-macii.c 			current_req = req->next;
req               506 drivers/macintosh/via-macii.c 			if (req->done)
req               507 drivers/macintosh/via-macii.c 				(*req->done)(req);
req                74 drivers/macintosh/via-pmu-backlight.c 	struct adb_request req;
req                85 drivers/macintosh/via-pmu-backlight.c 		pmu_request(&req, NULL, 2, PMU_BACKLIGHT_BRIGHT, pmulevel);
req                86 drivers/macintosh/via-pmu-backlight.c 		pmu_wait_complete(&req);
req                88 drivers/macintosh/via-pmu-backlight.c 		pmu_request(&req, NULL, 2, PMU_POWER_CTRL,
req                90 drivers/macintosh/via-pmu-backlight.c 		pmu_wait_complete(&req);
req                92 drivers/macintosh/via-pmu-backlight.c 		pmu_request(&req, NULL, 2, PMU_POWER_CTRL,
req                94 drivers/macintosh/via-pmu-backlight.c 		pmu_wait_complete(&req);
req               128 drivers/macintosh/via-pmu-backlight.c 			struct adb_request req;
req               130 drivers/macintosh/via-pmu-backlight.c 			pmu_request(&req, NULL, 2, PMU_POWER_CTRL,
req               132 drivers/macintosh/via-pmu-backlight.c 			pmu_wait_complete(&req);
req               176 drivers/macintosh/via-pmu-backlight.c 		struct adb_request req;
req               177 drivers/macintosh/via-pmu-backlight.c 		pmu_request(&req, NULL, 2, 0xd9, 0);
req               178 drivers/macintosh/via-pmu-backlight.c 		pmu_wait_complete(&req);
req               181 drivers/macintosh/via-pmu-backlight.c 				(req.reply[0] >> 4) *
req                35 drivers/macintosh/via-pmu-led.c static void pmu_req_done(struct adb_request * req)
req               202 drivers/macintosh/via-pmu.c static int pmu_send_request(struct adb_request *req, int sync);
req               234 drivers/macintosh/via-pmu.c int pmu_polled_request(struct adb_request *req);
req               587 drivers/macintosh/via-pmu.c 	struct adb_request req;
req               593 drivers/macintosh/via-pmu.c 	pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
req               595 drivers/macintosh/via-pmu.c 	while (!req.complete) {
req               620 drivers/macintosh/via-pmu.c 		pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2);
req               621 drivers/macintosh/via-pmu.c 		while (!req.complete)
req               626 drivers/macintosh/via-pmu.c 	pmu_request(&req, NULL, 1, PMU_GET_VERSION);
req               627 drivers/macintosh/via-pmu.c 	pmu_wait_complete(&req);
req               628 drivers/macintosh/via-pmu.c 	if (req.reply_len > 0)
req               629 drivers/macintosh/via-pmu.c 		pmu_version = req.reply[0];
req               633 drivers/macintosh/via-pmu.c 		pmu_request(&req, NULL, 2, PMU_POWER_EVENTS,
req               635 drivers/macintosh/via-pmu.c 		pmu_wait_complete(&req);
req               636 drivers/macintosh/via-pmu.c 		if (req.reply_len == 2) {
req               637 drivers/macintosh/via-pmu.c 			if (req.reply[1] & PMU_PWR_WAKEUP_AC_INSERT)
req               658 drivers/macintosh/via-pmu.c 	struct adb_request req;
req               664 drivers/macintosh/via-pmu.c 	pmu_request(&req, NULL, 2, PMU_POWER_EVENTS, PMU_PWR_GET_POWERUP_EVENTS);
req               665 drivers/macintosh/via-pmu.c 	pmu_wait_complete(&req);
req               666 drivers/macintosh/via-pmu.c 	if (req.reply_len < 2)
req               669 drivers/macintosh/via-pmu.c 		pmu_request(&req, NULL, 4, PMU_POWER_EVENTS,
req               671 drivers/macintosh/via-pmu.c 			    req.reply[0], PMU_PWR_WAKEUP_AC_INSERT); 
req               673 drivers/macintosh/via-pmu.c 		pmu_request(&req, NULL, 4, PMU_POWER_EVENTS,
req               675 drivers/macintosh/via-pmu.c 			    req.reply[0], PMU_PWR_WAKEUP_AC_INSERT); 
req               676 drivers/macintosh/via-pmu.c 	pmu_wait_complete(&req);
req               683 drivers/macintosh/via-pmu.c done_battery_state_ohare(struct adb_request* req)
req               711 drivers/macintosh/via-pmu.c 	if (req->reply[0] & 0x01)
req               728 drivers/macintosh/via-pmu.c 	if (req->reply[0] & 0x04) {
req               730 drivers/macintosh/via-pmu.c 		if (req->reply[0] & 0x02)
req               732 drivers/macintosh/via-pmu.c 		vb = (req->reply[1] << 8) | req->reply[2];
req               734 drivers/macintosh/via-pmu.c 		amperage = req->reply[5];
req               735 drivers/macintosh/via-pmu.c 		if ((req->reply[0] & 0x01) == 0) {
req               738 drivers/macintosh/via-pmu.c 		} else if (req->reply[0] & 0x02) {
req               743 drivers/macintosh/via-pmu.c 		if (req->reply[0] & 0x40) {
req               744 drivers/macintosh/via-pmu.c 			pcharge = (req->reply[6] << 8) + req->reply[7];
req               773 drivers/macintosh/via-pmu.c done_battery_state_smart(struct adb_request* req)
req               798 drivers/macintosh/via-pmu.c 	if (req->reply[1] & 0x01)
req               806 drivers/macintosh/via-pmu.c 	if (req->reply[1] & 0x04) {
req               808 drivers/macintosh/via-pmu.c 		switch(req->reply[0]) {
req               810 drivers/macintosh/via-pmu.c 			case 4: capa = req->reply[2];
req               811 drivers/macintosh/via-pmu.c 				max = req->reply[3];
req               812 drivers/macintosh/via-pmu.c 				amperage = *((signed char *)&req->reply[4]);
req               813 drivers/macintosh/via-pmu.c 				voltage = req->reply[5];
req               815 drivers/macintosh/via-pmu.c 			case 5: capa = (req->reply[2] << 8) | req->reply[3];
req               816 drivers/macintosh/via-pmu.c 				max = (req->reply[4] << 8) | req->reply[5];
req               817 drivers/macintosh/via-pmu.c 				amperage = *((signed short *)&req->reply[6]);
req               818 drivers/macintosh/via-pmu.c 				voltage = (req->reply[8] << 8) | req->reply[9];
req               822 drivers/macintosh/via-pmu.c 					"len: %d, %4ph\n", req->reply_len,
req               823 drivers/macintosh/via-pmu.c 							   req->reply);
req               828 drivers/macintosh/via-pmu.c 	if ((req->reply[1] & 0x01) && (amperage > 0))
req               837 drivers/macintosh/via-pmu.c 		if ((req->reply[1] & 0x01) && (amperage > 0))
req               988 drivers/macintosh/via-pmu.c static int pmu_send_request(struct adb_request *req, int sync)
req               993 drivers/macintosh/via-pmu.c 		req->complete = 1;
req               999 drivers/macintosh/via-pmu.c 	switch (req->data[0]) {
req              1001 drivers/macintosh/via-pmu.c 		for (i = 0; i < req->nbytes - 1; ++i)
req              1002 drivers/macintosh/via-pmu.c 			req->data[i] = req->data[i+1];
req              1003 drivers/macintosh/via-pmu.c 		--req->nbytes;
req              1004 drivers/macintosh/via-pmu.c 		if (pmu_data_len[req->data[0]][1] != 0) {
req              1005 drivers/macintosh/via-pmu.c 			req->reply[0] = ADB_RET_OK;
req              1006 drivers/macintosh/via-pmu.c 			req->reply_len = 1;
req              1008 drivers/macintosh/via-pmu.c 			req->reply_len = 0;
req              1009 drivers/macintosh/via-pmu.c 		ret = pmu_queue_request(req);
req              1012 drivers/macintosh/via-pmu.c 		switch (req->data[1]) {
req              1014 drivers/macintosh/via-pmu.c 			if (req->nbytes != 2)
req              1016 drivers/macintosh/via-pmu.c 			req->data[0] = PMU_READ_RTC;
req              1017 drivers/macintosh/via-pmu.c 			req->nbytes = 1;
req              1018 drivers/macintosh/via-pmu.c 			req->reply_len = 3;
req              1019 drivers/macintosh/via-pmu.c 			req->reply[0] = CUDA_PACKET;
req              1020 drivers/macintosh/via-pmu.c 			req->reply[1] = 0;
req              1021 drivers/macintosh/via-pmu.c 			req->reply[2] = CUDA_GET_TIME;
req              1022 drivers/macintosh/via-pmu.c 			ret = pmu_queue_request(req);
req              1025 drivers/macintosh/via-pmu.c 			if (req->nbytes != 6)
req              1027 drivers/macintosh/via-pmu.c 			req->data[0] = PMU_SET_RTC;
req              1028 drivers/macintosh/via-pmu.c 			req->nbytes = 5;
req              1030 drivers/macintosh/via-pmu.c 				req->data[i] = req->data[i+1];
req              1031 drivers/macintosh/via-pmu.c 			req->reply_len = 3;
req              1032 drivers/macintosh/via-pmu.c 			req->reply[0] = CUDA_PACKET;
req              1033 drivers/macintosh/via-pmu.c 			req->reply[1] = 0;
req              1034 drivers/macintosh/via-pmu.c 			req->reply[2] = CUDA_SET_TIME;
req              1035 drivers/macintosh/via-pmu.c 			ret = pmu_queue_request(req);
req              1042 drivers/macintosh/via-pmu.c 		for (i = req->nbytes - 1; i > 1; --i)
req              1043 drivers/macintosh/via-pmu.c 			req->data[i+2] = req->data[i];
req              1044 drivers/macintosh/via-pmu.c 		req->data[3] = req->nbytes - 2;
req              1045 drivers/macintosh/via-pmu.c 		req->data[2] = pmu_adb_flags;
req              1047 drivers/macintosh/via-pmu.c 		req->data[0] = PMU_ADB_CMD;
req              1048 drivers/macintosh/via-pmu.c 		req->nbytes += 2;
req              1049 drivers/macintosh/via-pmu.c 		req->reply_expected = 1;
req              1050 drivers/macintosh/via-pmu.c 		req->reply_len = 0;
req              1051 drivers/macintosh/via-pmu.c 		ret = pmu_queue_request(req);
req              1055 drivers/macintosh/via-pmu.c 		req->complete = 1;
req              1060 drivers/macintosh/via-pmu.c 		while (!req->complete)
req              1069 drivers/macintosh/via-pmu.c 	struct adb_request req;
req              1072 drivers/macintosh/via-pmu.c 		pmu_request(&req, NULL, 5, PMU_ADB_CMD, 0, 0x86,
req              1076 drivers/macintosh/via-pmu.c 		pmu_request(&req, NULL, 1, PMU_ADB_POLL_OFF);
req              1079 drivers/macintosh/via-pmu.c 	while (!req.complete)
req              1096 drivers/macintosh/via-pmu.c 	struct adb_request req;
req              1105 drivers/macintosh/via-pmu.c 	req.nbytes = 4;
req              1106 drivers/macintosh/via-pmu.c 	req.done = NULL;
req              1107 drivers/macintosh/via-pmu.c 	req.data[0] = PMU_ADB_CMD;
req              1108 drivers/macintosh/via-pmu.c 	req.data[1] = ADB_BUSRESET;
req              1109 drivers/macintosh/via-pmu.c 	req.data[2] = 0;
req              1110 drivers/macintosh/via-pmu.c 	req.data[3] = 0;
req              1111 drivers/macintosh/via-pmu.c 	req.data[4] = 0;
req              1112 drivers/macintosh/via-pmu.c 	req.reply_len = 0;
req              1113 drivers/macintosh/via-pmu.c 	req.reply_expected = 1;
req              1114 drivers/macintosh/via-pmu.c 	if (pmu_queue_request(&req) != 0) {
req              1118 drivers/macintosh/via-pmu.c 	pmu_wait_complete(&req);
req              1129 drivers/macintosh/via-pmu.c pmu_request(struct adb_request *req, void (*done)(struct adb_request *),
req              1140 drivers/macintosh/via-pmu.c 		req->complete = 1;
req              1143 drivers/macintosh/via-pmu.c 	req->nbytes = nbytes;
req              1144 drivers/macintosh/via-pmu.c 	req->done = done;
req              1147 drivers/macintosh/via-pmu.c 		req->data[i] = va_arg(list, int);
req              1149 drivers/macintosh/via-pmu.c 	req->reply_len = 0;
req              1150 drivers/macintosh/via-pmu.c 	req->reply_expected = 0;
req              1151 drivers/macintosh/via-pmu.c 	return pmu_queue_request(req);
req              1155 drivers/macintosh/via-pmu.c pmu_queue_request(struct adb_request *req)
req              1161 drivers/macintosh/via-pmu.c 		req->complete = 1;
req              1164 drivers/macintosh/via-pmu.c 	if (req->nbytes <= 0) {
req              1165 drivers/macintosh/via-pmu.c 		req->complete = 1;
req              1168 drivers/macintosh/via-pmu.c 	nsend = pmu_data_len[req->data[0]][0];
req              1169 drivers/macintosh/via-pmu.c 	if (nsend >= 0 && req->nbytes != nsend + 1) {
req              1170 drivers/macintosh/via-pmu.c 		req->complete = 1;
req              1174 drivers/macintosh/via-pmu.c 	req->next = NULL;
req              1175 drivers/macintosh/via-pmu.c 	req->sent = 0;
req              1176 drivers/macintosh/via-pmu.c 	req->complete = 0;
req              1180 drivers/macintosh/via-pmu.c 		last_req->next = req;
req              1181 drivers/macintosh/via-pmu.c 		last_req = req;
req              1183 drivers/macintosh/via-pmu.c 		current_req = req;
req              1184 drivers/macintosh/via-pmu.c 		last_req = req;
req              1230 drivers/macintosh/via-pmu.c pmu_done(struct adb_request *req)
req              1232 drivers/macintosh/via-pmu.c 	void (*done)(struct adb_request *) = req->done;
req              1234 drivers/macintosh/via-pmu.c 	req->complete = 1;
req              1239 drivers/macintosh/via-pmu.c 		(*done)(req);
req              1245 drivers/macintosh/via-pmu.c 	struct adb_request *req;
req              1249 drivers/macintosh/via-pmu.c 	req = current_req;
req              1250 drivers/macintosh/via-pmu.c 	if (!req || pmu_state != idle
req              1256 drivers/macintosh/via-pmu.c 	data_len = pmu_data_len[req->data[0]][0];
req              1263 drivers/macintosh/via-pmu.c 	send_byte(req->data[0]);
req              1292 drivers/macintosh/via-pmu.c pmu_wait_complete(struct adb_request *req)
req              1296 drivers/macintosh/via-pmu.c 	while((pmu_state != idle && pmu_state != locked) || !req->complete)
req              1404 drivers/macintosh/via-pmu.c 			struct adb_request *req = req_awaiting_reply;
req              1405 drivers/macintosh/via-pmu.c 			if (!req) {
req              1411 drivers/macintosh/via-pmu.c 				req->reply_len = 0;
req              1413 drivers/macintosh/via-pmu.c 				memcpy(req->reply, data + 1, len - 1);
req              1414 drivers/macintosh/via-pmu.c 				req->reply_len = len - 1;
req              1416 drivers/macintosh/via-pmu.c 			pmu_done(req);
req              1482 drivers/macintosh/via-pmu.c 	struct adb_request *req;
req              1503 drivers/macintosh/via-pmu.c 		req = current_req;
req              1505 drivers/macintosh/via-pmu.c 			data_len = req->nbytes - 1;
req              1510 drivers/macintosh/via-pmu.c 			send_byte(req->data[data_index++]);
req              1513 drivers/macintosh/via-pmu.c 		req->sent = 1;
req              1514 drivers/macintosh/via-pmu.c 		data_len = pmu_data_len[req->data[0]][1];
req              1517 drivers/macintosh/via-pmu.c 			current_req = req->next;
req              1518 drivers/macintosh/via-pmu.c 			if (req->reply_expected)
req              1519 drivers/macintosh/via-pmu.c 				req_awaiting_reply = req;
req              1521 drivers/macintosh/via-pmu.c 				return req;
req              1525 drivers/macintosh/via-pmu.c 			reply_ptr = req->reply + req->reply_len;
req              1561 drivers/macintosh/via-pmu.c 			req = current_req;
req              1567 drivers/macintosh/via-pmu.c 			current_req = req->next;
req              1568 drivers/macintosh/via-pmu.c 			req->reply_len += data_index;
req              1569 drivers/macintosh/via-pmu.c 			if (req->data[0] == PMU_SLEEP || req->data[0] == PMU_CPU_SPEED)
req              1573 drivers/macintosh/via-pmu.c 			return req;
req              1591 drivers/macintosh/via-pmu.c 	struct adb_request *req = NULL;
req              1632 drivers/macintosh/via-pmu.c 			req = pmu_sr_intr();
req              1633 drivers/macintosh/via-pmu.c 			if (req)
req              1674 drivers/macintosh/via-pmu.c 	if (req) {
req              1675 drivers/macintosh/via-pmu.c 		pmu_done(req);
req              1676 drivers/macintosh/via-pmu.c 		req = NULL;
req              1728 drivers/macintosh/via-pmu.c 	struct adb_request req;
req              1735 drivers/macintosh/via-pmu.c 	pmu_request(&req, NULL, 2, PMU_POWER_CTRL, PMU_POW_IRLED |
req              1737 drivers/macintosh/via-pmu.c 	pmu_wait_complete(&req);
req              1745 drivers/macintosh/via-pmu.c 	struct adb_request req;
req              1748 drivers/macintosh/via-pmu.c 	if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
req              1750 drivers/macintosh/via-pmu.c 	pmu_wait_complete(&req);
req              1751 drivers/macintosh/via-pmu.c 	if (req.reply_len != 4)
req              1752 drivers/macintosh/via-pmu.c 		pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
req              1753 drivers/macintosh/via-pmu.c 	now = (req.reply[0] << 24) + (req.reply[1] << 16) +
req              1754 drivers/macintosh/via-pmu.c 	      (req.reply[2] << 8) + req.reply[3];
req              1761 drivers/macintosh/via-pmu.c 	struct adb_request req;
req              1764 drivers/macintosh/via-pmu.c 	if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
req              1767 drivers/macintosh/via-pmu.c 	pmu_wait_complete(&req);
req              1768 drivers/macintosh/via-pmu.c 	if (req.reply_len != 0)
req              1769 drivers/macintosh/via-pmu.c 		pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
req              1776 drivers/macintosh/via-pmu.c 	struct adb_request req;
req              1786 drivers/macintosh/via-pmu.c 		pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB |
req              1788 drivers/macintosh/via-pmu.c 		while(!req.complete)
req              1792 drivers/macintosh/via-pmu.c 	pmu_request(&req, NULL, 1, PMU_RESET);
req              1793 drivers/macintosh/via-pmu.c 	pmu_wait_complete(&req);
req              1801 drivers/macintosh/via-pmu.c 	struct adb_request req;
req              1811 drivers/macintosh/via-pmu.c 		pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB |
req              1813 drivers/macintosh/via-pmu.c 		pmu_wait_complete(&req);
req              1821 drivers/macintosh/via-pmu.c 	pmu_request(&req, NULL, 5, PMU_SHUTDOWN,
req              1823 drivers/macintosh/via-pmu.c 	pmu_wait_complete(&req);
req              1878 drivers/macintosh/via-pmu.c 	struct adb_request req;
req              1886 drivers/macintosh/via-pmu.c 	pmu_request(&req, NULL, 2, PMU_POWER_CTRL0, PMU_POW0_OFF|PMU_POW0_HARD_DRIVE);
req              1887 drivers/macintosh/via-pmu.c 	pmu_wait_complete(&req);
req              1888 drivers/macintosh/via-pmu.c 	pmu_request(&req, NULL, 2, PMU_POWER_CTRL,
req              1890 drivers/macintosh/via-pmu.c 	pmu_wait_complete(&req);
req              1897 drivers/macintosh/via-pmu.c 		pmu_request(&req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T');
req              1898 drivers/macintosh/via-pmu.c 		pmu_wait_complete(&req);
req              1938 drivers/macintosh/via-pmu.c 	pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
req              1939 drivers/macintosh/via-pmu.c 	pmu_wait_complete(&req);
req              1940 drivers/macintosh/via-pmu.c 	pmu_request(&req, NULL, 2, PMU_POWER_CTRL0,
req              1942 drivers/macintosh/via-pmu.c 	pmu_wait_complete(&req);
req              1943 drivers/macintosh/via-pmu.c 	pmu_request(&req, NULL, 2, PMU_POWER_CTRL,
req              1945 drivers/macintosh/via-pmu.c 	pmu_wait_complete(&req);
req              1955 drivers/macintosh/via-pmu.c 	struct adb_request req;
req              1966 drivers/macintosh/via-pmu.c 	pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, 0);
req              1967 drivers/macintosh/via-pmu.c 	pmu_wait_complete(&req);
req              1970 drivers/macintosh/via-pmu.c 	pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, PMU_PWR_CLR_WAKEUP_EVENTS,
req              1972 drivers/macintosh/via-pmu.c 	pmu_wait_complete(&req);
req              1973 drivers/macintosh/via-pmu.c 	pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, PMU_PWR_SET_WAKEUP_EVENTS,
req              1976 drivers/macintosh/via-pmu.c 	pmu_wait_complete(&req);
req              1984 drivers/macintosh/via-pmu.c 		pmu_request(&req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T');
req              1985 drivers/macintosh/via-pmu.c 		pmu_wait_complete(&req);
req              2027 drivers/macintosh/via-pmu.c 	pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2);
req              2028 drivers/macintosh/via-pmu.c 	pmu_wait_complete(&req);
req              2029 drivers/macintosh/via-pmu.c 	pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
req              2030 drivers/macintosh/via-pmu.c 	pmu_wait_complete(&req);
req              2541 drivers/macintosh/via-pmu.c pmu_polled_request(struct adb_request *req)
req              2546 drivers/macintosh/via-pmu.c 	req->complete = 1;
req              2547 drivers/macintosh/via-pmu.c 	c = req->data[0];
req              2549 drivers/macintosh/via-pmu.c 	if (l >= 0 && req->nbytes != l + 1)
req              2560 drivers/macintosh/via-pmu.c 		l = req->nbytes - 1;
req              2564 drivers/macintosh/via-pmu.c 		polled_send_byte(req->data[i]);
req              2570 drivers/macintosh/via-pmu.c 		req->reply[i + req->reply_len] = polled_recv_byte();
req              2572 drivers/macintosh/via-pmu.c 	if (req->done)
req              2573 drivers/macintosh/via-pmu.c 		(*req->done)(req);
req              2582 drivers/macintosh/via-pmu.c 	struct adb_request req;
req              2584 drivers/macintosh/via-pmu.c 	memset(&req, 0, sizeof(req));
req              2587 drivers/macintosh/via-pmu.c 		req.nbytes = 4;
req              2588 drivers/macintosh/via-pmu.c 		req.done = NULL;
req              2589 drivers/macintosh/via-pmu.c 		req.data[0] = 0xee;
req              2590 drivers/macintosh/via-pmu.c 		req.data[1] = 4;
req              2591 drivers/macintosh/via-pmu.c 		req.data[2] = 0;
req              2592 drivers/macintosh/via-pmu.c 		req.data[3] = 1;
req              2593 drivers/macintosh/via-pmu.c 		req.reply[0] = ADB_RET_OK;
req              2594 drivers/macintosh/via-pmu.c 		req.reply_len = 1;
req              2595 drivers/macintosh/via-pmu.c 		req.reply_expected = 0;
req              2596 drivers/macintosh/via-pmu.c 		pmu_polled_request(&req);
req              2598 drivers/macintosh/via-pmu.c 		req.nbytes = 4;
req              2599 drivers/macintosh/via-pmu.c 		req.done = NULL;
req              2600 drivers/macintosh/via-pmu.c 		req.data[0] = 0xee;
req              2601 drivers/macintosh/via-pmu.c 		req.data[1] = 4;
req              2602 drivers/macintosh/via-pmu.c 		req.data[2] = 0;
req              2603 drivers/macintosh/via-pmu.c 		req.data[3] = 0;
req              2604 drivers/macintosh/via-pmu.c 		req.reply[0] = ADB_RET_OK;
req              2605 drivers/macintosh/via-pmu.c 		req.reply_len = 1;
req              2606 drivers/macintosh/via-pmu.c 		req.reply_expected = 0;
req              2607 drivers/macintosh/via-pmu.c 		pmu_polled_request(&req);
req              2633 drivers/macintosh/via-pmu.c 	struct adb_request req;
req              2639 drivers/macintosh/via-pmu.c 	pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2);
req              2640 drivers/macintosh/via-pmu.c 	pmu_wait_complete(&req);
req                55 drivers/md/dm-crypt.c 		struct skcipher_request *req;
req               721 drivers/md/dm-crypt.c 	struct skcipher_request *req;
req               726 drivers/md/dm-crypt.c 	req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
req               727 drivers/md/dm-crypt.c 	if (!req)
req               735 drivers/md/dm-crypt.c 	skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
req               736 drivers/md/dm-crypt.c 	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
req               737 drivers/md/dm-crypt.c 	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
req               738 drivers/md/dm-crypt.c 	skcipher_request_free(req);
req               908 drivers/md/dm-crypt.c 					     void *req)
req               910 drivers/md/dm-crypt.c 	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
req               968 drivers/md/dm-crypt.c 				     struct aead_request *req,
req               984 drivers/md/dm-crypt.c 	dmreq = dmreq_of_req(cc, req);
req              1033 drivers/md/dm-crypt.c 	aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
req              1035 drivers/md/dm-crypt.c 		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
req              1037 drivers/md/dm-crypt.c 		r = crypto_aead_encrypt(req);
req              1042 drivers/md/dm-crypt.c 		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
req              1044 drivers/md/dm-crypt.c 		r = crypto_aead_decrypt(req);
req              1064 drivers/md/dm-crypt.c 					struct skcipher_request *req,
req              1079 drivers/md/dm-crypt.c 	dmreq = dmreq_of_req(cc, req);
req              1120 drivers/md/dm-crypt.c 	skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);
req              1123 drivers/md/dm-crypt.c 		r = crypto_skcipher_encrypt(req);
req              1125 drivers/md/dm-crypt.c 		r = crypto_skcipher_decrypt(req);
req              1144 drivers/md/dm-crypt.c 	if (!ctx->r.req)
req              1145 drivers/md/dm-crypt.c 		ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);
req              1147 drivers/md/dm-crypt.c 	skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
req              1153 drivers/md/dm-crypt.c 	skcipher_request_set_callback(ctx->r.req,
req              1155 drivers/md/dm-crypt.c 	    kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
req              1185 drivers/md/dm-crypt.c 				    struct skcipher_request *req, struct bio *base_bio)
req              1189 drivers/md/dm-crypt.c 	if ((struct skcipher_request *)(io + 1) != req)
req              1190 drivers/md/dm-crypt.c 		mempool_free(req, &cc->req_pool);
req              1194 drivers/md/dm-crypt.c 				struct aead_request *req, struct bio *base_bio)
req              1198 drivers/md/dm-crypt.c 	if ((struct aead_request *)(io + 1) != req)
req              1199 drivers/md/dm-crypt.c 		mempool_free(req, &cc->req_pool);
req              1202 drivers/md/dm-crypt.c static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
req              1205 drivers/md/dm-crypt.c 		crypt_free_req_aead(cc, req, base_bio);
req              1207 drivers/md/dm-crypt.c 		crypt_free_req_skcipher(cc, req, base_bio);
req              1230 drivers/md/dm-crypt.c 			r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
req              1246 drivers/md/dm-crypt.c 			ctx->r.req = NULL;
req              1364 drivers/md/dm-crypt.c 	io->ctx.r.req = NULL;
req              1388 drivers/md/dm-crypt.c 	if (io->ctx.r.req)
req              1389 drivers/md/dm-crypt.c 		crypt_free_req(cc, io->ctx.r.req, base_bio);
req              2803 drivers/md/dm-crypt.c 		io->ctx.r.req = (struct skcipher_request *)(io + 1);
req               839 drivers/md/dm-integrity.c static void complete_journal_encrypt(struct crypto_async_request *req, int err)
req               841 drivers/md/dm-integrity.c 	struct journal_completion *comp = req->data;
req               852 drivers/md/dm-integrity.c static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
req               855 drivers/md/dm-integrity.c 	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req               858 drivers/md/dm-integrity.c 		r = crypto_skcipher_encrypt(req);
req               860 drivers/md/dm-integrity.c 		r = crypto_skcipher_decrypt(req);
req               891 drivers/md/dm-integrity.c 		struct skcipher_request *req;
req               898 drivers/md/dm-integrity.c 		req = ic->sk_requests[section];
req               900 drivers/md/dm-integrity.c 		iv = req->iv;
req               904 drivers/md/dm-integrity.c 		req->src = source_sg[section];
req               905 drivers/md/dm-integrity.c 		req->dst = target_sg[section];
req               907 drivers/md/dm-integrity.c 		if (unlikely(do_crypt(encrypt, req, comp)))
req              1462 drivers/md/dm-integrity.c 	SHASH_DESC_ON_STACK(req, ic->internal_hash);
req              1466 drivers/md/dm-integrity.c 	req->tfm = ic->internal_hash;
req              1468 drivers/md/dm-integrity.c 	r = crypto_shash_init(req);
req              1474 drivers/md/dm-integrity.c 	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
req              1480 drivers/md/dm-integrity.c 	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
req              1486 drivers/md/dm-integrity.c 	r = crypto_shash_final(req, result);
req              3313 drivers/md/dm-integrity.c 	struct skcipher_request *req = NULL;
req              3372 drivers/md/dm-integrity.c 			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
req              3373 drivers/md/dm-integrity.c 			if (!req) {
req              3409 drivers/md/dm-integrity.c 			skcipher_request_set_crypt(req, sg, sg,
req              3413 drivers/md/dm-integrity.c 			if (do_crypt(true, req, &comp))
req              3428 drivers/md/dm-integrity.c 			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
req              3429 drivers/md/dm-integrity.c 			if (!req) {
req              3479 drivers/md/dm-integrity.c 				skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
req              3482 drivers/md/dm-integrity.c 				if (do_crypt(true, req, &comp))
req              3539 drivers/md/dm-integrity.c 	skcipher_request_free(req);
req              4158 drivers/md/dm-integrity.c 			struct skcipher_request *req = ic->sk_requests[i];
req              4159 drivers/md/dm-integrity.c 			if (req) {
req              4160 drivers/md/dm-integrity.c 				kzfree(req->iv);
req              4161 drivers/md/dm-integrity.c 				skcipher_request_free(req);
req               221 drivers/md/dm-snap-persistent.c 	struct mdata_req *req = container_of(work, struct mdata_req, work);
req               223 drivers/md/dm-snap-persistent.c 	req->result = dm_io(req->io_req, 1, req->where, NULL);
req               245 drivers/md/dm-snap-persistent.c 	struct mdata_req req;
req               250 drivers/md/dm-snap-persistent.c 	req.where = &where;
req               251 drivers/md/dm-snap-persistent.c 	req.io_req = &io_req;
req               257 drivers/md/dm-snap-persistent.c 	INIT_WORK_ONSTACK(&req.work, do_metadata);
req               258 drivers/md/dm-snap-persistent.c 	queue_work(ps->metadata_wq, &req.work);
req               260 drivers/md/dm-snap-persistent.c 	destroy_work_on_stack(&req.work);
req               262 drivers/md/dm-snap-persistent.c 	return req.result;
req                96 drivers/md/dm-verity-target.c static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
req               104 drivers/md/dm-verity-target.c 		ahash_request_set_crypt(req, &sg, NULL, len);
req               105 drivers/md/dm-verity-target.c 		return crypto_wait_req(crypto_ahash_update(req), wait);
req               113 drivers/md/dm-verity-target.c 			ahash_request_set_crypt(req, &sg, NULL, this_step);
req               114 drivers/md/dm-verity-target.c 			r = crypto_wait_req(crypto_ahash_update(req), wait);
req               127 drivers/md/dm-verity-target.c static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
req               132 drivers/md/dm-verity-target.c 	ahash_request_set_tfm(req, v->tfm);
req               133 drivers/md/dm-verity-target.c 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
req               138 drivers/md/dm-verity-target.c 	r = crypto_wait_req(crypto_ahash_init(req), wait);
req               146 drivers/md/dm-verity-target.c 		r = verity_hash_update(v, req, v->salt, v->salt_size, wait);
req               151 drivers/md/dm-verity-target.c static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
req               157 drivers/md/dm-verity-target.c 		r = verity_hash_update(v, req, v->salt, v->salt_size, wait);
req               165 drivers/md/dm-verity-target.c 	ahash_request_set_crypt(req, NULL, digest, 0);
req               166 drivers/md/dm-verity-target.c 	r = crypto_wait_req(crypto_ahash_final(req), wait);
req               171 drivers/md/dm-verity-target.c int verity_hash(struct dm_verity *v, struct ahash_request *req,
req               177 drivers/md/dm-verity-target.c 	r = verity_hash_init(v, req, &wait);
req               181 drivers/md/dm-verity-target.c 	r = verity_hash_update(v, req, data, len, &wait);
req               185 drivers/md/dm-verity-target.c 	r = verity_hash_final(v, req, digest, &wait);
req               373 drivers/md/dm-verity-target.c 	struct ahash_request *req = verity_io_hash_req(v, io);
req               392 drivers/md/dm-verity-target.c 		ahash_request_set_crypt(req, &sg, NULL, len);
req               393 drivers/md/dm-verity-target.c 		r = crypto_wait_req(crypto_ahash_update(req), wait);
req               478 drivers/md/dm-verity-target.c 		struct ahash_request *req = verity_io_hash_req(v, io);
req               505 drivers/md/dm-verity-target.c 		r = verity_hash_init(v, req, &wait);
req               514 drivers/md/dm-verity-target.c 		r = verity_hash_final(v, req, verity_io_real_digest(v, io),
req               837 drivers/md/dm-verity-target.c 	struct ahash_request *req;
req               845 drivers/md/dm-verity-target.c 	req = kmalloc(v->ahash_reqsize, GFP_KERNEL);
req               847 drivers/md/dm-verity-target.c 	if (!req)
req               855 drivers/md/dm-verity-target.c 	r = verity_hash(v, req, zero_data, 1 << v->data_dev_block_bits,
req               859 drivers/md/dm-verity-target.c 	kfree(req);
req               125 drivers/md/dm-verity.h extern int verity_hash(struct dm_verity *v, struct ahash_request *req,
req               454 drivers/md/dm-writecache.c 	struct dm_io_request req;
req               481 drivers/md/dm-writecache.c 		req.bi_op = REQ_OP_WRITE;
req               482 drivers/md/dm-writecache.c 		req.bi_op_flags = REQ_SYNC;
req               483 drivers/md/dm-writecache.c 		req.mem.type = DM_IO_VMA;
req               484 drivers/md/dm-writecache.c 		req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
req               485 drivers/md/dm-writecache.c 		req.client = wc->dm_io;
req               486 drivers/md/dm-writecache.c 		req.notify.fn = writecache_notify_io;
req               487 drivers/md/dm-writecache.c 		req.notify.context = &endio;
req               490 drivers/md/dm-writecache.c 	        (void) dm_io(&req, 1, &region, NULL);
req               517 drivers/md/dm-writecache.c 	struct dm_io_request req;
req               522 drivers/md/dm-writecache.c 	req.bi_op = REQ_OP_WRITE;
req               523 drivers/md/dm-writecache.c 	req.bi_op_flags = REQ_PREFLUSH;
req               524 drivers/md/dm-writecache.c 	req.mem.type = DM_IO_KMEM;
req               525 drivers/md/dm-writecache.c 	req.mem.ptr.addr = NULL;
req               526 drivers/md/dm-writecache.c 	req.client = wc->dm_io;
req               527 drivers/md/dm-writecache.c 	req.notify.fn = NULL;
req               529 drivers/md/dm-writecache.c 	r = dm_io(&req, 1, &region, NULL);
req               884 drivers/md/dm-writecache.c 	struct dm_io_request req;
req               889 drivers/md/dm-writecache.c 	req.bi_op = REQ_OP_READ;
req               890 drivers/md/dm-writecache.c 	req.bi_op_flags = REQ_SYNC;
req               891 drivers/md/dm-writecache.c 	req.mem.type = DM_IO_VMA;
req               892 drivers/md/dm-writecache.c 	req.mem.ptr.vma = (char *)wc->memory_map;
req               893 drivers/md/dm-writecache.c 	req.client = wc->dm_io;
req               894 drivers/md/dm-writecache.c 	req.notify.fn = NULL;
req               896 drivers/md/dm-writecache.c 	return dm_io(&req, 1, &region, NULL);
req              1019 drivers/md/raid5.c 		bi = &sh->dev[i].req;
req              2151 drivers/md/raid5.c 			bio_init(&dev->req, &dev->vec, 1);
req              2471 drivers/md/raid5.c 		if (bi == &sh->dev[i].req)
req              2603 drivers/md/raid5.c 		if (bi == &sh->dev[i].req) {
req               253 drivers/md/raid5.h 		struct bio	req, rreq;
req               962 drivers/media/common/videobuf2/videobuf2-core.c 	if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) {
req              1354 drivers/media/common/videobuf2/videobuf2-core.c 	WARN_ON(!vb->req_obj.req);
req              1358 drivers/media/common/videobuf2/videobuf2-core.c 		  struct media_request *req);
req              1403 drivers/media/common/videobuf2/videobuf2-core.c unsigned int vb2_request_buffer_cnt(struct media_request *req)
req              1409 drivers/media/common/videobuf2/videobuf2-core.c 	spin_lock_irqsave(&req->lock, flags);
req              1410 drivers/media/common/videobuf2/videobuf2-core.c 	list_for_each_entry(obj, &req->objects, list)
req              1413 drivers/media/common/videobuf2/videobuf2-core.c 	spin_unlock_irqrestore(&req->lock, flags);
req              1512 drivers/media/common/videobuf2/videobuf2-core.c 		  struct media_request *req)
req              1524 drivers/media/common/videobuf2/videobuf2-core.c 	if (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
req              1530 drivers/media/common/videobuf2/videobuf2-core.c 	if ((req && q->uses_qbuf) ||
req              1531 drivers/media/common/videobuf2/videobuf2-core.c 	    (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
req              1537 drivers/media/common/videobuf2/videobuf2-core.c 	if (req) {
req              1558 drivers/media/common/videobuf2/videobuf2-core.c 		ret = media_request_lock_for_update(req);
req              1561 drivers/media/common/videobuf2/videobuf2-core.c 		ret = media_request_object_bind(req, &vb2_core_req_ops,
req              1563 drivers/media/common/videobuf2/videobuf2-core.c 		media_request_unlock_for_update(req);
req              1577 drivers/media/common/videobuf2/videobuf2-core.c 		media_request_get(req);
req              1578 drivers/media/common/videobuf2/videobuf2-core.c 		vb->request = req;
req              1843 drivers/media/common/videobuf2/videobuf2-core.c 	if (WARN_ON(vb->req_obj.req)) {
req              1923 drivers/media/common/videobuf2/videobuf2-core.c 		struct media_request *req = vb->req_obj.req;
req              1931 drivers/media/common/videobuf2/videobuf2-core.c 		if (req) {
req              1935 drivers/media/common/videobuf2/videobuf2-core.c 			spin_lock_irqsave(&req->lock, flags);
req              1936 drivers/media/common/videobuf2/videobuf2-core.c 			state = req->state;
req              1937 drivers/media/common/videobuf2/videobuf2-core.c 			spin_unlock_irqrestore(&req->lock, flags);
req              1958 drivers/media/common/videobuf2/videobuf2-core.c 		if (vb->req_obj.req) {
req               339 drivers/media/common/videobuf2/videobuf2-v4l2.c 	struct media_request *req;
req               437 drivers/media/common/videobuf2/videobuf2-v4l2.c 	req = media_request_get_by_fd(mdev, b->request_fd);
req               438 drivers/media/common/videobuf2/videobuf2-v4l2.c 	if (IS_ERR(req)) {
req               440 drivers/media/common/videobuf2/videobuf2-v4l2.c 		return PTR_ERR(req);
req               447 drivers/media/common/videobuf2/videobuf2-v4l2.c 	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
req               448 drivers/media/common/videobuf2/videobuf2-v4l2.c 	    req->state != MEDIA_REQUEST_STATE_UPDATING) {
req               450 drivers/media/common/videobuf2/videobuf2-v4l2.c 		media_request_put(req);
req               454 drivers/media/common/videobuf2/videobuf2-v4l2.c 	*p_req = req;
req               663 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
req               665 drivers/media/common/videobuf2/videobuf2-v4l2.c 	int ret = vb2_verify_memory_type(q, req->memory, req->type);
req               667 drivers/media/common/videobuf2/videobuf2-v4l2.c 	fill_buf_caps(q, &req->capabilities);
req               668 drivers/media/common/videobuf2/videobuf2-v4l2.c 	return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count);
req               750 drivers/media/common/videobuf2/videobuf2-v4l2.c 	struct media_request *req = NULL;
req               758 drivers/media/common/videobuf2/videobuf2-v4l2.c 	ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req);
req               761 drivers/media/common/videobuf2/videobuf2-v4l2.c 	ret = vb2_core_qbuf(q, b->index, b, req);
req               762 drivers/media/common/videobuf2/videobuf2-v4l2.c 	if (req)
req               763 drivers/media/common/videobuf2/videobuf2-v4l2.c 		media_request_put(req);
req              1168 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_request_validate(struct media_request *req)
req              1173 drivers/media/common/videobuf2/videobuf2-v4l2.c 	if (!vb2_request_buffer_cnt(req))
req              1176 drivers/media/common/videobuf2/videobuf2-v4l2.c 	list_for_each_entry(obj, &req->objects, list) {
req              1186 drivers/media/common/videobuf2/videobuf2-v4l2.c 		list_for_each_entry_continue_reverse(obj, &req->objects, list)
req              1195 drivers/media/common/videobuf2/videobuf2-v4l2.c void vb2_request_queue(struct media_request *req)
req              1207 drivers/media/common/videobuf2/videobuf2-v4l2.c 	list_for_each_entry_safe(obj, obj_safe, &req->objects, list)
req               333 drivers/media/dvb-core/dvb_vb2.c int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req)
req               338 drivers/media/dvb-core/dvb_vb2.c 	if (req->size > DVB_V2_MAX_SIZE)
req               339 drivers/media/dvb-core/dvb_vb2.c 		req->size = DVB_V2_MAX_SIZE;
req               343 drivers/media/dvb-core/dvb_vb2.c 	ctx->buf_siz = req->size;
req               344 drivers/media/dvb-core/dvb_vb2.c 	ctx->buf_cnt = req->count;
req               345 drivers/media/dvb-core/dvb_vb2.c 	ret = vb2_core_reqbufs(&ctx->vb_q, VB2_MEMORY_MMAP, &req->count);
req                65 drivers/media/dvb-frontends/gp8psk-fe.h 	int (*in)(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen);
req                66 drivers/media/dvb-frontends/gp8psk-fe.h 	int (*out)(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen);
req                39 drivers/media/mc/mc-request.c static void media_request_clean(struct media_request *req)
req                44 drivers/media/mc/mc-request.c 	WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
req                45 drivers/media/mc/mc-request.c 	WARN_ON(req->updating_count);
req                46 drivers/media/mc/mc-request.c 	WARN_ON(req->access_count);
req                48 drivers/media/mc/mc-request.c 	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
req                53 drivers/media/mc/mc-request.c 	req->updating_count = 0;
req                54 drivers/media/mc/mc-request.c 	req->access_count = 0;
req                55 drivers/media/mc/mc-request.c 	WARN_ON(req->num_incomplete_objects);
req                56 drivers/media/mc/mc-request.c 	req->num_incomplete_objects = 0;
req                57 drivers/media/mc/mc-request.c 	wake_up_interruptible_all(&req->poll_wait);
req                62 drivers/media/mc/mc-request.c 	struct media_request *req =
req                64 drivers/media/mc/mc-request.c 	struct media_device *mdev = req->mdev;
req                66 drivers/media/mc/mc-request.c 	dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);
req                69 drivers/media/mc/mc-request.c 	req->state = MEDIA_REQUEST_STATE_CLEANING;
req                71 drivers/media/mc/mc-request.c 	media_request_clean(req);
req                74 drivers/media/mc/mc-request.c 		mdev->ops->req_free(req);
req                76 drivers/media/mc/mc-request.c 		kfree(req);
req                79 drivers/media/mc/mc-request.c void media_request_put(struct media_request *req)
req                81 drivers/media/mc/mc-request.c 	kref_put(&req->kref, media_request_release);
req                87 drivers/media/mc/mc-request.c 	struct media_request *req = filp->private_data;
req                89 drivers/media/mc/mc-request.c 	media_request_put(req);
req                96 drivers/media/mc/mc-request.c 	struct media_request *req = filp->private_data;
req               103 drivers/media/mc/mc-request.c 	poll_wait(filp, &req->poll_wait, wait);
req               104 drivers/media/mc/mc-request.c 	spin_lock_irqsave(&req->lock, flags);
req               105 drivers/media/mc/mc-request.c 	if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
req               109 drivers/media/mc/mc-request.c 	if (req->state != MEDIA_REQUEST_STATE_QUEUED) {
req               115 drivers/media/mc/mc-request.c 	spin_unlock_irqrestore(&req->lock, flags);
req               119 drivers/media/mc/mc-request.c static long media_request_ioctl_queue(struct media_request *req)
req               121 drivers/media/mc/mc-request.c 	struct media_device *mdev = req->mdev;
req               126 drivers/media/mc/mc-request.c 	dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);
req               136 drivers/media/mc/mc-request.c 	media_request_get(req);
req               138 drivers/media/mc/mc-request.c 	spin_lock_irqsave(&req->lock, flags);
req               139 drivers/media/mc/mc-request.c 	if (req->state == MEDIA_REQUEST_STATE_IDLE)
req               140 drivers/media/mc/mc-request.c 		req->state = MEDIA_REQUEST_STATE_VALIDATING;
req               141 drivers/media/mc/mc-request.c 	state = req->state;
req               142 drivers/media/mc/mc-request.c 	spin_unlock_irqrestore(&req->lock, flags);
req               146 drivers/media/mc/mc-request.c 			req->debug_str, media_request_state_str(state));
req               147 drivers/media/mc/mc-request.c 		media_request_put(req);
req               152 drivers/media/mc/mc-request.c 	ret = mdev->ops->req_validate(req);
req               169 drivers/media/mc/mc-request.c 	spin_lock_irqsave(&req->lock, flags);
req               170 drivers/media/mc/mc-request.c 	req->state = ret ? MEDIA_REQUEST_STATE_IDLE
req               172 drivers/media/mc/mc-request.c 	spin_unlock_irqrestore(&req->lock, flags);
req               175 drivers/media/mc/mc-request.c 		mdev->ops->req_queue(req);
req               181 drivers/media/mc/mc-request.c 			req->debug_str, ret);
req               182 drivers/media/mc/mc-request.c 		media_request_put(req);
req               188 drivers/media/mc/mc-request.c static long media_request_ioctl_reinit(struct media_request *req)
req               190 drivers/media/mc/mc-request.c 	struct media_device *mdev = req->mdev;
req               193 drivers/media/mc/mc-request.c 	spin_lock_irqsave(&req->lock, flags);
req               194 drivers/media/mc/mc-request.c 	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
req               195 drivers/media/mc/mc-request.c 	    req->state != MEDIA_REQUEST_STATE_COMPLETE) {
req               198 drivers/media/mc/mc-request.c 			req->debug_str);
req               199 drivers/media/mc/mc-request.c 		spin_unlock_irqrestore(&req->lock, flags);
req               202 drivers/media/mc/mc-request.c 	if (req->access_count) {
req               205 drivers/media/mc/mc-request.c 			req->debug_str);
req               206 drivers/media/mc/mc-request.c 		spin_unlock_irqrestore(&req->lock, flags);
req               209 drivers/media/mc/mc-request.c 	req->state = MEDIA_REQUEST_STATE_CLEANING;
req               210 drivers/media/mc/mc-request.c 	spin_unlock_irqrestore(&req->lock, flags);
req               212 drivers/media/mc/mc-request.c 	media_request_clean(req);
req               214 drivers/media/mc/mc-request.c 	spin_lock_irqsave(&req->lock, flags);
req               215 drivers/media/mc/mc-request.c 	req->state = MEDIA_REQUEST_STATE_IDLE;
req               216 drivers/media/mc/mc-request.c 	spin_unlock_irqrestore(&req->lock, flags);
req               224 drivers/media/mc/mc-request.c 	struct media_request *req = filp->private_data;
req               228 drivers/media/mc/mc-request.c 		return media_request_ioctl_queue(req);
req               230 drivers/media/mc/mc-request.c 		return media_request_ioctl_reinit(req);
req               250 drivers/media/mc/mc-request.c 	struct media_request *req;
req               262 drivers/media/mc/mc-request.c 	req = f.file->private_data;
req               263 drivers/media/mc/mc-request.c 	if (req->mdev != mdev)
req               274 drivers/media/mc/mc-request.c 	media_request_get(req);
req               277 drivers/media/mc/mc-request.c 	return req;
req               290 drivers/media/mc/mc-request.c 	struct media_request *req;
req               310 drivers/media/mc/mc-request.c 		req = mdev->ops->req_alloc(mdev);
req               312 drivers/media/mc/mc-request.c 		req = kzalloc(sizeof(*req), GFP_KERNEL);
req               313 drivers/media/mc/mc-request.c 	if (!req) {
req               318 drivers/media/mc/mc-request.c 	filp->private_data = req;
req               319 drivers/media/mc/mc-request.c 	req->mdev = mdev;
req               320 drivers/media/mc/mc-request.c 	req->state = MEDIA_REQUEST_STATE_IDLE;
req               321 drivers/media/mc/mc-request.c 	req->num_incomplete_objects = 0;
req               322 drivers/media/mc/mc-request.c 	kref_init(&req->kref);
req               323 drivers/media/mc/mc-request.c 	INIT_LIST_HEAD(&req->objects);
req               324 drivers/media/mc/mc-request.c 	spin_lock_init(&req->lock);
req               325 drivers/media/mc/mc-request.c 	init_waitqueue_head(&req->poll_wait);
req               326 drivers/media/mc/mc-request.c 	req->updating_count = 0;
req               327 drivers/media/mc/mc-request.c 	req->access_count = 0;
req               331 drivers/media/mc/mc-request.c 	snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
req               333 drivers/media/mc/mc-request.c 	dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);
req               352 drivers/media/mc/mc-request.c 	struct media_request *req = obj->req;
req               354 drivers/media/mc/mc-request.c 	if (WARN_ON(req))
req               360 drivers/media/mc/mc-request.c media_request_object_find(struct media_request *req,
req               371 drivers/media/mc/mc-request.c 	spin_lock_irqsave(&req->lock, flags);
req               372 drivers/media/mc/mc-request.c 	list_for_each_entry(obj, &req->objects, list) {
req               379 drivers/media/mc/mc-request.c 	spin_unlock_irqrestore(&req->lock, flags);
req               393 drivers/media/mc/mc-request.c 	obj->req = NULL;
req               401 drivers/media/mc/mc-request.c int media_request_object_bind(struct media_request *req,
req               412 drivers/media/mc/mc-request.c 	spin_lock_irqsave(&req->lock, flags);
req               414 drivers/media/mc/mc-request.c 	if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
req               417 drivers/media/mc/mc-request.c 	obj->req = req;
req               422 drivers/media/mc/mc-request.c 		list_add_tail(&obj->list, &req->objects);
req               424 drivers/media/mc/mc-request.c 		list_add(&obj->list, &req->objects);
req               425 drivers/media/mc/mc-request.c 	req->num_incomplete_objects++;
req               429 drivers/media/mc/mc-request.c 	spin_unlock_irqrestore(&req->lock, flags);
req               436 drivers/media/mc/mc-request.c 	struct media_request *req = obj->req;
req               440 drivers/media/mc/mc-request.c 	if (WARN_ON(!req))
req               443 drivers/media/mc/mc-request.c 	spin_lock_irqsave(&req->lock, flags);
req               445 drivers/media/mc/mc-request.c 	obj->req = NULL;
req               447 drivers/media/mc/mc-request.c 	if (req->state == MEDIA_REQUEST_STATE_COMPLETE)
req               450 drivers/media/mc/mc-request.c 	if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING))
req               453 drivers/media/mc/mc-request.c 	if (req->state == MEDIA_REQUEST_STATE_CLEANING) {
req               455 drivers/media/mc/mc-request.c 			req->num_incomplete_objects--;
req               459 drivers/media/mc/mc-request.c 	if (WARN_ON(!req->num_incomplete_objects))
req               462 drivers/media/mc/mc-request.c 	req->num_incomplete_objects--;
req               463 drivers/media/mc/mc-request.c 	if (req->state == MEDIA_REQUEST_STATE_QUEUED &&
req               464 drivers/media/mc/mc-request.c 	    !req->num_incomplete_objects) {
req               465 drivers/media/mc/mc-request.c 		req->state = MEDIA_REQUEST_STATE_COMPLETE;
req               467 drivers/media/mc/mc-request.c 		wake_up_interruptible_all(&req->poll_wait);
req               471 drivers/media/mc/mc-request.c 	spin_unlock_irqrestore(&req->lock, flags);
req               475 drivers/media/mc/mc-request.c 		media_request_put(req);
req               481 drivers/media/mc/mc-request.c 	struct media_request *req = obj->req;
req               485 drivers/media/mc/mc-request.c 	spin_lock_irqsave(&req->lock, flags);
req               489 drivers/media/mc/mc-request.c 	if (WARN_ON(!req->num_incomplete_objects) ||
req               490 drivers/media/mc/mc-request.c 	    WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
req               493 drivers/media/mc/mc-request.c 	if (!--req->num_incomplete_objects) {
req               494 drivers/media/mc/mc-request.c 		req->state = MEDIA_REQUEST_STATE_COMPLETE;
req               495 drivers/media/mc/mc-request.c 		wake_up_interruptible_all(&req->poll_wait);
req               499 drivers/media/mc/mc-request.c 	spin_unlock_irqrestore(&req->lock, flags);
req               501 drivers/media/mc/mc-request.c 		media_request_put(req);
req               385 drivers/media/pci/cx18/cx18-mailbox.c 	u32 ack_irq, req;
req               402 drivers/media/pci/cx18/cx18-mailbox.c 	req = order->mb.request;
req               404 drivers/media/pci/cx18/cx18-mailbox.c 	if (req != cx18_readl(cx, &ack_mb->request) ||
req               405 drivers/media/pci/cx18/cx18-mailbox.c 	    req == cx18_readl(cx, &ack_mb->ack)) {
req               407 drivers/media/pci/cx18/cx18-mailbox.c 				rpu_str[order->rpu], rpu_str[order->rpu], req);
req               411 drivers/media/pci/cx18/cx18-mailbox.c 	cx18_writel(cx, req, &ack_mb->ack);
req               583 drivers/media/pci/cx18/cx18-mailbox.c 	u32 irq, req, ack, err;
req               638 drivers/media/pci/cx18/cx18-mailbox.c 	req = cx18_readl(cx, &mb->request);
req               641 drivers/media/pci/cx18/cx18-mailbox.c 				 (ack = cx18_readl(cx, &mb->ack)) == req,
req               643 drivers/media/pci/cx18/cx18-mailbox.c 	if (req != ack) {
req               645 drivers/media/pci/cx18/cx18-mailbox.c 		cx18_writel(cx, req, &mb->ack);
req               653 drivers/media/pci/cx18/cx18-mailbox.c 	req = ((req & 0xfffffffe) == 0xfffffffe) ? 1 : req + 1;
req               659 drivers/media/pci/cx18/cx18-mailbox.c 	cx18_writel(cx, req, &mb->request);
req               660 drivers/media/pci/cx18/cx18-mailbox.c 	cx18_writel(cx, req - 1, &mb->ack); /* ensure ack & req are distinct */
req               676 drivers/media/pci/cx18/cx18-mailbox.c 	if (ack != req) {
req               686 drivers/media/pci/cx18/cx18-mailbox.c 	if (req != ack) {
req               696 drivers/media/pci/cx18/cx18-mailbox.c 					req, ack);
req              1211 drivers/media/pci/meye/meye.c 				struct v4l2_requestbuffers *req)
req              1215 drivers/media/pci/meye/meye.c 	if (req->memory != V4L2_MEMORY_MMAP)
req              1218 drivers/media/pci/meye/meye.c 	if (meye.grab_fbuffer && req->count == gbuffers) {
req              1234 drivers/media/pci/meye/meye.c 	gbuffers = max(2, min((int)req->count, MEYE_MAX_BUFNBRS));
req              1235 drivers/media/pci/meye/meye.c 	req->count = gbuffers;
req               349 drivers/media/platform/atmel/atmel-isc-base.c 				   struct clk_rate_request *req)
req               373 drivers/media/platform/atmel/atmel-isc-base.c 			diff = abs(req->rate - rate);
req               378 drivers/media/platform/atmel/atmel-isc-base.c 				req->best_parent_rate = parent_rate;
req               379 drivers/media/platform/atmel/atmel-isc-base.c 				req->best_parent_hw = parent;
req               382 drivers/media/platform/atmel/atmel-isc-base.c 			if (!best_diff || rate < req->rate)
req               393 drivers/media/platform/atmel/atmel-isc-base.c 		__clk_get_name((req->best_parent_hw)->clk),
req               394 drivers/media/platform/atmel/atmel-isc-base.c 		req->best_parent_rate);
req               399 drivers/media/platform/atmel/atmel-isc-base.c 	req->rate = best_rate;
req               309 drivers/media/platform/omap3isp/ispccdc.c 				struct ispccdc_lsc_config_req *req)
req               311 drivers/media/platform/omap3isp/ispccdc.c 	if (!req->enable)
req               314 drivers/media/platform/omap3isp/ispccdc.c 	if (ccdc_lsc_validate_config(ccdc, &req->config) < 0) {
req               322 drivers/media/platform/omap3isp/ispccdc.c 	ccdc_lsc_setup_regs(ccdc, &req->config);
req               323 drivers/media/platform/omap3isp/ispccdc.c 	ccdc_lsc_program_table(ccdc, req->table.dma);
req               351 drivers/media/platform/omap3isp/ispccdc.c 				  struct ispccdc_lsc_config_req *req)
req               355 drivers/media/platform/omap3isp/ispccdc.c 	if (req == NULL)
req               358 drivers/media/platform/omap3isp/ispccdc.c 	if (req->table.addr) {
req               359 drivers/media/platform/omap3isp/ispccdc.c 		sg_free_table(&req->table.sgt);
req               360 drivers/media/platform/omap3isp/ispccdc.c 		dma_free_coherent(isp->dev, req->config.size, req->table.addr,
req               361 drivers/media/platform/omap3isp/ispccdc.c 				  req->table.dma);
req               364 drivers/media/platform/omap3isp/ispccdc.c 	kfree(req);
req               370 drivers/media/platform/omap3isp/ispccdc.c 	struct ispccdc_lsc_config_req *req, *n;
req               374 drivers/media/platform/omap3isp/ispccdc.c 	list_for_each_entry_safe(req, n, queue, list) {
req               375 drivers/media/platform/omap3isp/ispccdc.c 		list_del(&req->list);
req               377 drivers/media/platform/omap3isp/ispccdc.c 		ccdc_lsc_free_request(ccdc, req);
req               405 drivers/media/platform/omap3isp/ispccdc.c 	struct ispccdc_lsc_config_req *req;
req               422 drivers/media/platform/omap3isp/ispccdc.c 	req = kzalloc(sizeof(*req), GFP_KERNEL);
req               423 drivers/media/platform/omap3isp/ispccdc.c 	if (req == NULL)
req               427 drivers/media/platform/omap3isp/ispccdc.c 		if (copy_from_user(&req->config, config->lsc_cfg,
req               428 drivers/media/platform/omap3isp/ispccdc.c 				   sizeof(req->config))) {
req               433 drivers/media/platform/omap3isp/ispccdc.c 		req->enable = 1;
req               435 drivers/media/platform/omap3isp/ispccdc.c 		req->table.addr = dma_alloc_coherent(isp->dev, req->config.size,
req               436 drivers/media/platform/omap3isp/ispccdc.c 						     &req->table.dma,
req               438 drivers/media/platform/omap3isp/ispccdc.c 		if (req->table.addr == NULL) {
req               443 drivers/media/platform/omap3isp/ispccdc.c 		ret = dma_get_sgtable(isp->dev, &req->table.sgt,
req               444 drivers/media/platform/omap3isp/ispccdc.c 				      req->table.addr, req->table.dma,
req               445 drivers/media/platform/omap3isp/ispccdc.c 				      req->config.size);
req               449 drivers/media/platform/omap3isp/ispccdc.c 		dma_sync_sg_for_cpu(isp->dev, req->table.sgt.sgl,
req               450 drivers/media/platform/omap3isp/ispccdc.c 				    req->table.sgt.nents, DMA_TO_DEVICE);
req               452 drivers/media/platform/omap3isp/ispccdc.c 		if (copy_from_user(req->table.addr, config->lsc,
req               453 drivers/media/platform/omap3isp/ispccdc.c 				   req->config.size)) {
req               458 drivers/media/platform/omap3isp/ispccdc.c 		dma_sync_sg_for_device(isp->dev, req->table.sgt.sgl,
req               459 drivers/media/platform/omap3isp/ispccdc.c 				       req->table.sgt.nents, DMA_TO_DEVICE);
req               467 drivers/media/platform/omap3isp/ispccdc.c 	ccdc->lsc.request = req;
req               474 drivers/media/platform/omap3isp/ispccdc.c 		ccdc_lsc_free_request(ccdc, req);
req               636 drivers/media/platform/qcom/venus/helpers.c 			    struct hfi_buffer_requirements *req)
req               643 drivers/media/platform/qcom/venus/helpers.c 	if (req)
req               644 drivers/media/platform/qcom/venus/helpers.c 		memset(req, 0, sizeof(*req));
req               656 drivers/media/platform/qcom/venus/helpers.c 		if (req)
req               657 drivers/media/platform/qcom/venus/helpers.c 			memcpy(req, &hprop.bufreq[i], sizeof(*req));
req                27 drivers/media/platform/qcom/venus/helpers.h 			    struct hfi_buffer_requirements *req);
req               273 drivers/media/platform/vicodec/vicodec-core.c 		struct media_request *src_req = src_vb->vb2_buf.req_obj.req;
req               420 drivers/media/platform/vicodec/vicodec-core.c 	src_req = src_buf->vb2_buf.req_obj.req;
req              1570 drivers/media/platform/vicodec/vicodec-core.c 		v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
req              1741 drivers/media/platform/vicodec/vicodec-core.c 	v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->hdl);
req              2011 drivers/media/platform/vicodec/vicodec-core.c static int vicodec_request_validate(struct media_request *req)
req              2019 drivers/media/platform/vicodec/vicodec-core.c 	list_for_each_entry(obj, &req->objects, list) {
req              2035 drivers/media/platform/vicodec/vicodec-core.c 	count = vb2_request_buffer_cnt(req);
req              2048 drivers/media/platform/vicodec/vicodec-core.c 	hdl = v4l2_ctrl_request_hdl_find(req, parent_hdl);
req              2061 drivers/media/platform/vicodec/vicodec-core.c 	return vb2_request_validate(req);
req               607 drivers/media/platform/vim2m.c 	v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
req               613 drivers/media/platform/vim2m.c 	v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
req              1098 drivers/media/platform/vim2m.c 		v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
req              1110 drivers/media/platform/vim2m.c 	v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->hdl);
req               634 drivers/media/platform/vivid/vivid-core.c static int vivid_req_validate(struct media_request *req)
req               636 drivers/media/platform/vivid/vivid-core.c 	struct vivid_dev *dev = container_of(req->mdev, struct vivid_dev, mdev);
req               642 drivers/media/platform/vivid/vivid-core.c 	return vb2_request_validate(req);
req               718 drivers/media/platform/vivid/vivid-kthread-cap.c 		v4l2_ctrl_request_setup(vid_cap_buf->vb.vb2_buf.req_obj.req,
req               730 drivers/media/platform/vivid/vivid-kthread-cap.c 		v4l2_ctrl_request_complete(vid_cap_buf->vb.vb2_buf.req_obj.req,
req               743 drivers/media/platform/vivid/vivid-kthread-cap.c 		v4l2_ctrl_request_setup(vbi_cap_buf->vb.vb2_buf.req_obj.req,
req               749 drivers/media/platform/vivid/vivid-kthread-cap.c 		v4l2_ctrl_request_complete(vbi_cap_buf->vb.vb2_buf.req_obj.req,
req               935 drivers/media/platform/vivid/vivid-kthread-cap.c 			v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
req               950 drivers/media/platform/vivid/vivid-kthread-cap.c 			v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
req                78 drivers/media/platform/vivid/vivid-kthread-out.c 		v4l2_ctrl_request_setup(vid_out_buf->vb.vb2_buf.req_obj.req,
req                80 drivers/media/platform/vivid/vivid-kthread-out.c 		v4l2_ctrl_request_complete(vid_out_buf->vb.vb2_buf.req_obj.req,
req                99 drivers/media/platform/vivid/vivid-kthread-out.c 		v4l2_ctrl_request_setup(vbi_out_buf->vb.vb2_buf.req_obj.req,
req               101 drivers/media/platform/vivid/vivid-kthread-out.c 		v4l2_ctrl_request_complete(vbi_out_buf->vb.vb2_buf.req_obj.req,
req               280 drivers/media/platform/vivid/vivid-kthread-out.c 			v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
req               295 drivers/media/platform/vivid/vivid-kthread-out.c 			v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
req               105 drivers/media/platform/vivid/vivid-sdr-cap.c 		v4l2_ctrl_request_setup(sdr_cap_buf->vb.vb2_buf.req_obj.req,
req               107 drivers/media/platform/vivid/vivid-sdr-cap.c 		v4l2_ctrl_request_complete(sdr_cap_buf->vb.vb2_buf.req_obj.req,
req               304 drivers/media/platform/vivid/vivid-sdr-cap.c 		v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
req               318 drivers/media/platform/vivid/vivid-sdr-cap.c 	v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_sdr_cap);
req               223 drivers/media/platform/vivid/vivid-vbi-cap.c 	v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vbi_cap);
req               122 drivers/media/platform/vivid/vivid-vbi-out.c 	v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vbi_out);
req               262 drivers/media/platform/vivid/vivid-vid-cap.c 	v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_cap);
req               198 drivers/media/platform/vivid/vivid-vid-out.c 	v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_out);
req                94 drivers/media/rc/igorplugusb.c 	struct usb_ctrlrequest *req;
req                97 drivers/media/rc/igorplugusb.c 	req = (struct usb_ctrlrequest *)urb->setup_packet;
req               101 drivers/media/rc/igorplugusb.c 		if (req->bRequest == GET_INFRACODE &&
req                28 drivers/media/usb/as102/as10x_cmd.c 			sizeof(pcmd->body.turn_on.req));
req                31 drivers/media/usb/as102/as10x_cmd.c 	pcmd->body.turn_on.req.proc_id = cpu_to_le16(CONTROL_PROC_TURNON);
req                36 drivers/media/usb/as102/as10x_cmd.c 					    sizeof(pcmd->body.turn_on.req) +
req                69 drivers/media/usb/as102/as10x_cmd.c 			sizeof(pcmd->body.turn_off.req));
req                72 drivers/media/usb/as102/as10x_cmd.c 	pcmd->body.turn_off.req.proc_id = cpu_to_le16(CONTROL_PROC_TURNOFF);
req                78 drivers/media/usb/as102/as10x_cmd.c 			sizeof(pcmd->body.turn_off.req) + HEADER_SIZE,
req               111 drivers/media/usb/as102/as10x_cmd.c 			sizeof(preq->body.set_tune.req));
req               114 drivers/media/usb/as102/as10x_cmd.c 	preq->body.set_tune.req.proc_id = cpu_to_le16(CONTROL_PROC_SETTUNE);
req               115 drivers/media/usb/as102/as10x_cmd.c 	preq->body.set_tune.req.args.freq = (__force __u32)cpu_to_le32(ptune->freq);
req               116 drivers/media/usb/as102/as10x_cmd.c 	preq->body.set_tune.req.args.bandwidth = ptune->bandwidth;
req               117 drivers/media/usb/as102/as10x_cmd.c 	preq->body.set_tune.req.args.hier_select = ptune->hier_select;
req               118 drivers/media/usb/as102/as10x_cmd.c 	preq->body.set_tune.req.args.modulation = ptune->modulation;
req               119 drivers/media/usb/as102/as10x_cmd.c 	preq->body.set_tune.req.args.hierarchy = ptune->hierarchy;
req               120 drivers/media/usb/as102/as10x_cmd.c 	preq->body.set_tune.req.args.interleaving_mode  =
req               122 drivers/media/usb/as102/as10x_cmd.c 	preq->body.set_tune.req.args.code_rate  = ptune->code_rate;
req               123 drivers/media/usb/as102/as10x_cmd.c 	preq->body.set_tune.req.args.guard_interval = ptune->guard_interval;
req               124 drivers/media/usb/as102/as10x_cmd.c 	preq->body.set_tune.req.args.transmission_mode  =
req               131 drivers/media/usb/as102/as10x_cmd.c 					    sizeof(preq->body.set_tune.req)
req               166 drivers/media/usb/as102/as10x_cmd.c 			sizeof(preq->body.get_tune_status.req));
req               169 drivers/media/usb/as102/as10x_cmd.c 	preq->body.get_tune_status.req.proc_id =
req               177 drivers/media/usb/as102/as10x_cmd.c 			sizeof(preq->body.get_tune_status.req) + HEADER_SIZE,
req               218 drivers/media/usb/as102/as10x_cmd.c 			sizeof(pcmd->body.get_tps.req));
req               221 drivers/media/usb/as102/as10x_cmd.c 	pcmd->body.get_tune_status.req.proc_id =
req               228 drivers/media/usb/as102/as10x_cmd.c 					    sizeof(pcmd->body.get_tps.req) +
req               277 drivers/media/usb/as102/as10x_cmd.c 			sizeof(pcmd->body.get_demod_stats.req));
req               280 drivers/media/usb/as102/as10x_cmd.c 	pcmd->body.get_demod_stats.req.proc_id =
req               287 drivers/media/usb/as102/as10x_cmd.c 				sizeof(pcmd->body.get_demod_stats.req)
req               337 drivers/media/usb/as102/as10x_cmd.c 			sizeof(pcmd->body.get_impulse_rsp.req));
req               340 drivers/media/usb/as102/as10x_cmd.c 	pcmd->body.get_impulse_rsp.req.proc_id =
req               347 drivers/media/usb/as102/as10x_cmd.c 					sizeof(pcmd->body.get_impulse_rsp.req)
req                87 drivers/media/usb/as102/as10x_cmd.h 	} __packed req;
req               102 drivers/media/usb/as102/as10x_cmd.h 	} __packed req;
req               119 drivers/media/usb/as102/as10x_cmd.h 	} __packed req;
req               134 drivers/media/usb/as102/as10x_cmd.h 	} __packed req;
req               151 drivers/media/usb/as102/as10x_cmd.h 	} __packed req;
req               168 drivers/media/usb/as102/as10x_cmd.h 	} __packed req;
req               189 drivers/media/usb/as102/as10x_cmd.h 	} __packed req;
req               208 drivers/media/usb/as102/as10x_cmd.h 	} __packed req;
req               223 drivers/media/usb/as102/as10x_cmd.h 	} __packed req;
req               238 drivers/media/usb/as102/as10x_cmd.h 	} __packed req;
req               253 drivers/media/usb/as102/as10x_cmd.h 	} __packed req;
req               270 drivers/media/usb/as102/as10x_cmd.h 	} __packed req;
req               293 drivers/media/usb/as102/as10x_cmd.h 	} __packed req;
req               316 drivers/media/usb/as102/as10x_cmd.h 	} __packed req;
req               333 drivers/media/usb/as102/as10x_cmd.h 	} __packed req;
req               352 drivers/media/usb/as102/as10x_cmd.h 	} __packed req;
req               382 drivers/media/usb/as102/as10x_cmd.h 	} __packed req;
req               406 drivers/media/usb/as102/as10x_cmd.h 	} __packed req;
req               425 drivers/media/usb/as102/as10x_cmd.h 	} __packed req;
req                34 drivers/media/usb/as102/as10x_cmd_cfg.c 			sizeof(pcmd->body.context.req));
req                37 drivers/media/usb/as102/as10x_cmd_cfg.c 	pcmd->body.context.req.proc_id = cpu_to_le16(CONTROL_PROC_CONTEXT);
req                38 drivers/media/usb/as102/as10x_cmd_cfg.c 	pcmd->body.context.req.tag = cpu_to_le16(tag);
req                39 drivers/media/usb/as102/as10x_cmd_cfg.c 	pcmd->body.context.req.type = cpu_to_le16(GET_CONTEXT_DATA);
req                45 drivers/media/usb/as102/as10x_cmd_cfg.c 					     sizeof(pcmd->body.context.req)
req                90 drivers/media/usb/as102/as10x_cmd_cfg.c 			sizeof(pcmd->body.context.req));
req                93 drivers/media/usb/as102/as10x_cmd_cfg.c 	pcmd->body.context.req.proc_id = cpu_to_le16(CONTROL_PROC_CONTEXT);
req                95 drivers/media/usb/as102/as10x_cmd_cfg.c 	pcmd->body.context.req.reg_val.u.value32 = (__force u32)cpu_to_le32(value);
req                96 drivers/media/usb/as102/as10x_cmd_cfg.c 	pcmd->body.context.req.tag = cpu_to_le16(tag);
req                97 drivers/media/usb/as102/as10x_cmd_cfg.c 	pcmd->body.context.req.type = cpu_to_le16(SET_CONTEXT_DATA);
req               103 drivers/media/usb/as102/as10x_cmd_cfg.c 					     sizeof(pcmd->body.context.req)
req               144 drivers/media/usb/as102/as10x_cmd_cfg.c 			sizeof(pcmd->body.cfg_change_mode.req));
req               147 drivers/media/usb/as102/as10x_cmd_cfg.c 	pcmd->body.cfg_change_mode.req.proc_id =
req               149 drivers/media/usb/as102/as10x_cmd_cfg.c 	pcmd->body.cfg_change_mode.req.mode = mode;
req               154 drivers/media/usb/as102/as10x_cmd_cfg.c 				sizeof(pcmd->body.cfg_change_mode.req)
req                29 drivers/media/usb/as102/as10x_cmd_stream.c 			sizeof(pcmd->body.add_pid_filter.req));
req                32 drivers/media/usb/as102/as10x_cmd_stream.c 	pcmd->body.add_pid_filter.req.proc_id =
req                34 drivers/media/usb/as102/as10x_cmd_stream.c 	pcmd->body.add_pid_filter.req.pid = cpu_to_le16(filter->pid);
req                35 drivers/media/usb/as102/as10x_cmd_stream.c 	pcmd->body.add_pid_filter.req.stream_type = filter->type;
req                38 drivers/media/usb/as102/as10x_cmd_stream.c 		pcmd->body.add_pid_filter.req.idx = filter->idx;
req                40 drivers/media/usb/as102/as10x_cmd_stream.c 		pcmd->body.add_pid_filter.req.idx = 0xFF;
req                45 drivers/media/usb/as102/as10x_cmd_stream.c 				sizeof(pcmd->body.add_pid_filter.req)
req                86 drivers/media/usb/as102/as10x_cmd_stream.c 			sizeof(pcmd->body.del_pid_filter.req));
req                89 drivers/media/usb/as102/as10x_cmd_stream.c 	pcmd->body.del_pid_filter.req.proc_id =
req                91 drivers/media/usb/as102/as10x_cmd_stream.c 	pcmd->body.del_pid_filter.req.pid = cpu_to_le16(pid_value);
req                96 drivers/media/usb/as102/as10x_cmd_stream.c 				sizeof(pcmd->body.del_pid_filter.req)
req               130 drivers/media/usb/as102/as10x_cmd_stream.c 			sizeof(pcmd->body.start_streaming.req));
req               133 drivers/media/usb/as102/as10x_cmd_stream.c 	pcmd->body.start_streaming.req.proc_id =
req               139 drivers/media/usb/as102/as10x_cmd_stream.c 				sizeof(pcmd->body.start_streaming.req)
req               173 drivers/media/usb/as102/as10x_cmd_stream.c 			sizeof(pcmd->body.stop_streaming.req));
req               176 drivers/media/usb/as102/as10x_cmd_stream.c 	pcmd->body.stop_streaming.req.proc_id =
req               182 drivers/media/usb/as102/as10x_cmd_stream.c 				sizeof(pcmd->body.stop_streaming.req)
req               109 drivers/media/usb/b2c2/flexcop-usb.c 		flexcop_usb_request_t req, u8 page, u16 wAddress,
req               122 drivers/media/usb/b2c2/flexcop-usb.c 	switch (req) {
req               140 drivers/media/usb/b2c2/flexcop-usb.c 		deb_info("unsupported request for v8_mem_req %x.\n", req);
req               143 drivers/media/usb/b2c2/flexcop-usb.c 	deb_v8("v8mem: %02x %02x %04x %04x, len: %d\n", request_type, req,
req               152 drivers/media/usb/b2c2/flexcop-usb.c 			req,
req               179 drivers/media/usb/b2c2/flexcop-usb.c 		flexcop_usb_request_t req, flexcop_usb_mem_page_t page_start,
req               186 drivers/media/usb/b2c2/flexcop-usb.c 	switch(req) {
req               209 drivers/media/usb/b2c2/flexcop-usb.c 		ret = flexcop_usb_v8_memory_req(fc_usb, req,
req               232 drivers/media/usb/b2c2/flexcop-usb.c 		flexcop_usb_request_t req, flexcop_usb_i2c_function_t func,
req               269 drivers/media/usb/b2c2/flexcop-usb.c 			func, request_type, req,
req               279 drivers/media/usb/b2c2/flexcop-usb.c 			req,
req               751 drivers/media/usb/cpia2/cpia2_v4l.c static int cpia2_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *req)
req               755 drivers/media/usb/cpia2/cpia2_v4l.c 	if(req->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
req               756 drivers/media/usb/cpia2/cpia2_v4l.c 	   req->memory != V4L2_MEMORY_MMAP)
req               759 drivers/media/usb/cpia2/cpia2_v4l.c 	DBG("REQBUFS requested:%d returning:%d\n", req->count, cam->num_frames);
req               760 drivers/media/usb/cpia2/cpia2_v4l.c 	req->count = cam->num_frames;
req               761 drivers/media/usb/cpia2/cpia2_v4l.c 	memset(&req->reserved, 0, sizeof(req->reserved));
req               282 drivers/media/usb/cx231xx/cx231xx-core.c int cx231xx_read_ctrl_reg(struct cx231xx *dev, u8 req, u16 reg,
req               315 drivers/media/usb/cx231xx/cx231xx-core.c 	ret = __usb_control_msg(dev, pipe, req,
req               400 drivers/media/usb/cx231xx/cx231xx-core.c int cx231xx_write_ctrl_reg(struct cx231xx *dev, u8 req, u16 reg, char *buf,
req               439 drivers/media/usb/cx231xx/cx231xx-core.c 			req, 0, val, reg & 0xff,
req               447 drivers/media/usb/cx231xx/cx231xx-core.c 	ret = __usb_control_msg(dev, pipe, req,
req               673 drivers/media/usb/cx231xx/cx231xx.h 	int (*cx231xx_read_ctrl_reg) (struct cx231xx *dev, u8 req, u16 reg,
req               675 drivers/media/usb/cx231xx/cx231xx.h 	int (*cx231xx_write_ctrl_reg) (struct cx231xx *dev, u8 req, u16 reg,
req               836 drivers/media/usb/cx231xx/cx231xx.h int cx231xx_read_ctrl_reg(struct cx231xx *dev, u8 req, u16 reg,
req               840 drivers/media/usb/cx231xx/cx231xx.h int cx231xx_write_ctrl_reg(struct cx231xx *dev, u8 req, u16 reg,
req                17 drivers/media/usb/dvb-usb-v2/af9015.c static int af9015_ctrl_msg(struct dvb_usb_device *d, struct req_t *req)
req                28 drivers/media/usb/dvb-usb-v2/af9015.c 	state->buf[0] = req->cmd;
req                30 drivers/media/usb/dvb-usb-v2/af9015.c 	state->buf[2] = req->i2c_addr << 1;
req                31 drivers/media/usb/dvb-usb-v2/af9015.c 	state->buf[3] = req->addr >> 8;
req                32 drivers/media/usb/dvb-usb-v2/af9015.c 	state->buf[4] = req->addr & 0xff;
req                33 drivers/media/usb/dvb-usb-v2/af9015.c 	state->buf[5] = req->mbox;
req                34 drivers/media/usb/dvb-usb-v2/af9015.c 	state->buf[6] = req->addr_len;
req                35 drivers/media/usb/dvb-usb-v2/af9015.c 	state->buf[7] = req->data_len;
req                37 drivers/media/usb/dvb-usb-v2/af9015.c 	switch (req->cmd) {
req                51 drivers/media/usb/dvb-usb-v2/af9015.c 		if (((req->addr & 0xff00) == 0xff00) ||
req                52 drivers/media/usb/dvb-usb-v2/af9015.c 		    ((req->addr & 0xff00) == 0xae00))
req                60 drivers/media/usb/dvb-usb-v2/af9015.c 		dev_err(&intf->dev, "unknown cmd %d\n", req->cmd);
req                66 drivers/media/usb/dvb-usb-v2/af9015.c 	if ((write && (req->data_len > BUF_LEN - REQ_HDR_LEN)) ||
req                67 drivers/media/usb/dvb-usb-v2/af9015.c 	    (!write && (req->data_len > BUF_LEN - ACK_HDR_LEN))) {
req                69 drivers/media/usb/dvb-usb-v2/af9015.c 			req->cmd, req->data_len);
req                81 drivers/media/usb/dvb-usb-v2/af9015.c 		wlen += req->data_len;
req                82 drivers/media/usb/dvb-usb-v2/af9015.c 		memcpy(&state->buf[REQ_HDR_LEN], req->data, req->data_len);
req                84 drivers/media/usb/dvb-usb-v2/af9015.c 		rlen += req->data_len;
req                88 drivers/media/usb/dvb-usb-v2/af9015.c 	if (req->cmd == DOWNLOAD_FIRMWARE || req->cmd == RECONNECT_USB)
req               105 drivers/media/usb/dvb-usb-v2/af9015.c 		memcpy(req->data, &state->buf[ACK_HDR_LEN], req->data_len);
req               116 drivers/media/usb/dvb-usb-v2/af9015.c 	struct req_t req = {WRITE_I2C, addr, reg, 1, 1, 1, &val};
req               120 drivers/media/usb/dvb-usb-v2/af9015.c 		req.addr_len = 3;
req               122 drivers/media/usb/dvb-usb-v2/af9015.c 	return af9015_ctrl_msg(d, &req);
req               129 drivers/media/usb/dvb-usb-v2/af9015.c 	struct req_t req = {READ_I2C, addr, reg, 0, 1, 1, val};
req               133 drivers/media/usb/dvb-usb-v2/af9015.c 		req.addr_len = 3;
req               135 drivers/media/usb/dvb-usb-v2/af9015.c 	return af9015_ctrl_msg(d, &req);
req               147 drivers/media/usb/dvb-usb-v2/af9015.c 	struct req_t req;
req               198 drivers/media/usb/dvb-usb-v2/af9015.c 			req.cmd = WRITE_MEMORY;
req               200 drivers/media/usb/dvb-usb-v2/af9015.c 			req.cmd = WRITE_I2C;
req               201 drivers/media/usb/dvb-usb-v2/af9015.c 		req.i2c_addr = msg[0].addr;
req               202 drivers/media/usb/dvb-usb-v2/af9015.c 		req.addr = addr;
req               203 drivers/media/usb/dvb-usb-v2/af9015.c 		req.mbox = mbox;
req               204 drivers/media/usb/dvb-usb-v2/af9015.c 		req.addr_len = addr_len;
req               205 drivers/media/usb/dvb-usb-v2/af9015.c 		req.data_len = msg[0].len - addr_len;
req               206 drivers/media/usb/dvb-usb-v2/af9015.c 		req.data = &msg[0].buf[addr_len];
req               207 drivers/media/usb/dvb-usb-v2/af9015.c 		ret = af9015_ctrl_msg(d, &req);
req               216 drivers/media/usb/dvb-usb-v2/af9015.c 			req.cmd = READ_MEMORY;
req               218 drivers/media/usb/dvb-usb-v2/af9015.c 			req.cmd = READ_I2C;
req               219 drivers/media/usb/dvb-usb-v2/af9015.c 		req.i2c_addr = msg[0].addr;
req               220 drivers/media/usb/dvb-usb-v2/af9015.c 		req.addr = addr;
req               221 drivers/media/usb/dvb-usb-v2/af9015.c 		req.mbox = mbox;
req               222 drivers/media/usb/dvb-usb-v2/af9015.c 		req.addr_len = addr_len;
req               223 drivers/media/usb/dvb-usb-v2/af9015.c 		req.data_len = msg[1].len;
req               224 drivers/media/usb/dvb-usb-v2/af9015.c 		req.data = &msg[1].buf[0];
req               225 drivers/media/usb/dvb-usb-v2/af9015.c 		ret = af9015_ctrl_msg(d, &req);
req               236 drivers/media/usb/dvb-usb-v2/af9015.c 		req.cmd = READ_I2C;
req               237 drivers/media/usb/dvb-usb-v2/af9015.c 		req.i2c_addr = msg[0].addr;
req               238 drivers/media/usb/dvb-usb-v2/af9015.c 		req.addr = addr;
req               239 drivers/media/usb/dvb-usb-v2/af9015.c 		req.mbox = mbox;
req               240 drivers/media/usb/dvb-usb-v2/af9015.c 		req.addr_len = addr_len;
req               241 drivers/media/usb/dvb-usb-v2/af9015.c 		req.data_len = msg[0].len;
req               242 drivers/media/usb/dvb-usb-v2/af9015.c 		req.data = &msg[0].buf[0];
req               243 drivers/media/usb/dvb-usb-v2/af9015.c 		ret = af9015_ctrl_msg(d, &req);
req               272 drivers/media/usb/dvb-usb-v2/af9015.c 	struct req_t req = {GET_CONFIG, 0, 0, 0, 0, 1, &reply};
req               274 drivers/media/usb/dvb-usb-v2/af9015.c 	ret = af9015_ctrl_msg(d, &req);
req               294 drivers/media/usb/dvb-usb-v2/af9015.c 	struct req_t req = {DOWNLOAD_FIRMWARE, 0, 0, 0, 0, 0, NULL};
req               308 drivers/media/usb/dvb-usb-v2/af9015.c 		req.data_len = min(LEN_MAX, rem);
req               309 drivers/media/usb/dvb-usb-v2/af9015.c 		req.data = (u8 *)&firmware->data[firmware->size - rem];
req               310 drivers/media/usb/dvb-usb-v2/af9015.c 		req.addr = 0x5100 + firmware->size - rem;
req               311 drivers/media/usb/dvb-usb-v2/af9015.c 		ret = af9015_ctrl_msg(d, &req);
req               319 drivers/media/usb/dvb-usb-v2/af9015.c 	req.cmd = BOOT;
req               320 drivers/media/usb/dvb-usb-v2/af9015.c 	req.data_len = 0;
req               321 drivers/media/usb/dvb-usb-v2/af9015.c 	ret = af9015_ctrl_msg(d, &req);
req               344 drivers/media/usb/dvb-usb-v2/af9015.c 	struct req_t req = {READ_I2C, AF9015_I2C_EEPROM, 0, 0, 1, 1, NULL};
req               348 drivers/media/usb/dvb-usb-v2/af9015.c 		req.addr = i;
req               349 drivers/media/usb/dvb-usb-v2/af9015.c 		req.data = &buf[i];
req               350 drivers/media/usb/dvb-usb-v2/af9015.c 		ret = af9015_ctrl_msg(d, &req);
req               377 drivers/media/usb/dvb-usb-v2/af9015.c 	struct req_t req = {READ_I2C, AF9015_I2C_EEPROM, 0, 0, 1, 1, &val};
req               382 drivers/media/usb/dvb-usb-v2/af9015.c 	req.addr = AF9015_EEPROM_IR_MODE;
req               385 drivers/media/usb/dvb-usb-v2/af9015.c 		ret = af9015_ctrl_msg(d, &req);
req               400 drivers/media/usb/dvb-usb-v2/af9015.c 	req.addr = AF9015_EEPROM_TS_MODE;
req               401 drivers/media/usb/dvb-usb-v2/af9015.c 	ret = af9015_ctrl_msg(d, &req);
req               412 drivers/media/usb/dvb-usb-v2/af9015.c 		req.addr = AF9015_EEPROM_DEMOD2_I2C;
req               413 drivers/media/usb/dvb-usb-v2/af9015.c 		ret = af9015_ctrl_msg(d, &req);
req               424 drivers/media/usb/dvb-usb-v2/af9015.c 		req.addr = AF9015_EEPROM_XTAL_TYPE1 + offset;
req               425 drivers/media/usb/dvb-usb-v2/af9015.c 		ret = af9015_ctrl_msg(d, &req);
req               446 drivers/media/usb/dvb-usb-v2/af9015.c 		req.addr = AF9015_EEPROM_IF1H + offset;
req               447 drivers/media/usb/dvb-usb-v2/af9015.c 		ret = af9015_ctrl_msg(d, &req);
req               453 drivers/media/usb/dvb-usb-v2/af9015.c 		req.addr = AF9015_EEPROM_IF1L + offset;
req               454 drivers/media/usb/dvb-usb-v2/af9015.c 		ret = af9015_ctrl_msg(d, &req);
req               464 drivers/media/usb/dvb-usb-v2/af9015.c 		req.addr = AF9015_EEPROM_MT2060_IF1H  + offset;
req               465 drivers/media/usb/dvb-usb-v2/af9015.c 		ret = af9015_ctrl_msg(d, &req);
req               469 drivers/media/usb/dvb-usb-v2/af9015.c 		req.addr = AF9015_EEPROM_MT2060_IF1L + offset;
req               470 drivers/media/usb/dvb-usb-v2/af9015.c 		ret = af9015_ctrl_msg(d, &req);
req               478 drivers/media/usb/dvb-usb-v2/af9015.c 		req.addr =  AF9015_EEPROM_TUNER_ID1 + offset;
req               479 drivers/media/usb/dvb-usb-v2/af9015.c 		ret = af9015_ctrl_msg(d, &req);
req               769 drivers/media/usb/dvb-usb-v2/af9015.c 	struct req_t req = {COPY_FIRMWARE, 0, 0x5100, 0, 0, 4, firmware_info};
req               794 drivers/media/usb/dvb-usb-v2/af9015.c 	ret = af9015_ctrl_msg(d, &req);
req              1302 drivers/media/usb/dvb-usb-v2/af9015.c 	struct req_t req = {WRITE_MEMORY, 0, reg, 0, 0, len, val};
req              1304 drivers/media/usb/dvb-usb-v2/af9015.c 	ret = af9015_ctrl_msg(d, &req);
req              1323 drivers/media/usb/dvb-usb-v2/af9015.c 	struct req_t req = {READ_MEMORY, 0, reg, 0, 0, len, val};
req              1325 drivers/media/usb/dvb-usb-v2/af9015.c 	ret = af9015_ctrl_msg(d, &req);
req                32 drivers/media/usb/dvb-usb-v2/af9035.c static int af9035_ctrl_msg(struct dvb_usb_device *d, struct usb_req *req)
req                46 drivers/media/usb/dvb-usb-v2/af9035.c 	if (req->wlen > (BUF_LEN - REQ_HDR_LEN - CHECKSUM_LEN) ||
req                47 drivers/media/usb/dvb-usb-v2/af9035.c 			req->rlen > (BUF_LEN - ACK_HDR_LEN - CHECKSUM_LEN)) {
req                49 drivers/media/usb/dvb-usb-v2/af9035.c 			req->wlen, req->rlen);
req                54 drivers/media/usb/dvb-usb-v2/af9035.c 	state->buf[0] = REQ_HDR_LEN + req->wlen + CHECKSUM_LEN - 1;
req                55 drivers/media/usb/dvb-usb-v2/af9035.c 	state->buf[1] = req->mbox;
req                56 drivers/media/usb/dvb-usb-v2/af9035.c 	state->buf[2] = req->cmd;
req                58 drivers/media/usb/dvb-usb-v2/af9035.c 	memcpy(&state->buf[REQ_HDR_LEN], req->wbuf, req->wlen);
req                60 drivers/media/usb/dvb-usb-v2/af9035.c 	wlen = REQ_HDR_LEN + req->wlen + CHECKSUM_LEN;
req                61 drivers/media/usb/dvb-usb-v2/af9035.c 	rlen = ACK_HDR_LEN + req->rlen + CHECKSUM_LEN;
req                69 drivers/media/usb/dvb-usb-v2/af9035.c 	if (req->cmd == CMD_FW_DL)
req                78 drivers/media/usb/dvb-usb-v2/af9035.c 	if (req->cmd == CMD_FW_DL)
req                86 drivers/media/usb/dvb-usb-v2/af9035.c 			req->cmd, tmp_checksum, checksum);
req                94 drivers/media/usb/dvb-usb-v2/af9035.c 		if (req->cmd == CMD_IR_GET || state->buf[2] == 1) {
req               100 drivers/media/usb/dvb-usb-v2/af9035.c 			req->cmd, state->buf[2]);
req               106 drivers/media/usb/dvb-usb-v2/af9035.c 	if (req->rlen)
req               107 drivers/media/usb/dvb-usb-v2/af9035.c 		memcpy(req->rbuf, &state->buf[ACK_HDR_LEN], req->rlen);
req               119 drivers/media/usb/dvb-usb-v2/af9035.c 	struct usb_req req = { CMD_MEM_WR, mbox, 6 + len, wbuf, 0, NULL };
req               134 drivers/media/usb/dvb-usb-v2/af9035.c 	return af9035_ctrl_msg(d, &req);
req               142 drivers/media/usb/dvb-usb-v2/af9035.c 	struct usb_req req = { CMD_MEM_RD, mbox, sizeof(wbuf), wbuf, len, val };
req               144 drivers/media/usb/dvb-usb-v2/af9035.c 	return af9035_ctrl_msg(d, &req);
req               339 drivers/media/usb/dvb-usb-v2/af9035.c 			struct usb_req req = { CMD_I2C_RD, 0, 5 + msg[0].len,
req               343 drivers/media/usb/dvb-usb-v2/af9035.c 				req.cmd = CMD_GENERIC_I2C_RD;
req               344 drivers/media/usb/dvb-usb-v2/af9035.c 				req.wlen = 3 + msg[0].len;
req               346 drivers/media/usb/dvb-usb-v2/af9035.c 			req.mbox |= ((msg[0].addr & 0x80)  >>  3);
req               365 drivers/media/usb/dvb-usb-v2/af9035.c 					req.wlen = 5;
req               375 drivers/media/usb/dvb-usb-v2/af9035.c 			ret = af9035_ctrl_msg(d, &req);
req               397 drivers/media/usb/dvb-usb-v2/af9035.c 			struct usb_req req = { CMD_I2C_WR, 0, 5 + msg[0].len,
req               401 drivers/media/usb/dvb-usb-v2/af9035.c 				req.cmd = CMD_GENERIC_I2C_WR;
req               402 drivers/media/usb/dvb-usb-v2/af9035.c 				req.wlen = 3 + msg[0].len;
req               405 drivers/media/usb/dvb-usb-v2/af9035.c 			req.mbox |= ((msg[0].addr & 0x80)  >>  3);
req               418 drivers/media/usb/dvb-usb-v2/af9035.c 			ret = af9035_ctrl_msg(d, &req);
req               430 drivers/media/usb/dvb-usb-v2/af9035.c 			struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf),
req               434 drivers/media/usb/dvb-usb-v2/af9035.c 				req.cmd = CMD_GENERIC_I2C_RD;
req               435 drivers/media/usb/dvb-usb-v2/af9035.c 				req.wlen = 3;
req               437 drivers/media/usb/dvb-usb-v2/af9035.c 			req.mbox |= ((msg[0].addr & 0x80)  >>  3);
req               448 drivers/media/usb/dvb-usb-v2/af9035.c 			ret = af9035_ctrl_msg(d, &req);
req               487 drivers/media/usb/dvb-usb-v2/af9035.c 	struct usb_req req = { CMD_FW_QUERYINFO, 0, sizeof(wbuf), wbuf,
req               571 drivers/media/usb/dvb-usb-v2/af9035.c 	ret = af9035_ctrl_msg(d, &req);
req               595 drivers/media/usb/dvb-usb-v2/af9035.c 	struct usb_req req = { 0, 0, 0, NULL, 0, NULL };
req               633 drivers/media/usb/dvb-usb-v2/af9035.c 		req.cmd = CMD_FW_DL_BEGIN;
req               634 drivers/media/usb/dvb-usb-v2/af9035.c 		ret = af9035_ctrl_msg(d, &req);
req               652 drivers/media/usb/dvb-usb-v2/af9035.c 		req.cmd = CMD_FW_DL_END;
req               653 drivers/media/usb/dvb-usb-v2/af9035.c 		ret = af9035_ctrl_msg(d, &req);
req               728 drivers/media/usb/dvb-usb-v2/af9035.c 	struct usb_req req = { 0, 0, 0, NULL, 0, NULL };
req               791 drivers/media/usb/dvb-usb-v2/af9035.c 	req.cmd = CMD_FW_BOOT;
req               792 drivers/media/usb/dvb-usb-v2/af9035.c 	ret = af9035_ctrl_msg(d, &req);
req              1827 drivers/media/usb/dvb-usb-v2/af9035.c 	struct usb_req req = { CMD_IR_GET, 0, 0, NULL, 4, buf };
req              1829 drivers/media/usb/dvb-usb-v2/af9035.c 	ret = af9035_ctrl_msg(d, &req);
req               100 drivers/media/usb/dvb-usb-v2/az6007.c static int __az6007_read(struct usb_device *udev, u8 req, u16 value,
req               107 drivers/media/usb/dvb-usb-v2/az6007.c 			      req,
req               117 drivers/media/usb/dvb-usb-v2/az6007.c 		       req, value, index);
req               125 drivers/media/usb/dvb-usb-v2/az6007.c static int az6007_read(struct dvb_usb_device *d, u8 req, u16 value,
req               134 drivers/media/usb/dvb-usb-v2/az6007.c 	ret = __az6007_read(d->udev, req, value, index, b, blen);
req               141 drivers/media/usb/dvb-usb-v2/az6007.c static int __az6007_write(struct usb_device *udev, u8 req, u16 value,
req               148 drivers/media/usb/dvb-usb-v2/az6007.c 		       req, value, index);
req               161 drivers/media/usb/dvb-usb-v2/az6007.c 			      req,
req               172 drivers/media/usb/dvb-usb-v2/az6007.c static int az6007_write(struct dvb_usb_device *d, u8 req, u16 value,
req               181 drivers/media/usb/dvb-usb-v2/az6007.c 	ret = __az6007_write(d->udev, req, value, index, b, blen);
req               255 drivers/media/usb/dvb-usb-v2/az6007.c 	u8 req;
req               270 drivers/media/usb/dvb-usb-v2/az6007.c 	req = 0xC1;
req               275 drivers/media/usb/dvb-usb-v2/az6007.c 	ret = az6007_read(d, req, value, index, b, blen);
req               297 drivers/media/usb/dvb-usb-v2/az6007.c 	u8 req;
req               307 drivers/media/usb/dvb-usb-v2/az6007.c 	req = 0xC2;
req               312 drivers/media/usb/dvb-usb-v2/az6007.c 	ret = az6007_write(d, req, value1, index, NULL, blen);
req               328 drivers/media/usb/dvb-usb-v2/az6007.c 	u8 req;
req               343 drivers/media/usb/dvb-usb-v2/az6007.c 	req = 0xC3;
req               348 drivers/media/usb/dvb-usb-v2/az6007.c 	ret = az6007_read(d, req, value, index, b, blen);
req               374 drivers/media/usb/dvb-usb-v2/az6007.c 	u8 req;
req               383 drivers/media/usb/dvb-usb-v2/az6007.c 	req = 0xC4;
req               388 drivers/media/usb/dvb-usb-v2/az6007.c 	ret = az6007_write(d, req, value1, index, NULL, blen);
req               404 drivers/media/usb/dvb-usb-v2/az6007.c 	u8 req;
req               414 drivers/media/usb/dvb-usb-v2/az6007.c 	req = 0xC8;
req               419 drivers/media/usb/dvb-usb-v2/az6007.c 	ret = az6007_read(d, req, value, index, b, blen);
req               436 drivers/media/usb/dvb-usb-v2/az6007.c 	u8 req;
req               443 drivers/media/usb/dvb-usb-v2/az6007.c 	req = 0xC6;
req               448 drivers/media/usb/dvb-usb-v2/az6007.c 	ret = az6007_write(d, req, value, index, NULL, blen);
req               455 drivers/media/usb/dvb-usb-v2/az6007.c 	req = 0xC6;
req               460 drivers/media/usb/dvb-usb-v2/az6007.c 	ret = az6007_write(d, req, value, index, NULL, blen);
req               492 drivers/media/usb/dvb-usb-v2/az6007.c 	u8 req;
req               499 drivers/media/usb/dvb-usb-v2/az6007.c 	req = 0xC7;
req               504 drivers/media/usb/dvb-usb-v2/az6007.c 	ret = az6007_write(d, req, value, index, NULL, blen);
req               520 drivers/media/usb/dvb-usb-v2/az6007.c 	u8 req;
req               531 drivers/media/usb/dvb-usb-v2/az6007.c 	req = 0xC5;
req               536 drivers/media/usb/dvb-usb-v2/az6007.c 	ret = az6007_read(d, req, value, index, b, blen);
req               752 drivers/media/usb/dvb-usb-v2/az6007.c 	u8 req, addr;
req               772 drivers/media/usb/dvb-usb-v2/az6007.c 			req = AZ6007_I2C_RD;
req               777 drivers/media/usb/dvb-usb-v2/az6007.c 			ret = __az6007_read(d->udev, req, value, index,
req               790 drivers/media/usb/dvb-usb-v2/az6007.c 			req = AZ6007_I2C_WR;
req               797 drivers/media/usb/dvb-usb-v2/az6007.c 			ret =  __az6007_write(d->udev, req, value, index,
req               804 drivers/media/usb/dvb-usb-v2/az6007.c 			req = AZ6007_I2C_RD;
req               809 drivers/media/usb/dvb-usb-v2/az6007.c 			ret = __az6007_read(d->udev, req, value, index,
req                12 drivers/media/usb/dvb-usb-v2/ce6230.c static int ce6230_ctrl_msg(struct dvb_usb_device *d, struct usb_req *req)
req                22 drivers/media/usb/dvb-usb-v2/ce6230.c 	request = req->cmd;
req                23 drivers/media/usb/dvb-usb-v2/ce6230.c 	value = req->value;
req                24 drivers/media/usb/dvb-usb-v2/ce6230.c 	index = req->index;
req                26 drivers/media/usb/dvb-usb-v2/ce6230.c 	switch (req->cmd) {
req                39 drivers/media/usb/dvb-usb-v2/ce6230.c 				KBUILD_MODNAME, req->cmd);
req                44 drivers/media/usb/dvb-usb-v2/ce6230.c 	buf = kmalloc(req->data_len, GFP_KERNEL);
req                52 drivers/media/usb/dvb-usb-v2/ce6230.c 		memcpy(buf, req->data, req->data_len);
req                62 drivers/media/usb/dvb-usb-v2/ce6230.c 			buf, req->data_len, CE6230_USB_TIMEOUT);
req                65 drivers/media/usb/dvb-usb-v2/ce6230.c 			buf, req->data_len);
req                75 drivers/media/usb/dvb-usb-v2/ce6230.c 		memcpy(req->data, buf, req->data_len);
req                90 drivers/media/usb/dvb-usb-v2/ce6230.c 	struct usb_req req;
req                95 drivers/media/usb/dvb-usb-v2/ce6230.c 	memset(&req, 0, sizeof(req));
req               104 drivers/media/usb/dvb-usb-v2/ce6230.c 				req.cmd = DEMOD_READ;
req               105 drivers/media/usb/dvb-usb-v2/ce6230.c 				req.value = msg[i].addr >> 1;
req               106 drivers/media/usb/dvb-usb-v2/ce6230.c 				req.index = msg[i].buf[0];
req               107 drivers/media/usb/dvb-usb-v2/ce6230.c 				req.data_len = msg[i+1].len;
req               108 drivers/media/usb/dvb-usb-v2/ce6230.c 				req.data = &msg[i+1].buf[0];
req               109 drivers/media/usb/dvb-usb-v2/ce6230.c 				ret = ce6230_ctrl_msg(d, &req);
req               120 drivers/media/usb/dvb-usb-v2/ce6230.c 				req.cmd = DEMOD_WRITE;
req               121 drivers/media/usb/dvb-usb-v2/ce6230.c 				req.value = msg[i].addr >> 1;
req               122 drivers/media/usb/dvb-usb-v2/ce6230.c 				req.index = msg[i].buf[0];
req               123 drivers/media/usb/dvb-usb-v2/ce6230.c 				req.data_len = msg[i].len-1;
req               124 drivers/media/usb/dvb-usb-v2/ce6230.c 				req.data = &msg[i].buf[1];
req               125 drivers/media/usb/dvb-usb-v2/ce6230.c 				ret = ce6230_ctrl_msg(d, &req);
req               127 drivers/media/usb/dvb-usb-v2/ce6230.c 				req.cmd = I2C_WRITE;
req               128 drivers/media/usb/dvb-usb-v2/ce6230.c 				req.value = 0x2000 + (msg[i].addr >> 1);
req               129 drivers/media/usb/dvb-usb-v2/ce6230.c 				req.index = 0x0000;
req               130 drivers/media/usb/dvb-usb-v2/ce6230.c 				req.data_len = msg[i].len;
req               131 drivers/media/usb/dvb-usb-v2/ce6230.c 				req.data = &msg[i].buf[0];
req               132 drivers/media/usb/dvb-usb-v2/ce6230.c 				ret = ce6230_ctrl_msg(d, &req);
req                14 drivers/media/usb/dvb-usb-v2/ec168.c static int ec168_ctrl_msg(struct dvb_usb_device *d, struct ec168_req *req)
req                21 drivers/media/usb/dvb-usb-v2/ec168.c 	switch (req->cmd) {
req                27 drivers/media/usb/dvb-usb-v2/ec168.c 		request = req->cmd;
req                31 drivers/media/usb/dvb-usb-v2/ec168.c 		request = req->cmd;
req                51 drivers/media/usb/dvb-usb-v2/ec168.c 				KBUILD_MODNAME, req->cmd);
req                56 drivers/media/usb/dvb-usb-v2/ec168.c 	buf = kmalloc(req->size, GFP_KERNEL);
req                64 drivers/media/usb/dvb-usb-v2/ec168.c 		memcpy(buf, req->data, req->size);
req                73 drivers/media/usb/dvb-usb-v2/ec168.c 	ret = usb_control_msg(d->udev, pipe, request, requesttype, req->value,
req                74 drivers/media/usb/dvb-usb-v2/ec168.c 		req->index, buf, req->size, EC168_USB_TIMEOUT);
req                76 drivers/media/usb/dvb-usb-v2/ec168.c 	dvb_usb_dbg_usb_control_msg(d->udev, request, requesttype, req->value,
req                77 drivers/media/usb/dvb-usb-v2/ec168.c 			req->index, buf, req->size);
req                86 drivers/media/usb/dvb-usb-v2/ec168.c 		memcpy(req->data, buf, req->size);
req               105 drivers/media/usb/dvb-usb-v2/ec168.c 	struct ec168_req req;
req               118 drivers/media/usb/dvb-usb-v2/ec168.c 				req.cmd = READ_DEMOD;
req               119 drivers/media/usb/dvb-usb-v2/ec168.c 				req.value = 0;
req               120 drivers/media/usb/dvb-usb-v2/ec168.c 				req.index = 0xff00 + msg[i].buf[0]; /* reg */
req               121 drivers/media/usb/dvb-usb-v2/ec168.c 				req.size = msg[i+1].len; /* bytes to read */
req               122 drivers/media/usb/dvb-usb-v2/ec168.c 				req.data = &msg[i+1].buf[0];
req               123 drivers/media/usb/dvb-usb-v2/ec168.c 				ret = ec168_ctrl_msg(d, &req);
req               134 drivers/media/usb/dvb-usb-v2/ec168.c 				req.cmd = WRITE_DEMOD;
req               135 drivers/media/usb/dvb-usb-v2/ec168.c 				req.value = msg[i].buf[1]; /* val */
req               136 drivers/media/usb/dvb-usb-v2/ec168.c 				req.index = 0xff00 + msg[i].buf[0]; /* reg */
req               137 drivers/media/usb/dvb-usb-v2/ec168.c 				req.size = 0;
req               138 drivers/media/usb/dvb-usb-v2/ec168.c 				req.data = NULL;
req               139 drivers/media/usb/dvb-usb-v2/ec168.c 				ret = ec168_ctrl_msg(d, &req);
req               142 drivers/media/usb/dvb-usb-v2/ec168.c 				req.cmd = WRITE_I2C;
req               143 drivers/media/usb/dvb-usb-v2/ec168.c 				req.value = msg[i].buf[0]; /* val */
req               144 drivers/media/usb/dvb-usb-v2/ec168.c 				req.index = 0x0100 + msg[i].addr; /* I2C addr */
req               145 drivers/media/usb/dvb-usb-v2/ec168.c 				req.size = msg[i].len-1;
req               146 drivers/media/usb/dvb-usb-v2/ec168.c 				req.data = &msg[i].buf[1];
req               147 drivers/media/usb/dvb-usb-v2/ec168.c 				ret = ec168_ctrl_msg(d, &req);
req               177 drivers/media/usb/dvb-usb-v2/ec168.c 	struct ec168_req req = {GET_CONFIG, 0, 1, sizeof(reply), &reply};
req               180 drivers/media/usb/dvb-usb-v2/ec168.c 	ret = ec168_ctrl_msg(d, &req);
req               201 drivers/media/usb/dvb-usb-v2/ec168.c 	struct ec168_req req = {DOWNLOAD_FIRMWARE, 0, 0, 0, NULL};
req               210 drivers/media/usb/dvb-usb-v2/ec168.c 		req.size = len;
req               211 drivers/media/usb/dvb-usb-v2/ec168.c 		req.data = (u8 *) &fw->data[fw->size - remaining];
req               212 drivers/media/usb/dvb-usb-v2/ec168.c 		req.index = fw->size - remaining;
req               214 drivers/media/usb/dvb-usb-v2/ec168.c 		ret = ec168_ctrl_msg(d, &req);
req               223 drivers/media/usb/dvb-usb-v2/ec168.c 	req.size = 0;
req               226 drivers/media/usb/dvb-usb-v2/ec168.c 	req.cmd = SET_CONFIG;
req               227 drivers/media/usb/dvb-usb-v2/ec168.c 	req.value = 0;
req               228 drivers/media/usb/dvb-usb-v2/ec168.c 	req.index = 0x0001;
req               229 drivers/media/usb/dvb-usb-v2/ec168.c 	ret = ec168_ctrl_msg(d, &req);
req               234 drivers/media/usb/dvb-usb-v2/ec168.c 	req.cmd = GPIO;
req               235 drivers/media/usb/dvb-usb-v2/ec168.c 	req.value = 0;
req               236 drivers/media/usb/dvb-usb-v2/ec168.c 	req.index = 0x0206;
req               237 drivers/media/usb/dvb-usb-v2/ec168.c 	ret = ec168_ctrl_msg(d, &req);
req               242 drivers/media/usb/dvb-usb-v2/ec168.c 	req.cmd = WRITE_I2C;
req               243 drivers/media/usb/dvb-usb-v2/ec168.c 	req.value = 0;
req               244 drivers/media/usb/dvb-usb-v2/ec168.c 	req.index = 0x00c6;
req               245 drivers/media/usb/dvb-usb-v2/ec168.c 	ret = ec168_ctrl_msg(d, &req);
req               301 drivers/media/usb/dvb-usb-v2/ec168.c 	struct ec168_req req = {STREAMING_CTRL, 0x7f01, 0x0202, 0, NULL};
req               305 drivers/media/usb/dvb-usb-v2/ec168.c 		req.index = 0x0102;
req               306 drivers/media/usb/dvb-usb-v2/ec168.c 	return ec168_ctrl_msg(d, &req);
req                23 drivers/media/usb/dvb-usb-v2/gl861.c 	u8 req, type;
req                28 drivers/media/usb/dvb-usb-v2/gl861.c 		req = GL861_REQ_I2C_WRITE;
req                32 drivers/media/usb/dvb-usb-v2/gl861.c 		req = GL861_REQ_I2C_READ;
req                56 drivers/media/usb/dvb-usb-v2/gl861.c 	ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), req, type,
req                17 drivers/media/usb/dvb-usb-v2/rtl28xxu.c static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req)
req                26 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 	if (req->size > sizeof(dev->buf)) {
req                27 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 		dev_err(&d->intf->dev, "too large message %u\n", req->size);
req                32 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 	if (req->index & CMD_WR_FLAG) {
req                34 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 		memcpy(dev->buf, req->data, req->size);
req                43 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 	ret = usb_control_msg(d->udev, pipe, 0, requesttype, req->value,
req                44 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req->index, dev->buf, req->size, 1000);
req                45 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 	dvb_usb_dbg_usb_control_msg(d->udev, 0, requesttype, req->value,
req                46 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req->index, dev->buf, req->size);
req                52 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 		memcpy(req->data, dev->buf, req->size);
req                65 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 	struct rtl28xxu_req req;
req                68 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 		req.index = CMD_USB_WR;
req                70 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 		req.index = CMD_SYS_WR;
req                72 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 		req.index = CMD_IR_WR;
req                74 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 	req.value = reg;
req                75 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 	req.size = len;
req                76 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 	req.data = val;
req                78 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 	return rtl28xxu_ctrl_msg(d, &req);
req                83 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 	struct rtl28xxu_req req;
req                86 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 		req.index = CMD_USB_RD;
req                88 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 		req.index = CMD_SYS_RD;
req                90 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 		req.index = CMD_IR_RD;
req                92 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 	req.value = reg;
req                93 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 	req.size = len;
req                94 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 	req.data = val;
req                96 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 	return rtl28xxu_ctrl_msg(d, &req);
req               136 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 	struct rtl28xxu_req req;
req               176 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 				req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1);
req               177 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 				req.index = CMD_DEMOD_RD | dev->page;
req               178 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 				req.size = msg[1].len;
req               179 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 				req.data = &msg[1].buf[0];
req               180 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 				ret = rtl28xxu_ctrl_msg(d, &req);
req               184 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1);
req               185 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req.index = CMD_I2C_RD;
req               186 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req.size = msg[1].len;
req               187 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req.data = &msg[1].buf[0];
req               188 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			ret = rtl28xxu_ctrl_msg(d, &req);
req               191 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req.value = (msg[0].addr << 1);
req               192 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req.index = CMD_I2C_DA_WR;
req               193 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req.size = msg[0].len;
req               194 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req.data = msg[0].buf;
req               195 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			ret = rtl28xxu_ctrl_msg(d, &req);
req               199 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req.value = (msg[0].addr << 1);
req               200 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req.index = CMD_I2C_DA_RD;
req               201 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req.size = msg[1].len;
req               202 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req.data = msg[1].buf;
req               203 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			ret = rtl28xxu_ctrl_msg(d, &req);
req               217 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 				req.value = (msg[0].buf[0] << 8) |
req               219 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 				req.index = CMD_DEMOD_WR | dev->page;
req               220 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 				req.size = msg[0].len-1;
req               221 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 				req.data = &msg[0].buf[1];
req               222 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 				ret = rtl28xxu_ctrl_msg(d, &req);
req               226 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1);
req               227 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req.index = CMD_I2C_WR;
req               228 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req.size = msg[0].len-1;
req               229 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req.data = &msg[0].buf[1];
req               230 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			ret = rtl28xxu_ctrl_msg(d, &req);
req               233 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req.value = (msg[0].addr << 1);
req               234 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req.index = CMD_I2C_DA_WR;
req               235 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req.size = msg[0].len;
req               236 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			req.data = msg[0].buf;
req               237 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 			ret = rtl28xxu_ctrl_msg(d, &req);
req               240 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 		req.value = (msg[0].addr << 1);
req               241 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 		req.index = CMD_I2C_DA_RD;
req               242 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 		req.size = msg[0].len;
req               243 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 		req.data = msg[0].buf;
req               244 drivers/media/usb/dvb-usb-v2/rtl28xxu.c 		ret = rtl28xxu_ctrl_msg(d, &req);
req               298 drivers/media/usb/dvb-usb/az6027.c static int az6027_usb_in_op(struct dvb_usb_device *d, u8 req,
req               307 drivers/media/usb/dvb-usb/az6027.c 			      req,
req               321 drivers/media/usb/dvb-usb/az6027.c 	deb_xfer("in: req. %02x, val: %04x, ind: %04x, buffer: ", req, value, index);
req               329 drivers/media/usb/dvb-usb/az6027.c 			     u8 req,
req               337 drivers/media/usb/dvb-usb/az6027.c 	deb_xfer("out: req. %02x, val: %04x, ind: %04x, buffer: ", req, value, index);
req               345 drivers/media/usb/dvb-usb/az6027.c 			      req,
req               366 drivers/media/usb/dvb-usb/az6027.c 	u8 req;
req               373 drivers/media/usb/dvb-usb/az6027.c 	req = 0xBC;
req               378 drivers/media/usb/dvb-usb/az6027.c 	ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen);
req               413 drivers/media/usb/dvb-usb/az6027.c 	u8 req;
req               428 drivers/media/usb/dvb-usb/az6027.c 	req = 0xC1;
req               433 drivers/media/usb/dvb-usb/az6027.c 	ret = az6027_usb_in_op(d, req, value, index, b, blen);
req               455 drivers/media/usb/dvb-usb/az6027.c 	u8 req;
req               465 drivers/media/usb/dvb-usb/az6027.c 	req = 0xC2;
req               470 drivers/media/usb/dvb-usb/az6027.c 	ret = az6027_usb_out_op(d, req, value1, index, NULL, blen);
req               486 drivers/media/usb/dvb-usb/az6027.c 	u8 req;
req               501 drivers/media/usb/dvb-usb/az6027.c 	req = 0xC3;
req               506 drivers/media/usb/dvb-usb/az6027.c 	ret = az6027_usb_in_op(d, req, value, index, b, blen);
req               532 drivers/media/usb/dvb-usb/az6027.c 	u8 req;
req               541 drivers/media/usb/dvb-usb/az6027.c 	req = 0xC4;
req               546 drivers/media/usb/dvb-usb/az6027.c 	ret = az6027_usb_out_op(d, req, value1, index, NULL, blen);
req               562 drivers/media/usb/dvb-usb/az6027.c 	u8 req;
req               572 drivers/media/usb/dvb-usb/az6027.c 	req = 0xC8;
req               577 drivers/media/usb/dvb-usb/az6027.c 	ret = az6027_usb_in_op(d, req, value, index, b, blen);
req               594 drivers/media/usb/dvb-usb/az6027.c 	u8 req;
req               601 drivers/media/usb/dvb-usb/az6027.c 	req = 0xC6;
req               606 drivers/media/usb/dvb-usb/az6027.c 	ret = az6027_usb_out_op(d, req, value, index, NULL, blen);
req               613 drivers/media/usb/dvb-usb/az6027.c 	req = 0xC6;
req               618 drivers/media/usb/dvb-usb/az6027.c 	ret = az6027_usb_out_op(d, req, value, index, NULL, blen);
req               650 drivers/media/usb/dvb-usb/az6027.c 	u8 req;
req               657 drivers/media/usb/dvb-usb/az6027.c 	req = 0xC7;
req               662 drivers/media/usb/dvb-usb/az6027.c 	ret = az6027_usb_out_op(d, req, value, index, NULL, blen);
req               678 drivers/media/usb/dvb-usb/az6027.c 	u8 req;
req               689 drivers/media/usb/dvb-usb/az6027.c 	req = 0xC5;
req               694 drivers/media/usb/dvb-usb/az6027.c 	ret = az6027_usb_in_op(d, req, value, index, b, blen);
req               823 drivers/media/usb/dvb-usb/az6027.c 	u8 req;
req               828 drivers/media/usb/dvb-usb/az6027.c 	req = 0xBC;
req               833 drivers/media/usb/dvb-usb/az6027.c 	ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen);
req               842 drivers/media/usb/dvb-usb/az6027.c 	u8 req;
req               848 drivers/media/usb/dvb-usb/az6027.c 	req = 0xC0;
req               853 drivers/media/usb/dvb-usb/az6027.c 	ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen);
req               857 drivers/media/usb/dvb-usb/az6027.c 	req = 0xC0;
req               863 drivers/media/usb/dvb-usb/az6027.c 	ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen);
req               869 drivers/media/usb/dvb-usb/az6027.c 	req = 0xC0;
req               874 drivers/media/usb/dvb-usb/az6027.c 	ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen);
req               885 drivers/media/usb/dvb-usb/az6027.c 	u8 req;
req               891 drivers/media/usb/dvb-usb/az6027.c 	req = 0xC7;
req               896 drivers/media/usb/dvb-usb/az6027.c 	ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen);
req               957 drivers/media/usb/dvb-usb/az6027.c 	u8 req;
req               975 drivers/media/usb/dvb-usb/az6027.c 			req = 0xBE;
req               979 drivers/media/usb/dvb-usb/az6027.c 			az6027_usb_out_op(d, req, value, index, data, length);
req               985 drivers/media/usb/dvb-usb/az6027.c 				req = 0xB9;
req               989 drivers/media/usb/dvb-usb/az6027.c 				az6027_usb_in_op(d, req, value, index, data, length);
req               998 drivers/media/usb/dvb-usb/az6027.c 				req = 0xBD;
req              1005 drivers/media/usb/dvb-usb/az6027.c 				az6027_usb_out_op(d, req, value, index, data, length);
req              1012 drivers/media/usb/dvb-usb/az6027.c 				req = 0xB9;
req              1016 drivers/media/usb/dvb-usb/az6027.c 				az6027_usb_in_op(d, req, value, index, data, length);
req              1023 drivers/media/usb/dvb-usb/az6027.c 				req = 0xBD;
req              1032 drivers/media/usb/dvb-usb/az6027.c 				az6027_usb_out_op(d, req, value, index, data, length);
req                29 drivers/media/usb/dvb-usb/gp8psk.c static int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value,
req                44 drivers/media/usb/dvb-usb/gp8psk.c 			req,
req                53 drivers/media/usb/dvb-usb/gp8psk.c 		warn("usb in %d operation failed.", req);
req                60 drivers/media/usb/dvb-usb/gp8psk.c 	deb_xfer("in: req. %x, val: %x, ind: %x, buffer: ",req,value,index);
req                68 drivers/media/usb/dvb-usb/gp8psk.c static int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
req                74 drivers/media/usb/dvb-usb/gp8psk.c 	deb_xfer("out: req. %x, val: %x, ind: %x, buffer: ",req,value,index);
req                86 drivers/media/usb/dvb-usb/gp8psk.c 			req,
req               255 drivers/media/usb/dvb-usb/gp8psk.c static int gp8psk_fe_in(void *priv, u8 req, u16 value,
req               260 drivers/media/usb/dvb-usb/gp8psk.c 	return gp8psk_usb_in_op(d, req, value, index, b, blen);
req               263 drivers/media/usb/dvb-usb/gp8psk.c static int gp8psk_fe_out(void *priv, u8 req, u16 value,
req               268 drivers/media/usb/dvb-usb/gp8psk.c 	return gp8psk_usb_out_op(d, req, value, index, b, blen);
req                30 drivers/media/usb/dvb-usb/vp702x.c static int vp702x_usb_in_op_unlocked(struct dvb_usb_device *d, u8 req,
req                37 drivers/media/usb/dvb-usb/vp702x.c 		req,
req                49 drivers/media/usb/dvb-usb/vp702x.c 	deb_xfer("in: req. %02x, val: %04x, ind: %04x, buffer: ",req,value,index);
req                55 drivers/media/usb/dvb-usb/vp702x.c int vp702x_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value,
req                61 drivers/media/usb/dvb-usb/vp702x.c 	ret = vp702x_usb_in_op_unlocked(d, req, value, index, b, blen);
req                67 drivers/media/usb/dvb-usb/vp702x.c static int vp702x_usb_out_op_unlocked(struct dvb_usb_device *d, u8 req,
req                71 drivers/media/usb/dvb-usb/vp702x.c 	deb_xfer("out: req. %02x, val: %04x, ind: %04x, buffer: ",req,value,index);
req                76 drivers/media/usb/dvb-usb/vp702x.c 			req,
req                86 drivers/media/usb/dvb-usb/vp702x.c static int vp702x_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
req                92 drivers/media/usb/dvb-usb/vp702x.c 	ret = vp702x_usb_out_op_unlocked(d, req, value, index, b, blen);
req               112 drivers/media/usb/dvb-usb/vp702x.h extern int vp702x_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen);
req                76 drivers/media/usb/em28xx/em28xx-core.c int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
req                90 drivers/media/usb/em28xx/em28xx-core.c 	ret = usb_control_msg(udev, pipe, req,
req                97 drivers/media/usb/em28xx/em28xx-core.c 			      req, 0, 0,
req               111 drivers/media/usb/em28xx/em28xx-core.c 		      req, 0, 0,
req               122 drivers/media/usb/em28xx/em28xx-core.c int em28xx_read_reg_req(struct em28xx *dev, u8 req, u16 reg)
req               127 drivers/media/usb/em28xx/em28xx-core.c 	ret = em28xx_read_reg_req_len(dev, req, reg, &val, 1);
req               144 drivers/media/usb/em28xx/em28xx-core.c int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
req               159 drivers/media/usb/em28xx/em28xx-core.c 	ret = usb_control_msg(udev, pipe, req,
req               168 drivers/media/usb/em28xx/em28xx-core.c 			      req, 0, 0,
req               177 drivers/media/usb/em28xx/em28xx-core.c 		      req, 0, 0,
req               756 drivers/media/usb/em28xx/em28xx.h 	int (*em28xx_read_reg_req_len)(struct em28xx *dev, u8 req, u16 reg,
req               758 drivers/media/usb/em28xx/em28xx.h 	int (*em28xx_write_regs_req)(struct em28xx *dev, u8 req, u16 reg,
req               760 drivers/media/usb/em28xx/em28xx.h 	int (*em28xx_read_reg_req)(struct em28xx *dev, u8 req, u16 reg);
req               803 drivers/media/usb/em28xx/em28xx.h int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
req               805 drivers/media/usb/em28xx/em28xx.h int em28xx_read_reg_req(struct em28xx *dev, u8 req, u16 reg);
req               807 drivers/media/usb/em28xx/em28xx.h int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
req               539 drivers/media/usb/gspca/gl860/gl860.c 		unsigned char pref, u32 req, u16 val, u16 index,
req               549 drivers/media/usb/gspca/gl860/gl860.c 					req, pref, val, index,
req               554 drivers/media/usb/gspca/gl860/gl860.c 					req, pref, val, index, NULL, len, 400);
req               559 drivers/media/usb/gspca/gl860/gl860.c 					req, pref, val, index,
req               565 drivers/media/usb/gspca/gl860/gl860.c 					req, pref, val, index, NULL, len, 400);
req               571 drivers/media/usb/gspca/gl860/gl860.c 		       r, pref, req, val, index, len);
req                85 drivers/media/usb/gspca/gl860/gl860.h 			unsigned char pref, u32 req, u16 val, u16 index,
req              1992 drivers/media/usb/gspca/ov519.c 	int ret, req = 0;
req              2003 drivers/media/usb/gspca/ov519.c 		req = 2;
req              2006 drivers/media/usb/gspca/ov519.c 		req = 0x0a;
req              2010 drivers/media/usb/gspca/ov519.c 			  req, value, index);
req              2013 drivers/media/usb/gspca/ov519.c 			req,
req              2018 drivers/media/usb/gspca/ov519.c 		req = 1;
req              2022 drivers/media/usb/gspca/ov519.c 		  req, index, value);
req              2026 drivers/media/usb/gspca/ov519.c 			req,
req              2044 drivers/media/usb/gspca/ov519.c 	int req;
req              2052 drivers/media/usb/gspca/ov519.c 		req = 3;
req              2055 drivers/media/usb/gspca/ov519.c 		req = 0x0b;
req              2058 drivers/media/usb/gspca/ov519.c 		req = 1;
req              2065 drivers/media/usb/gspca/ov519.c 			req,
req              2072 drivers/media/usb/gspca/ov519.c 			  req, index, ret);
req                62 drivers/media/usb/gspca/se401.c static void se401_write_req(struct gspca_dev *gspca_dev, u16 req, u16 value,
req                71 drivers/media/usb/gspca/se401.c 			      usb_sndctrlpipe(gspca_dev->dev, 0), req,
req                77 drivers/media/usb/gspca/se401.c 			       req, value, err);
req                82 drivers/media/usb/gspca/se401.c static void se401_read_req(struct gspca_dev *gspca_dev, u16 req, int silent)
req                96 drivers/media/usb/gspca/se401.c 			      usb_rcvctrlpipe(gspca_dev->dev, 0), req,
req               102 drivers/media/usb/gspca/se401.c 			       req, err);
req                50 drivers/media/usb/gspca/spca1528.c 			u8 req,
req                63 drivers/media/usb/gspca/spca1528.c 			req,
req                69 drivers/media/usb/gspca/spca1528.c 	gspca_dbg(gspca_dev, D_USBI, "GET %02x 0000 %04x %02x\n", req, index,
req                83 drivers/media/usb/gspca/spca1528.c 			u8 req,
req                92 drivers/media/usb/gspca/spca1528.c 	gspca_dbg(gspca_dev, D_USBO, "SET %02x %04x %04x\n", req, value, index);
req                94 drivers/media/usb/gspca/spca1528.c 			req,
req               105 drivers/media/usb/gspca/spca1528.c 			u8 req,
req               116 drivers/media/usb/gspca/spca1528.c 		  req, value, index, byte);
req               119 drivers/media/usb/gspca/spca1528.c 			req,
req               317 drivers/media/usb/gspca/spca500.c 		     __u16 req, __u16 index, __u16 value)
req               325 drivers/media/usb/gspca/spca500.c 			req,
req               335 drivers/media/usb/gspca/spca500.c 			__u16 req,	/* bRequest */
req               344 drivers/media/usb/gspca/spca500.c 			req,
req              1746 drivers/media/usb/gspca/spca501.c 					__u16 req, __u16 index, __u16 value)
req              1753 drivers/media/usb/gspca/spca501.c 			req,
req              1757 drivers/media/usb/gspca/spca501.c 		  req, index, value);
req               534 drivers/media/usb/gspca/spca505.c 		     u16 req, u16 index, u16 value)
req               541 drivers/media/usb/gspca/spca505.c 			req,
req               545 drivers/media/usb/gspca/spca505.c 		  req, index, value, ret);
req               553 drivers/media/usb/gspca/spca505.c 			u16 req,	/* bRequest */
req               560 drivers/media/usb/gspca/spca505.c 			req,
req                65 drivers/media/usb/gspca/spca506.c 		  __u16 req,
req                71 drivers/media/usb/gspca/spca506.c 			req,
req                79 drivers/media/usb/gspca/spca506.c 		  __u16 req,
req                85 drivers/media/usb/gspca/spca506.c 			req,
req                35 drivers/media/usb/gspca/stv0680.c static int stv_sndctrl(struct gspca_dev *gspca_dev, int set, u8 req, u16 val,
req                62 drivers/media/usb/gspca/stv0680.c 			      req, req_type,
req                65 drivers/media/usb/gspca/stv0680.c 	if ((ret < 0) && (req != 0x0a))
req                67 drivers/media/usb/gspca/stv0680.c 		       set, req, ret);
req               106 drivers/media/usb/gspca/sunplus.c 	u8 req;
req               235 drivers/media/usb/gspca/sunplus.c 		  u8 req,
req               249 drivers/media/usb/gspca/sunplus.c 			req,
req               268 drivers/media/usb/gspca/sunplus.c 		   u8 req,
req               280 drivers/media/usb/gspca/sunplus.c 			req,
req               293 drivers/media/usb/gspca/sunplus.c 		     u8 req, u16 index, u16 value)
req               302 drivers/media/usb/gspca/sunplus.c 			req,
req               311 drivers/media/usb/gspca/sunplus.c 		  req, index, value);
req               318 drivers/media/usb/gspca/sunplus.c 		reg_w_riv(gspca_dev, data->req, data->idx, data->val);
req               338 drivers/media/usb/gspca/sunplus.c 			     u8 req, u16 idx, u16 val)
req               340 drivers/media/usb/gspca/sunplus.c 	reg_w_riv(gspca_dev, req, idx, val);
req               344 drivers/media/usb/gspca/sunplus.c 	reg_w_riv(gspca_dev, req, idx, val);
req               371 drivers/media/usb/gspca/sunplus.c 			u8 req,
req               376 drivers/media/usb/gspca/sunplus.c 	reg_w_riv(gspca_dev, req, idx, val);
req              2891 drivers/media/usb/gspca/vc032x.c 		  u16 req,
req              2901 drivers/media/usb/gspca/vc032x.c 			req,
req              2917 drivers/media/usb/gspca/vc032x.c 		  u16 req,
req              2921 drivers/media/usb/gspca/vc032x.c 	reg_r_i(gspca_dev, req, index, len);
req              2926 drivers/media/usb/gspca/vc032x.c 			  req, index,
req              2930 drivers/media/usb/gspca/vc032x.c 			  req, index, 3, gspca_dev->usb_buf);
req              2934 drivers/media/usb/gspca/vc032x.c 			    u16 req,
req              2944 drivers/media/usb/gspca/vc032x.c 			req,
req              2954 drivers/media/usb/gspca/vc032x.c 			    u16 req,
req              2960 drivers/media/usb/gspca/vc032x.c 	gspca_dbg(gspca_dev, D_USBO, "SET %02x %04x %04x\n", req, value, index);
req              2961 drivers/media/usb/gspca/vc032x.c 	reg_w_i(gspca_dev, req, value, index);
req               125 drivers/media/usb/gspca/zc3xx.c 	u8	req;
req              5613 drivers/media/usb/gspca/zc3xx.c 	while (action->req) {
req              5614 drivers/media/usb/gspca/zc3xx.c 		switch (action->req) {
req               345 drivers/media/usb/s2255/s2255drv.c static long s2255_vendor_req(struct s2255_dev *dev, unsigned char req,
req               184 drivers/media/usb/stk1160/stk1160.h int stk1160_write_regs_req(struct stk1160 *dev, u8 req, u16 reg,
req               186 drivers/media/usb/stk1160/stk1160.h int stk1160_read_reg_req_len(struct stk1160 *dev, u8 req, u16 reg,
req                21 drivers/media/usb/tm6000/tm6000-core.c int tm6000_read_write_usb(struct tm6000_core *dev, u8 req_type, u8 req,
req                49 drivers/media/usb/tm6000/tm6000-core.c 			req_type, req, value&0xff, value>>8, index&0xff,
req                60 drivers/media/usb/tm6000/tm6000-core.c 	ret = usb_control_msg(dev->udev, pipe, req, req_type, value, index,
req                85 drivers/media/usb/tm6000/tm6000-core.c 	if (req == REQ_16_SET_GET_I2C_WR1_RDN && !(req_type & USB_DIR_IN)) {
req               100 drivers/media/usb/tm6000/tm6000-core.c int tm6000_set_reg(struct tm6000_core *dev, u8 req, u16 value, u16 index)
req               104 drivers/media/usb/tm6000/tm6000-core.c 				      req, value, index, NULL, 0);
req               108 drivers/media/usb/tm6000/tm6000-core.c int tm6000_get_reg(struct tm6000_core *dev, u8 req, u16 value, u16 index)
req               113 drivers/media/usb/tm6000/tm6000-core.c 	rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR, req,
req               123 drivers/media/usb/tm6000/tm6000-core.c int tm6000_set_reg_mask(struct tm6000_core *dev, u8 req, u16 value,
req               130 drivers/media/usb/tm6000/tm6000-core.c 	rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR, req,
req               142 drivers/media/usb/tm6000/tm6000-core.c 				      req, value, new_index, NULL, 0);
req               146 drivers/media/usb/tm6000/tm6000-core.c int tm6000_get_reg16(struct tm6000_core *dev, u8 req, u16 value, u16 index)
req               151 drivers/media/usb/tm6000/tm6000-core.c 	rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR, req,
req               160 drivers/media/usb/tm6000/tm6000-core.c int tm6000_get_reg32(struct tm6000_core *dev, u8 req, u16 value, u16 index)
req               165 drivers/media/usb/tm6000/tm6000-core.c 	rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR, req,
req               396 drivers/media/usb/tm6000/tm6000-core.c 	u8 req;
req               589 drivers/media/usb/tm6000/tm6000-core.c 		rc = tm6000_set_reg(dev, tab[i].req, tab[i].reg, tab[i].val);
req               593 drivers/media/usb/tm6000/tm6000-core.c 					tab[i].req, tab[i].reg, tab[i].val);
req                16 drivers/media/usb/tm6000/tm6000-stds.c 	unsigned char req;
req               450 drivers/media/usb/tm6000/tm6000-stds.c 	for (i = 0; set[i].req; i++) {
req               451 drivers/media/usb/tm6000/tm6000-stds.c 		rc = tm6000_set_reg(dev, set[i].req, set[i].reg, set[i].value);
req               454 drivers/media/usb/tm6000/tm6000-stds.c 			       rc, set[i].req, set[i].reg, set[i].value);
req               306 drivers/media/usb/tm6000/tm6000.h int tm6000_read_write_usb(struct tm6000_core *dev, u8 reqtype, u8 req,
req               308 drivers/media/usb/tm6000/tm6000.h int tm6000_get_reg(struct tm6000_core *dev, u8 req, u16 value, u16 index);
req               309 drivers/media/usb/tm6000/tm6000.h int tm6000_get_reg16(struct tm6000_core *dev, u8 req, u16 value, u16 index);
req               310 drivers/media/usb/tm6000/tm6000.h int tm6000_get_reg32(struct tm6000_core *dev, u8 req, u16 value, u16 index);
req               311 drivers/media/usb/tm6000/tm6000.h int tm6000_set_reg(struct tm6000_core *dev, u8 req, u16 value, u16 index);
req               312 drivers/media/usb/tm6000/tm6000.h int tm6000_set_reg_mask(struct tm6000_core *dev, u8 req, u16 value,
req              2011 drivers/media/v4l2-core/v4l2-ctrls.c 	ref->req = ref;
req              2019 drivers/media/v4l2-core/v4l2-ctrls.c 	if (ref->req)
req              2020 drivers/media/v4l2-core/v4l2-ctrls.c 		ptr_to_ptr(ref->ctrl, ref->req->p_req, ref->ctrl->p_new);
req              2157 drivers/media/v4l2-core/v4l2-ctrls.c 	if (!hdl->req_obj.req && !list_empty(&hdl->requests)) {
req              2158 drivers/media/v4l2-core/v4l2-ctrls.c 		struct v4l2_ctrl_handler *req, *next_req;
req              2160 drivers/media/v4l2-core/v4l2-ctrls.c 		list_for_each_entry_safe(req, next_req, &hdl->requests, requests) {
req              2161 drivers/media/v4l2-core/v4l2-ctrls.c 			media_request_object_unbind(&req->req_obj);
req              2162 drivers/media/v4l2-core/v4l2-ctrls.c 			media_request_object_put(&req->req_obj);
req              3162 drivers/media/v4l2-core/v4l2-ctrls.c 		if (ref_ctrl->req)
req              3173 drivers/media/v4l2-core/v4l2-ctrls.c 		ref_ctrl->req = ref_ctrl_prev->req;
req              3212 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req,
req              3217 drivers/media/v4l2-core/v4l2-ctrls.c 	if (WARN_ON(req->state != MEDIA_REQUEST_STATE_VALIDATING &&
req              3218 drivers/media/v4l2-core/v4l2-ctrls.c 		    req->state != MEDIA_REQUEST_STATE_QUEUED))
req              3221 drivers/media/v4l2-core/v4l2-ctrls.c 	obj = media_request_object_find(req, &req_ops, parent);
req              3233 drivers/media/v4l2-core/v4l2-ctrls.c 	return (ref && ref->req == ref) ? ref->ctrl : NULL;
req              3237 drivers/media/v4l2-core/v4l2-ctrls.c static int v4l2_ctrl_request_bind(struct media_request *req,
req              3246 drivers/media/v4l2-core/v4l2-ctrls.c 		ret = media_request_object_bind(req, &req_ops,
req              3486 drivers/media/v4l2-core/v4l2-ctrls.c 				if (helpers[idx].ref->req)
req              3488 drivers/media/v4l2-core/v4l2-ctrls.c 						helpers[idx].ref->req);
req              3505 drivers/media/v4l2-core/v4l2-ctrls.c 			struct media_request *req, bool set)
req              3511 drivers/media/v4l2-core/v4l2-ctrls.c 	if (IS_ERR(req))
req              3512 drivers/media/v4l2-core/v4l2-ctrls.c 		return ERR_CAST(req);
req              3514 drivers/media/v4l2-core/v4l2-ctrls.c 	if (set && WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
req              3517 drivers/media/v4l2-core/v4l2-ctrls.c 	obj = media_request_object_find(req, &req_ops, hdl);
req              3530 drivers/media/v4l2-core/v4l2-ctrls.c 		ret = v4l2_ctrl_request_bind(req, new_hdl, hdl);
req              3545 drivers/media/v4l2-core/v4l2-ctrls.c 	struct media_request *req = NULL;
req              3552 drivers/media/v4l2-core/v4l2-ctrls.c 		req = media_request_get_by_fd(mdev, cs->request_fd);
req              3553 drivers/media/v4l2-core/v4l2-ctrls.c 		if (IS_ERR(req))
req              3554 drivers/media/v4l2-core/v4l2-ctrls.c 			return PTR_ERR(req);
req              3556 drivers/media/v4l2-core/v4l2-ctrls.c 		if (req->state != MEDIA_REQUEST_STATE_COMPLETE) {
req              3557 drivers/media/v4l2-core/v4l2-ctrls.c 			media_request_put(req);
req              3561 drivers/media/v4l2-core/v4l2-ctrls.c 		ret = media_request_lock_for_access(req);
req              3563 drivers/media/v4l2-core/v4l2-ctrls.c 			media_request_put(req);
req              3567 drivers/media/v4l2-core/v4l2-ctrls.c 		obj = v4l2_ctrls_find_req_obj(hdl, req, false);
req              3569 drivers/media/v4l2-core/v4l2-ctrls.c 			media_request_unlock_for_access(req);
req              3570 drivers/media/v4l2-core/v4l2-ctrls.c 			media_request_put(req);
req              3581 drivers/media/v4l2-core/v4l2-ctrls.c 		media_request_unlock_for_access(req);
req              3583 drivers/media/v4l2-core/v4l2-ctrls.c 		media_request_put(req);
req              3880 drivers/media/v4l2-core/v4l2-ctrls.c 						 !hdl->req_obj.req && set, 0);
req              3881 drivers/media/v4l2-core/v4l2-ctrls.c 		if (!ret && hdl->req_obj.req && set) {
req              3914 drivers/media/v4l2-core/v4l2-ctrls.c 	struct media_request *req = NULL;
req              3930 drivers/media/v4l2-core/v4l2-ctrls.c 		req = media_request_get_by_fd(mdev, cs->request_fd);
req              3931 drivers/media/v4l2-core/v4l2-ctrls.c 		if (IS_ERR(req)) {
req              3934 drivers/media/v4l2-core/v4l2-ctrls.c 			return PTR_ERR(req);
req              3937 drivers/media/v4l2-core/v4l2-ctrls.c 		ret = media_request_lock_for_update(req);
req              3941 drivers/media/v4l2-core/v4l2-ctrls.c 			media_request_put(req);
req              3945 drivers/media/v4l2-core/v4l2-ctrls.c 		obj = v4l2_ctrls_find_req_obj(hdl, req, set);
req              3951 drivers/media/v4l2-core/v4l2-ctrls.c 			media_request_unlock_for_update(req);
req              3952 drivers/media/v4l2-core/v4l2-ctrls.c 			media_request_put(req);
req              3966 drivers/media/v4l2-core/v4l2-ctrls.c 		media_request_unlock_for_update(req);
req              3968 drivers/media/v4l2-core/v4l2-ctrls.c 		media_request_put(req);
req              4088 drivers/media/v4l2-core/v4l2-ctrls.c void v4l2_ctrl_request_complete(struct media_request *req,
req              4095 drivers/media/v4l2-core/v4l2-ctrls.c 	if (!req || !main_hdl)
req              4103 drivers/media/v4l2-core/v4l2-ctrls.c 	obj = media_request_object_find(req, &req_ops, main_hdl);
req              4114 drivers/media/v4l2-core/v4l2-ctrls.c 			ref->req = ref;
req              4125 drivers/media/v4l2-core/v4l2-ctrls.c 		if (ref->req == ref)
req              4129 drivers/media/v4l2-core/v4l2-ctrls.c 		if (ref->req)
req              4130 drivers/media/v4l2-core/v4l2-ctrls.c 			ptr_to_ptr(ctrl, ref->req->p_req, ref->p_req);
req              4146 drivers/media/v4l2-core/v4l2-ctrls.c int v4l2_ctrl_request_setup(struct media_request *req,
req              4154 drivers/media/v4l2-core/v4l2-ctrls.c 	if (!req || !main_hdl)
req              4157 drivers/media/v4l2-core/v4l2-ctrls.c 	if (WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
req              4165 drivers/media/v4l2-core/v4l2-ctrls.c 	obj = media_request_object_find(req, &req_ops, main_hdl);
req              4197 drivers/media/v4l2-core/v4l2-ctrls.c 				if (r->req && r == r->req) {
req               999 drivers/media/v4l2-core/v4l2-mem2mem.c void v4l2_m2m_request_queue(struct media_request *req)
req              1012 drivers/media/v4l2-core/v4l2-mem2mem.c 	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
req               440 drivers/media/v4l2-core/videobuf-core.c 		 struct v4l2_requestbuffers *req)
req               445 drivers/media/v4l2-core/videobuf-core.c 	if (req->memory != V4L2_MEMORY_MMAP     &&
req               446 drivers/media/v4l2-core/videobuf-core.c 	    req->memory != V4L2_MEMORY_USERPTR  &&
req               447 drivers/media/v4l2-core/videobuf-core.c 	    req->memory != V4L2_MEMORY_OVERLAY) {
req               453 drivers/media/v4l2-core/videobuf-core.c 	if (req->type != q->type) {
req               470 drivers/media/v4l2-core/videobuf-core.c 	if (req->count == 0) {
req               471 drivers/media/v4l2-core/videobuf-core.c 		dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
req               476 drivers/media/v4l2-core/videobuf-core.c 	count = req->count;
req               485 drivers/media/v4l2-core/videobuf-core.c 	retval = __videobuf_mmap_setup(q, count, size, req->memory);
req               491 drivers/media/v4l2-core/videobuf-core.c 	req->count = retval;
req               218 drivers/memstick/core/ms_block.c 		(struct memstick_dev *card, struct memstick_request **req))
req               284 drivers/memstick/core/ms_block.c 	struct memstick_request *req = &msb->card->current_mrq;
req               293 drivers/memstick/core/ms_block.c 		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
req               298 drivers/memstick/core/ms_block.c 	memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
req               305 drivers/memstick/core/ms_block.c 	struct memstick_request *req = &msb->card->current_mrq;
req               314 drivers/memstick/core/ms_block.c 		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
req               319 drivers/memstick/core/ms_block.c 	memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
req              1873 drivers/memstick/core/ms_block.c 	struct request *req;
req              1887 drivers/memstick/core/ms_block.c 		req = msb->req;
req              1888 drivers/memstick/core/ms_block.c 		if (!req) {
req              1898 drivers/memstick/core/ms_block.c 		blk_rq_map_sg(msb->queue, req, sg);
req              1900 drivers/memstick/core/ms_block.c 		lba = blk_rq_pos(req);
req              1905 drivers/memstick/core/ms_block.c 		if (rq_data_dir(msb->req) == READ)
req              1907 drivers/memstick/core/ms_block.c 				blk_rq_bytes(req), &len);
req              1910 drivers/memstick/core/ms_block.c 				blk_rq_bytes(req), &len);
req              1912 drivers/memstick/core/ms_block.c 		if (len && !blk_update_request(req, BLK_STS_OK, len)) {
req              1913 drivers/memstick/core/ms_block.c 			__blk_mq_end_request(req, BLK_STS_OK);
req              1915 drivers/memstick/core/ms_block.c 			msb->req = NULL;
req              1919 drivers/memstick/core/ms_block.c 		if (error && msb->req) {
req              1923 drivers/memstick/core/ms_block.c 			blk_mq_end_request(req, ret);
req              1925 drivers/memstick/core/ms_block.c 			msb->req = NULL;
req              1929 drivers/memstick/core/ms_block.c 		if (msb->req)
req              2002 drivers/memstick/core/ms_block.c 	struct request *req = bd->rq;
req              2014 drivers/memstick/core/ms_block.c 		blk_mq_start_request(req);
req              2018 drivers/memstick/core/ms_block.c 	if (msb->req) {
req              2023 drivers/memstick/core/ms_block.c 	blk_mq_start_request(req);
req              2024 drivers/memstick/core/ms_block.c 	msb->req = req;
req              2055 drivers/memstick/core/ms_block.c 	if (msb->req) {
req              2056 drivers/memstick/core/ms_block.c 		blk_mq_requeue_request(msb->req, false);
req              2057 drivers/memstick/core/ms_block.c 		msb->req = NULL;
req               154 drivers/memstick/core/ms_block.h 	struct request			*req;
req                59 drivers/memstick/host/jmb38x_ms.c 	struct memstick_request *req;
req               308 drivers/memstick/host/jmb38x_ms.c 	if (host->req->long_data) {
req               309 drivers/memstick/host/jmb38x_ms.c 		length = host->req->sg.length - host->block_pos;
req               310 drivers/memstick/host/jmb38x_ms.c 		off = host->req->sg.offset + host->block_pos;
req               312 drivers/memstick/host/jmb38x_ms.c 		length = host->req->data_len - host->block_pos;
req               319 drivers/memstick/host/jmb38x_ms.c 		if (host->req->long_data) {
req               320 drivers/memstick/host/jmb38x_ms.c 			pg = nth_page(sg_page(&host->req->sg),
req               329 drivers/memstick/host/jmb38x_ms.c 			buf = host->req->data + host->block_pos;
req               330 drivers/memstick/host/jmb38x_ms.c 			p_cnt = host->req->data_len - host->block_pos;
req               333 drivers/memstick/host/jmb38x_ms.c 		if (host->req->data_dir == WRITE)
req               342 drivers/memstick/host/jmb38x_ms.c 		if (host->req->long_data) {
req               354 drivers/memstick/host/jmb38x_ms.c 	if (!length && host->req->data_dir == WRITE) {
req               373 drivers/memstick/host/jmb38x_ms.c 		host->req->error = -ETIME;
req               374 drivers/memstick/host/jmb38x_ms.c 		return host->req->error;
req               387 drivers/memstick/host/jmb38x_ms.c 	cmd = host->req->tpc << 16;
req               390 drivers/memstick/host/jmb38x_ms.c 	if (host->req->data_dir == READ)
req               393 drivers/memstick/host/jmb38x_ms.c 	if (host->req->need_card_int) {
req               403 drivers/memstick/host/jmb38x_ms.c 	if (host->req->long_data) {
req               404 drivers/memstick/host/jmb38x_ms.c 		data_len = host->req->sg.length;
req               406 drivers/memstick/host/jmb38x_ms.c 		data_len = host->req->data_len;
req               418 drivers/memstick/host/jmb38x_ms.c 		if (1 != dma_map_sg(&host->chip->pdev->dev, &host->req->sg, 1,
req               419 drivers/memstick/host/jmb38x_ms.c 				    host->req->data_dir == READ
req               422 drivers/memstick/host/jmb38x_ms.c 			host->req->error = -ENOMEM;
req               423 drivers/memstick/host/jmb38x_ms.c 			return host->req->error;
req               425 drivers/memstick/host/jmb38x_ms.c 		data_len = sg_dma_len(&host->req->sg);
req               426 drivers/memstick/host/jmb38x_ms.c 		writel(sg_dma_address(&host->req->sg),
req               437 drivers/memstick/host/jmb38x_ms.c 			t_val |= host->req->data_dir == READ
req               448 drivers/memstick/host/jmb38x_ms.c 		if (host->req->data_dir == WRITE) {
req               458 drivers/memstick/host/jmb38x_ms.c 	host->req->error = 0;
req               480 drivers/memstick/host/jmb38x_ms.c 	host->req->int_reg = readl(host->addr + STATUS) & 0xff;
req               486 drivers/memstick/host/jmb38x_ms.c 		dma_unmap_sg(&host->chip->pdev->dev, &host->req->sg, 1,
req               487 drivers/memstick/host/jmb38x_ms.c 			     host->req->data_dir == READ
req               491 drivers/memstick/host/jmb38x_ms.c 		if (host->req->data_dir == READ)
req               505 drivers/memstick/host/jmb38x_ms.c 			rc = memstick_next_req(msh, &host->req);
req               509 drivers/memstick/host/jmb38x_ms.c 			rc = memstick_next_req(msh, &host->req);
req               511 drivers/memstick/host/jmb38x_ms.c 				host->req->error = -ETIME;
req               530 drivers/memstick/host/jmb38x_ms.c 	if (host->req) {
req               533 drivers/memstick/host/jmb38x_ms.c 				host->req->error = -EILSEQ;
req               538 drivers/memstick/host/jmb38x_ms.c 				host->req->error = -ETIME;
req               557 drivers/memstick/host/jmb38x_ms.c 					if (host->req->data_dir == READ) {
req               581 drivers/memstick/host/jmb38x_ms.c 	if (host->req
req               584 drivers/memstick/host/jmb38x_ms.c 		|| host->req->error))
req               599 drivers/memstick/host/jmb38x_ms.c 	if (host->req) {
req               600 drivers/memstick/host/jmb38x_ms.c 		host->req->error = -ETIME;
req               614 drivers/memstick/host/jmb38x_ms.c 	if (!host->req) {
req               616 drivers/memstick/host/jmb38x_ms.c 			rc = memstick_next_req(msh, &host->req);
req              1006 drivers/memstick/host/jmb38x_ms.c 		if (host->req) {
req              1007 drivers/memstick/host/jmb38x_ms.c 			host->req->error = -ETIME;
req               236 drivers/memstick/host/r592.c 	r592_write_reg(dev, R592_FIFO_DMA, sg_dma_address(&dev->req->sg));
req               280 drivers/memstick/host/r592.c 	if (!dev->dma_capable || !dev->req->long_data)
req               283 drivers/memstick/host/r592.c 	len = dev->req->sg.length;
req               284 drivers/memstick/host/r592.c 	is_write = dev->req->data_dir == WRITE;
req               295 drivers/memstick/host/r592.c 	sg_count = dma_map_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ?
req               298 drivers/memstick/host/r592.c 	if (sg_count != 1 || sg_dma_len(&dev->req->sg) < R592_LFIFO_SIZE) {
req               312 drivers/memstick/host/r592.c 	dma_unmap_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ?
req               415 drivers/memstick/host/r592.c 	bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
req               420 drivers/memstick/host/r592.c 	if (!dev->req->long_data) {
req               422 drivers/memstick/host/r592.c 			r592_write_fifo_pio(dev, dev->req->data,
req               423 drivers/memstick/host/r592.c 							dev->req->data_len);
req               426 drivers/memstick/host/r592.c 			r592_read_fifo_pio(dev, dev->req->data,
req               427 drivers/memstick/host/r592.c 							dev->req->data_len);
req               432 drivers/memstick/host/r592.c 	sg_miter_start(&miter, &dev->req->sg, 1, SG_MITER_ATOMIC |
req               459 drivers/memstick/host/r592.c 	if (!dev->req) {
req               464 drivers/memstick/host/r592.c 	is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
req               465 drivers/memstick/host/r592.c 	len = dev->req->long_data ?
req               466 drivers/memstick/host/r592.c 		dev->req->sg.length : dev->req->data_len;
req               482 drivers/memstick/host/r592.c 			memstick_debug_get_tpc_name(dev->req->tpc), len);
req               507 drivers/memstick/host/r592.c 		(dev->req->tpc << R592_TPC_EXEC_TPC_SHIFT) |
req               514 drivers/memstick/host/r592.c 	if (dev->req->need_card_int)
req               539 drivers/memstick/host/r592.c 	if (dev->parallel_mode && dev->req->need_card_int) {
req               541 drivers/memstick/host/r592.c 		dev->req->int_reg = 0;
req               545 drivers/memstick/host/r592.c 			dev->req->int_reg |= MEMSTICK_INT_CMDNAK;
req               547 drivers/memstick/host/r592.c 			dev->req->int_reg |= MEMSTICK_INT_BREQ;
req               549 drivers/memstick/host/r592.c 			dev->req->int_reg |= MEMSTICK_INT_ERR;
req               551 drivers/memstick/host/r592.c 			dev->req->int_reg |= MEMSTICK_INT_CED;
req               557 drivers/memstick/host/r592.c 	dev->req->error = error;
req               572 drivers/memstick/host/r592.c 		error = memstick_next_req(dev->host, &dev->req);
req               715 drivers/memstick/host/r592.c 	if (dev->req)
req               830 drivers/memstick/host/r592.c 	while (!error && dev->req) {
req               831 drivers/memstick/host/r592.c 		dev->req->error = -ETIME;
req               832 drivers/memstick/host/r592.c 		error = memstick_next_req(dev->host, &dev->req);
req               131 drivers/memstick/host/r592.h 	struct memstick_request *req;		/* current request */
req                22 drivers/memstick/host/rtsx_pci_ms.c 	struct memstick_request	*req;
req               340 drivers/memstick/host/rtsx_pci_ms.c 	struct memstick_request *req = host->req;
req               346 drivers/memstick/host/rtsx_pci_ms.c 	if (req->need_card_int) {
req               351 drivers/memstick/host/rtsx_pci_ms.c 	if (req->long_data) {
req               352 drivers/memstick/host/rtsx_pci_ms.c 		err = ms_transfer_data(host, req->data_dir,
req               353 drivers/memstick/host/rtsx_pci_ms.c 				req->tpc, cfg, &(req->sg));
req               355 drivers/memstick/host/rtsx_pci_ms.c 		if (req->data_dir == READ) {
req               356 drivers/memstick/host/rtsx_pci_ms.c 			err = ms_read_bytes(host, req->tpc, cfg,
req               357 drivers/memstick/host/rtsx_pci_ms.c 					req->data_len, req->data, &int_reg);
req               359 drivers/memstick/host/rtsx_pci_ms.c 			err = ms_write_bytes(host, req->tpc, cfg,
req               360 drivers/memstick/host/rtsx_pci_ms.c 					req->data_len, req->data, &int_reg);
req               366 drivers/memstick/host/rtsx_pci_ms.c 	if (req->need_card_int && (host->ifmode == MEMSTICK_SERIAL)) {
req               373 drivers/memstick/host/rtsx_pci_ms.c 	if (req->need_card_int) {
req               377 drivers/memstick/host/rtsx_pci_ms.c 			req->int_reg |= MEMSTICK_INT_CMDNAK;
req               379 drivers/memstick/host/rtsx_pci_ms.c 			req->int_reg |= MEMSTICK_INT_BREQ;
req               381 drivers/memstick/host/rtsx_pci_ms.c 			req->int_reg |= MEMSTICK_INT_ERR;
req               383 drivers/memstick/host/rtsx_pci_ms.c 			req->int_reg |= MEMSTICK_INT_CED;
req               407 drivers/memstick/host/rtsx_pci_ms.c 	if (!host->req) {
req               409 drivers/memstick/host/rtsx_pci_ms.c 			rc = memstick_next_req(msh, &host->req);
req               413 drivers/memstick/host/rtsx_pci_ms.c 				host->req->error = rtsx_pci_ms_issue_cmd(host);
req               595 drivers/memstick/host/rtsx_pci_ms.c 	if (host->req) {
req               602 drivers/memstick/host/rtsx_pci_ms.c 		host->req->error = -ENOMEDIUM;
req               604 drivers/memstick/host/rtsx_pci_ms.c 			rc = memstick_next_req(msh, &host->req);
req               606 drivers/memstick/host/rtsx_pci_ms.c 				host->req->error = -ENOMEDIUM;
req                28 drivers/memstick/host/rtsx_usb_ms.c 	struct memstick_request	*req;
req               458 drivers/memstick/host/rtsx_usb_ms.c 	struct memstick_request *req = host->req;
req               464 drivers/memstick/host/rtsx_usb_ms.c 	if (req->need_card_int) {
req               469 drivers/memstick/host/rtsx_usb_ms.c 	if (req->long_data) {
req               470 drivers/memstick/host/rtsx_usb_ms.c 		err = ms_transfer_data(host, req->data_dir,
req               471 drivers/memstick/host/rtsx_usb_ms.c 				req->tpc, cfg, &(req->sg));
req               473 drivers/memstick/host/rtsx_usb_ms.c 		if (req->data_dir == READ)
req               474 drivers/memstick/host/rtsx_usb_ms.c 			err = ms_read_bytes(host, req->tpc, cfg,
req               475 drivers/memstick/host/rtsx_usb_ms.c 					req->data_len, req->data, &int_reg);
req               477 drivers/memstick/host/rtsx_usb_ms.c 			err = ms_write_bytes(host, req->tpc, cfg,
req               478 drivers/memstick/host/rtsx_usb_ms.c 					req->data_len, req->data, &int_reg);
req               483 drivers/memstick/host/rtsx_usb_ms.c 	if (req->need_card_int) {
req               486 drivers/memstick/host/rtsx_usb_ms.c 					NO_WAIT_INT, 1, &req->int_reg, NULL);
req               492 drivers/memstick/host/rtsx_usb_ms.c 				req->int_reg |= MEMSTICK_INT_CMDNAK;
req               494 drivers/memstick/host/rtsx_usb_ms.c 				req->int_reg |= MEMSTICK_INT_BREQ;
req               496 drivers/memstick/host/rtsx_usb_ms.c 				req->int_reg |= MEMSTICK_INT_ERR;
req               498 drivers/memstick/host/rtsx_usb_ms.c 				req->int_reg |= MEMSTICK_INT_CED;
req               500 drivers/memstick/host/rtsx_usb_ms.c 		dev_dbg(ms_dev(host), "int_reg: 0x%02x\n", req->int_reg);
req               514 drivers/memstick/host/rtsx_usb_ms.c 	if (!host->req) {
req               517 drivers/memstick/host/rtsx_usb_ms.c 			rc = memstick_next_req(msh, &host->req);
req               525 drivers/memstick/host/rtsx_usb_ms.c 					host->req->error = -EIO;
req               527 drivers/memstick/host/rtsx_usb_ms.c 					host->req->error =
req               533 drivers/memstick/host/rtsx_usb_ms.c 						host->req->error);
req               818 drivers/memstick/host/rtsx_usb_ms.c 	if (host->req) {
req               822 drivers/memstick/host/rtsx_usb_ms.c 		host->req->error = -ENOMEDIUM;
req               824 drivers/memstick/host/rtsx_usb_ms.c 			err = memstick_next_req(msh, &host->req);
req               826 drivers/memstick/host/rtsx_usb_ms.c 				host->req->error = -ENOMEDIUM;
req                70 drivers/memstick/host/tifm_ms.c 	struct memstick_request *req;
req               190 drivers/memstick/host/tifm_ms.c 	if (host->req->long_data) {
req               191 drivers/memstick/host/tifm_ms.c 		length = host->req->sg.length - host->block_pos;
req               192 drivers/memstick/host/tifm_ms.c 		off = host->req->sg.offset + host->block_pos;
req               194 drivers/memstick/host/tifm_ms.c 		length = host->req->data_len - host->block_pos;
req               203 drivers/memstick/host/tifm_ms.c 		if (host->req->long_data) {
req               204 drivers/memstick/host/tifm_ms.c 			pg = nth_page(sg_page(&host->req->sg),
req               213 drivers/memstick/host/tifm_ms.c 			buf = host->req->data + host->block_pos;
req               214 drivers/memstick/host/tifm_ms.c 			p_cnt = host->req->data_len - host->block_pos;
req               217 drivers/memstick/host/tifm_ms.c 		t_size = host->req->data_dir == WRITE
req               221 drivers/memstick/host/tifm_ms.c 		if (host->req->long_data) {
req               234 drivers/memstick/host/tifm_ms.c 	if (!length && (host->req->data_dir == WRITE)) {
req               265 drivers/memstick/host/tifm_ms.c 	if (host->req->long_data) {
req               266 drivers/memstick/host/tifm_ms.c 		data_len = host->req->sg.length;
req               270 drivers/memstick/host/tifm_ms.c 		data_len = host->req->data_len;
req               280 drivers/memstick/host/tifm_ms.c 		if (1 != tifm_map_sg(sock, &host->req->sg, 1,
req               281 drivers/memstick/host/tifm_ms.c 				     host->req->data_dir == READ
req               284 drivers/memstick/host/tifm_ms.c 			host->req->error = -ENOMEM;
req               285 drivers/memstick/host/tifm_ms.c 			return host->req->error;
req               287 drivers/memstick/host/tifm_ms.c 		data_len = sg_dma_len(&host->req->sg);
req               294 drivers/memstick/host/tifm_ms.c 		if (host->req->data_dir == WRITE)
req               300 drivers/memstick/host/tifm_ms.c 		writel(sg_dma_address(&host->req->sg),
req               314 drivers/memstick/host/tifm_ms.c 	host->req->error = 0;
req               326 drivers/memstick/host/tifm_ms.c 	cmd = (host->req->tpc & 0xf) << 12;
req               342 drivers/memstick/host/tifm_ms.c 	host->req->int_reg = readl(sock->addr + SOCK_MS_STATUS) & 0xff;
req               343 drivers/memstick/host/tifm_ms.c 	host->req->int_reg = (host->req->int_reg & 1)
req               344 drivers/memstick/host/tifm_ms.c 			     | ((host->req->int_reg << 4) & 0xe0);
req               351 drivers/memstick/host/tifm_ms.c 		tifm_unmap_sg(sock, &host->req->sg, 1,
req               352 drivers/memstick/host/tifm_ms.c 			      host->req->data_dir == READ
req               362 drivers/memstick/host/tifm_ms.c 		rc = memstick_next_req(msh, &host->req);
req               368 drivers/memstick/host/tifm_ms.c 	if (!host->req->error) {
req               373 drivers/memstick/host/tifm_ms.c 		if (host->req->need_card_int
req               395 drivers/memstick/host/tifm_ms.c 	if (host->req) {
req               429 drivers/memstick/host/tifm_ms.c 	if (host->req) {
req               431 drivers/memstick/host/tifm_ms.c 			host->req->error = -ETIME;
req               433 drivers/memstick/host/tifm_ms.c 			host->req->error = -EILSEQ;
req               464 drivers/memstick/host/tifm_ms.c 	if (!host->req) {
req               467 drivers/memstick/host/tifm_ms.c 				rc = memstick_next_req(msh, &host->req);
req               469 drivers/memstick/host/tifm_ms.c 					host->req->error = -ETIME;
req               476 drivers/memstick/host/tifm_ms.c 			rc = memstick_next_req(msh, &host->req);
req               545 drivers/memstick/host/tifm_ms.c 	       dev_name(&host->dev->dev), host->req ? host->req->tpc : 0,
req               602 drivers/memstick/host/tifm_ms.c 	if (host->req) {
req               608 drivers/memstick/host/tifm_ms.c 			tifm_unmap_sg(sock, &host->req->sg, 1,
req               609 drivers/memstick/host/tifm_ms.c 				      host->req->data_dir == READ
req               612 drivers/memstick/host/tifm_ms.c 		host->req->error = -ETIME;
req               615 drivers/memstick/host/tifm_ms.c 			rc = memstick_next_req(msh, &host->req);
req               617 drivers/memstick/host/tifm_ms.c 				host->req->error = -ETIME;
req               160 drivers/message/fusion/mptbase.c static int	mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
req               163 drivers/message/fusion/mptbase.c 			u32 *req, int replyBytes, u16 *u16reply, int maxwait,
req               628 drivers/message/fusion/mptbase.c mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
req              1184 drivers/message/fusion/mptbase.c mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag)
req              1200 drivers/message/fusion/mptbase.c 	ii = MFPTR_2_MPT_INDEX(ioc,(MPT_FRAME_HDR*)req);
req              1202 drivers/message/fusion/mptbase.c 		MPT_FRAME_HDR *mf = (MPT_FRAME_HDR*)req;
req              1233 drivers/message/fusion/mptbase.c 	req_as_bytes = (u8 *) req;
req              4660 drivers/message/fusion/mptbase.c mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req,
req              4708 drivers/message/fusion/mptbase.c 		u8	*req_as_bytes = (u8 *) req;
req              4725 drivers/message/fusion/mptbase.c 		dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Handshake request frame (@%p) header\n", ioc->name, req));
req              4726 drivers/message/fusion/mptbase.c 		DBG_DUMP_REQUEST_FRAME_HDR(ioc, (u32 *)req);
req               797 drivers/message/fusion/mptbase.h typedef int (*MPT_CALLBACK)(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply);
req               930 drivers/message/fusion/mptbase.h extern int	 mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag);
req               201 drivers/message/fusion/mptctl.c mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
req               207 drivers/message/fusion/mptctl.c 	if (!req)
req               211 drivers/message/fusion/mptctl.c 	    "(0x%02X), req=%p, reply=%p\n", ioc->name,  req->u.hdr.Function,
req               212 drivers/message/fusion/mptctl.c 	    req, reply));
req               218 drivers/message/fusion/mptctl.c 	if (ioc->ioctl_cmds.msg_context != req->u.hdr.MsgContext)
req               236 drivers/message/fusion/mptctl.c 	if ((req->u.hdr.Function == MPI_FUNCTION_SCSI_IO_REQUEST) ||
req               237 drivers/message/fusion/mptctl.c 		(req->u.hdr.Function ==
req               251 drivers/message/fusion/mptctl.c 			sz = req->u.scsireq.SenseBufferLength;
req               253 drivers/message/fusion/mptctl.c 			    le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx);
req               265 drivers/message/fusion/mptctl.c 		if (req->u.hdr.Function == MPI_FUNCTION_SCSI_TASK_MGMT) {
req              2061 drivers/message/fusion/mptsas.c static int mptsas_mgmt_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
req              2082 drivers/message/fusion/mptsas.c 	SasIoUnitControlRequest_t *req;
req              2107 drivers/message/fusion/mptsas.c 	req = (SasIoUnitControlRequest_t *)mf;
req              2108 drivers/message/fusion/mptsas.c 	memset(req, 0, sizeof(SasIoUnitControlRequest_t));
req              2109 drivers/message/fusion/mptsas.c 	req->Function = MPI_FUNCTION_SAS_IO_UNIT_CONTROL;
req              2110 drivers/message/fusion/mptsas.c 	req->MsgContext = hdr->MsgContext;
req              2111 drivers/message/fusion/mptsas.c 	req->Operation = hard_reset ?
req              2113 drivers/message/fusion/mptsas.c 	req->PhyNum = phy->identify.phy_identifier;
req               106 drivers/message/fusion/mptscsih.c 		MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply);
req              2623 drivers/message/fusion/mptscsih.c mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
req              2639 drivers/message/fusion/mptscsih.c 	pReq = (SCSIIORequest_t *) req;
req              2641 drivers/message/fusion/mptscsih.c 	    mptscsih_get_completion_code(ioc, req, reply);
req              2649 drivers/message/fusion/mptscsih.c 		req_idx = le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx);
req              2673 drivers/message/fusion/mptscsih.c mptscsih_get_completion_code(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
req              2724 drivers/message/fusion/mptscsih.c 			if (req->u.scsireq.CDB[0] == INQUIRY)
req               364 drivers/mfd/db8500-prcmu.c 	} req;
req               823 drivers/mfd/db8500-prcmu.c 	dbb_events = mb0_transfer.req.dbb_irqs | mb0_transfer.req.dbb_wakeups;
req               826 drivers/mfd/db8500-prcmu.c 	abb_events = mb0_transfer.req.abb_events;
req               858 drivers/mfd/db8500-prcmu.c 	mb0_transfer.req.dbb_wakeups = bits;
req               870 drivers/mfd/db8500-prcmu.c 	mb0_transfer.req.abb_events = abb_events;
req              2434 drivers/mfd/db8500-prcmu.c 		ev &= mb0_transfer.req.dbb_irqs;
req              2586 drivers/mfd/db8500-prcmu.c 	mb0_transfer.req.dbb_irqs &= ~prcmu_irq_bit[d->hwirq];
req              2600 drivers/mfd/db8500-prcmu.c 	mb0_transfer.req.dbb_irqs |= prcmu_irq_bit[d->hwirq];
req               269 drivers/mfd/ezx-pcap.c 	struct pcap_adc_request *req;
req               274 drivers/mfd/ezx-pcap.c 	req = pcap->adc_queue[pcap->adc_head];
req               276 drivers/mfd/ezx-pcap.c 	if (WARN(!req, "adc irq without pending request\n")) {
req               284 drivers/mfd/ezx-pcap.c 	tmp |= (req->ch[0] << PCAP_ADC_ADA1_SHIFT);
req               285 drivers/mfd/ezx-pcap.c 	tmp |= (req->ch[1] << PCAP_ADC_ADA2_SHIFT);
req               296 drivers/mfd/ezx-pcap.c 	req->callback(req->data, res);
req               297 drivers/mfd/ezx-pcap.c 	kfree(req);
req               308 drivers/mfd/ezx-pcap.c 	struct pcap_adc_request *req;
req               312 drivers/mfd/ezx-pcap.c 	req = kmalloc(sizeof(struct pcap_adc_request), GFP_KERNEL);
req               313 drivers/mfd/ezx-pcap.c 	if (!req)
req               316 drivers/mfd/ezx-pcap.c 	req->bank = bank;
req               317 drivers/mfd/ezx-pcap.c 	req->flags = flags;
req               318 drivers/mfd/ezx-pcap.c 	req->ch[0] = ch[0];
req               319 drivers/mfd/ezx-pcap.c 	req->ch[1] = ch[1];
req               320 drivers/mfd/ezx-pcap.c 	req->callback = callback;
req               321 drivers/mfd/ezx-pcap.c 	req->data = data;
req               326 drivers/mfd/ezx-pcap.c 		kfree(req);
req               329 drivers/mfd/ezx-pcap.c 	pcap->adc_queue[pcap->adc_tail] = req;
req               342 drivers/mfd/ezx-pcap.c 	struct pcap_adc_sync_request *req = param;
req               344 drivers/mfd/ezx-pcap.c 	req->res[0] = res[0];
req               345 drivers/mfd/ezx-pcap.c 	req->res[1] = res[1];
req               346 drivers/mfd/ezx-pcap.c 	complete(&req->completion);
req                81 drivers/mfd/pcf50633-adc.c adc_enqueue_request(struct pcf50633 *pcf, struct pcf50633_adc_request *req)
req                97 drivers/mfd/pcf50633-adc.c 	adc->queue[tail] = req;
req               110 drivers/mfd/pcf50633-adc.c 	struct pcf50633_adc_sync_request *req = param;
req               112 drivers/mfd/pcf50633-adc.c 	req->result = result;
req               113 drivers/mfd/pcf50633-adc.c 	complete(&req->completion);
req               118 drivers/mfd/pcf50633-adc.c 	struct pcf50633_adc_sync_request req;
req               121 drivers/mfd/pcf50633-adc.c 	init_completion(&req.completion);
req               124 drivers/mfd/pcf50633-adc.c 		pcf50633_adc_sync_read_callback, &req);
req               128 drivers/mfd/pcf50633-adc.c 	wait_for_completion(&req.completion);
req               130 drivers/mfd/pcf50633-adc.c 	return req.result;
req               138 drivers/mfd/pcf50633-adc.c 	struct pcf50633_adc_request *req;
req               141 drivers/mfd/pcf50633-adc.c 	req = kmalloc(sizeof(*req), GFP_KERNEL);
req               142 drivers/mfd/pcf50633-adc.c 	if (!req)
req               145 drivers/mfd/pcf50633-adc.c 	req->mux = mux;
req               146 drivers/mfd/pcf50633-adc.c 	req->avg = avg;
req               147 drivers/mfd/pcf50633-adc.c 	req->callback = callback;
req               148 drivers/mfd/pcf50633-adc.c 	req->callback_param = callback_param;
req               150 drivers/mfd/pcf50633-adc.c 	return adc_enqueue_request(pcf, req);
req               172 drivers/mfd/pcf50633-adc.c 	struct pcf50633_adc_request *req;
req               178 drivers/mfd/pcf50633-adc.c 	req = adc->queue[head];
req               179 drivers/mfd/pcf50633-adc.c 	if (WARN_ON(!req)) {
req               193 drivers/mfd/pcf50633-adc.c 	req->callback(pcf, req->callback_param, res);
req               194 drivers/mfd/pcf50633-adc.c 	kfree(req);
req                34 drivers/mfd/wm831x-auxadc.c 	struct wm831x_auxadc_req *req;
req                38 drivers/mfd/wm831x-auxadc.c 	req = kzalloc(sizeof(*req), GFP_KERNEL);
req                39 drivers/mfd/wm831x-auxadc.c 	if (!req)
req                42 drivers/mfd/wm831x-auxadc.c 	init_completion(&req->done);
req                43 drivers/mfd/wm831x-auxadc.c 	req->input = input;
req                44 drivers/mfd/wm831x-auxadc.c 	req->val = -ETIMEDOUT;
req                49 drivers/mfd/wm831x-auxadc.c 	list_add(&req->list, &wm831x->auxadc_pending);
req                93 drivers/mfd/wm831x-auxadc.c 	wait_for_completion_timeout(&req->done, msecs_to_jiffies(500));
req                97 drivers/mfd/wm831x-auxadc.c 	list_del(&req->list);
req                98 drivers/mfd/wm831x-auxadc.c 	ret = req->val;
req               103 drivers/mfd/wm831x-auxadc.c 	kfree(req);
req               111 drivers/mfd/wm831x-auxadc.c 	struct wm831x_auxadc_req *req;
req               141 drivers/mfd/wm831x-auxadc.c 	list_for_each_entry(req, &wm831x->auxadc_pending, list) {
req               142 drivers/mfd/wm831x-auxadc.c 		if (req->input == input) {
req               143 drivers/mfd/wm831x-auxadc.c 			req->val = val;
req               144 drivers/mfd/wm831x-auxadc.c 			complete(&req->done);
req               521 drivers/misc/genwqe/card_base.h 			   struct ddcb_requ *req,
req               524 drivers/misc/genwqe/card_base.h int  __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
req               525 drivers/misc/genwqe/card_base.h int  __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
req               195 drivers/misc/genwqe/card_ddcb.c 	struct ddcb_requ *req;
req               197 drivers/misc/genwqe/card_ddcb.c 	req = kzalloc(sizeof(*req), GFP_KERNEL);
req               198 drivers/misc/genwqe/card_ddcb.c 	if (!req)
req               201 drivers/misc/genwqe/card_ddcb.c 	return &req->cmd;
req               206 drivers/misc/genwqe/card_ddcb.c 	struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
req               208 drivers/misc/genwqe/card_ddcb.c 	kfree(req);
req               211 drivers/misc/genwqe/card_ddcb.c static inline enum genwqe_requ_state ddcb_requ_get_state(struct ddcb_requ *req)
req               213 drivers/misc/genwqe/card_ddcb.c 	return req->req_state;
req               216 drivers/misc/genwqe/card_ddcb.c static inline void ddcb_requ_set_state(struct ddcb_requ *req,
req               219 drivers/misc/genwqe/card_ddcb.c 	req->req_state = new_state;
req               222 drivers/misc/genwqe/card_ddcb.c static inline int ddcb_requ_collect_debug_data(struct ddcb_requ *req)
req               224 drivers/misc/genwqe/card_ddcb.c 	return req->cmd.ddata_addr != 0x0;
req               241 drivers/misc/genwqe/card_ddcb.c static int ddcb_requ_finished(struct genwqe_dev *cd, struct ddcb_requ *req)
req               243 drivers/misc/genwqe/card_ddcb.c 	return (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED) ||
req               328 drivers/misc/genwqe/card_ddcb.c static void copy_ddcb_results(struct ddcb_requ *req, int ddcb_no)
req               330 drivers/misc/genwqe/card_ddcb.c 	struct ddcb_queue *queue = req->queue;
req               331 drivers/misc/genwqe/card_ddcb.c 	struct ddcb *pddcb = &queue->ddcb_vaddr[req->num];
req               333 drivers/misc/genwqe/card_ddcb.c 	memcpy(&req->cmd.asv[0], &pddcb->asv[0], DDCB_ASV_LENGTH);
req               336 drivers/misc/genwqe/card_ddcb.c 	req->cmd.vcrc     = be16_to_cpu(pddcb->vcrc_16);
req               337 drivers/misc/genwqe/card_ddcb.c 	req->cmd.deque_ts = be64_to_cpu(pddcb->deque_ts_64);
req               338 drivers/misc/genwqe/card_ddcb.c 	req->cmd.cmplt_ts = be64_to_cpu(pddcb->cmplt_ts_64);
req               340 drivers/misc/genwqe/card_ddcb.c 	req->cmd.attn     = be16_to_cpu(pddcb->attn_16);
req               341 drivers/misc/genwqe/card_ddcb.c 	req->cmd.progress = be32_to_cpu(pddcb->progress_32);
req               342 drivers/misc/genwqe/card_ddcb.c 	req->cmd.retc     = be16_to_cpu(pddcb->retc_16);
req               344 drivers/misc/genwqe/card_ddcb.c 	if (ddcb_requ_collect_debug_data(req)) {
req               349 drivers/misc/genwqe/card_ddcb.c 		memcpy(&req->debug_data.ddcb_finished, pddcb,
req               350 drivers/misc/genwqe/card_ddcb.c 		       sizeof(req->debug_data.ddcb_finished));
req               351 drivers/misc/genwqe/card_ddcb.c 		memcpy(&req->debug_data.ddcb_prev, prev_pddcb,
req               352 drivers/misc/genwqe/card_ddcb.c 		       sizeof(req->debug_data.ddcb_prev));
req               375 drivers/misc/genwqe/card_ddcb.c 		struct ddcb_requ *req;
req               387 drivers/misc/genwqe/card_ddcb.c 		req = queue->ddcb_req[queue->ddcb_act];
req               388 drivers/misc/genwqe/card_ddcb.c 		if (req == NULL) {
req               417 drivers/misc/genwqe/card_ddcb.c 		copy_ddcb_results(req, queue->ddcb_act);
req               420 drivers/misc/genwqe/card_ddcb.c 		dev_dbg(&pci_dev->dev, "FINISHED DDCB#%d\n", req->num);
req               427 drivers/misc/genwqe/card_ddcb.c 				   VCRC_LENGTH(req->cmd.asv_length),
req               434 drivers/misc/genwqe/card_ddcb.c 				pddcb->pre, VCRC_LENGTH(req->cmd.asv_length),
req               438 drivers/misc/genwqe/card_ddcb.c 		ddcb_requ_set_state(req, GENWQE_REQU_FINISHED);
req               474 drivers/misc/genwqe/card_ddcb.c int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
req               481 drivers/misc/genwqe/card_ddcb.c 	if (req == NULL)
req               484 drivers/misc/genwqe/card_ddcb.c 	queue = req->queue;
req               488 drivers/misc/genwqe/card_ddcb.c 	ddcb_no = req->num;
req               493 drivers/misc/genwqe/card_ddcb.c 				ddcb_requ_finished(cd, req),
req               503 drivers/misc/genwqe/card_ddcb.c 		struct ddcb_queue *queue = req->queue;
req               511 drivers/misc/genwqe/card_ddcb.c 		genwqe_check_ddcb_queue(cd, req->queue);
req               512 drivers/misc/genwqe/card_ddcb.c 		if (ddcb_requ_finished(cd, req))
req               517 drivers/misc/genwqe/card_ddcb.c 			__func__, req->num, rc,	ddcb_requ_get_state(req),
req               518 drivers/misc/genwqe/card_ddcb.c 			req);
req               523 drivers/misc/genwqe/card_ddcb.c 		pddcb = &queue->ddcb_vaddr[req->num];
req               526 drivers/misc/genwqe/card_ddcb.c 		print_ddcb_info(cd, req->queue);
req               539 drivers/misc/genwqe/card_ddcb.c 			__func__, req->num, rc, ddcb_requ_get_state(req));
req               547 drivers/misc/genwqe/card_ddcb.c 			__func__, req->num, rc);
req               615 drivers/misc/genwqe/card_ddcb.c int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
req               620 drivers/misc/genwqe/card_ddcb.c 	struct ddcb_queue *queue = req->queue;
req               633 drivers/misc/genwqe/card_ddcb.c 	pddcb = &queue->ddcb_vaddr[req->num];
req               640 drivers/misc/genwqe/card_ddcb.c 		if (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED)
req               669 drivers/misc/genwqe/card_ddcb.c 		copy_ddcb_results(req, req->num); /* for the failing case */
req               674 drivers/misc/genwqe/card_ddcb.c 		copy_ddcb_results(req, req->num);
req               675 drivers/misc/genwqe/card_ddcb.c 		ddcb_requ_set_state(req, GENWQE_REQU_FINISHED);
req               677 drivers/misc/genwqe/card_ddcb.c 		queue->ddcb_req[req->num] = NULL; /* delete from array */
req               692 drivers/misc/genwqe/card_ddcb.c 		    (queue->ddcb_act == req->num)) {
req               707 drivers/misc/genwqe/card_ddcb.c 	dev_dbg(&pci_dev->dev, "UN/FINISHED DDCB#%d\n", req->num);
req               712 drivers/misc/genwqe/card_ddcb.c 		__func__, req->num, GENWQE_DDCB_SOFTWARE_TIMEOUT,
req               715 drivers/misc/genwqe/card_ddcb.c 	print_ddcb_info(cd, req->queue);
req               749 drivers/misc/genwqe/card_ddcb.c int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req,
req               763 drivers/misc/genwqe/card_ddcb.c 			__func__, req->num);
req               767 drivers/misc/genwqe/card_ddcb.c 	queue = req->queue = &cd->queue;
req               782 drivers/misc/genwqe/card_ddcb.c 	pddcb = get_next_ddcb(cd, queue, &req->num);	/* get ptr and num */
req               804 drivers/misc/genwqe/card_ddcb.c 	if (queue->ddcb_req[req->num] != NULL) {
req               809 drivers/misc/genwqe/card_ddcb.c 			__func__, req->num, req);
req               812 drivers/misc/genwqe/card_ddcb.c 	ddcb_requ_set_state(req, GENWQE_REQU_ENQUEUED);
req               813 drivers/misc/genwqe/card_ddcb.c 	queue->ddcb_req[req->num] = req;
req               815 drivers/misc/genwqe/card_ddcb.c 	pddcb->cmdopts_16 = cpu_to_be16(req->cmd.cmdopts);
req               816 drivers/misc/genwqe/card_ddcb.c 	pddcb->cmd = req->cmd.cmd;
req               817 drivers/misc/genwqe/card_ddcb.c 	pddcb->acfunc = req->cmd.acfunc;	/* functional unit */
req               833 drivers/misc/genwqe/card_ddcb.c 	pddcb->psp = (((req->cmd.asiv_length / 8) << 4) |
req               834 drivers/misc/genwqe/card_ddcb.c 		      ((req->cmd.asv_length  / 8)));
req               835 drivers/misc/genwqe/card_ddcb.c 	pddcb->disp_ts_64 = cpu_to_be64(req->cmd.disp_ts);
req               852 drivers/misc/genwqe/card_ddcb.c 		       &req->cmd.__asiv[0],	/* source */
req               855 drivers/misc/genwqe/card_ddcb.c 		pddcb->n.ats_64 = cpu_to_be64(req->cmd.ats);
req               857 drivers/misc/genwqe/card_ddcb.c 			&req->cmd.asiv[0],	/* source */
req               868 drivers/misc/genwqe/card_ddcb.c 			   ICRC_LENGTH(req->cmd.asiv_length), 0xffff);
req               875 drivers/misc/genwqe/card_ddcb.c 	dev_dbg(&pci_dev->dev, "INPUT DDCB#%d\n", req->num);
req               878 drivers/misc/genwqe/card_ddcb.c 	if (ddcb_requ_collect_debug_data(req)) {
req               882 drivers/misc/genwqe/card_ddcb.c 		genwqe_init_debug_data(cd, &req->debug_data);
req               883 drivers/misc/genwqe/card_ddcb.c 		memcpy(&req->debug_data.ddcb_before, pddcb,
req               884 drivers/misc/genwqe/card_ddcb.c 		       sizeof(req->debug_data.ddcb_before));
req               887 drivers/misc/genwqe/card_ddcb.c 	enqueue_ddcb(cd, queue, pddcb, req->num);
req               893 drivers/misc/genwqe/card_ddcb.c 	ddcb_requ_set_state(req, GENWQE_REQU_TAPPED);
req               912 drivers/misc/genwqe/card_ddcb.c 	struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
req               924 drivers/misc/genwqe/card_ddcb.c 	rc = __genwqe_enqueue_ddcb(cd, req, f_flags);
req               928 drivers/misc/genwqe/card_ddcb.c 	rc = __genwqe_wait_ddcb(cd, req);
req               932 drivers/misc/genwqe/card_ddcb.c 	if (ddcb_requ_collect_debug_data(req)) {
req               935 drivers/misc/genwqe/card_ddcb.c 				 &req->debug_data,
req               954 drivers/misc/genwqe/card_ddcb.c 	__genwqe_purge_ddcb(cd, req);
req               956 drivers/misc/genwqe/card_ddcb.c 	if (ddcb_requ_collect_debug_data(req)) {
req               959 drivers/misc/genwqe/card_ddcb.c 				 &req->debug_data,
req               541 drivers/misc/genwqe/card_dev.c 		struct genwqe_ddcb_cmd *req;
req               562 drivers/misc/genwqe/card_dev.c 		req = ddcb_requ_alloc();
req               563 drivers/misc/genwqe/card_dev.c 		if (req == NULL) {
req               568 drivers/misc/genwqe/card_dev.c 		req->cmd = SLCMD_MOVE_FLASH;
req               569 drivers/misc/genwqe/card_dev.c 		req->cmdopts = cmdopts;
req               573 drivers/misc/genwqe/card_dev.c 			*(__be64 *)&req->__asiv[0]  = cpu_to_be64(dma_addr);
req               574 drivers/misc/genwqe/card_dev.c 			*(__be64 *)&req->__asiv[8]  = cpu_to_be64(tocopy);
req               575 drivers/misc/genwqe/card_dev.c 			*(__be64 *)&req->__asiv[16] = cpu_to_be64(flash);
req               576 drivers/misc/genwqe/card_dev.c 			*(__be32 *)&req->__asiv[24] = cpu_to_be32(0);
req               577 drivers/misc/genwqe/card_dev.c 			req->__asiv[24]	       = load->uid;
req               578 drivers/misc/genwqe/card_dev.c 			*(__be32 *)&req->__asiv[28] = cpu_to_be32(crc);
req               581 drivers/misc/genwqe/card_dev.c 			*(__be64 *)&req->__asiv[88] = cpu_to_be64(load->slu_id);
req               582 drivers/misc/genwqe/card_dev.c 			*(__be64 *)&req->__asiv[96] = cpu_to_be64(load->app_id);
req               583 drivers/misc/genwqe/card_dev.c 			req->asiv_length = 32; /* bytes included in crc calc */
req               585 drivers/misc/genwqe/card_dev.c 			*(__be64 *)&req->asiv[0]  = cpu_to_be64(dma_addr);
req               586 drivers/misc/genwqe/card_dev.c 			*(__be32 *)&req->asiv[8]  = cpu_to_be32(tocopy);
req               587 drivers/misc/genwqe/card_dev.c 			*(__be32 *)&req->asiv[12] = cpu_to_be32(0); /* resvd */
req               588 drivers/misc/genwqe/card_dev.c 			*(__be64 *)&req->asiv[16] = cpu_to_be64(flash);
req               589 drivers/misc/genwqe/card_dev.c 			*(__be32 *)&req->asiv[24] = cpu_to_be32(load->uid<<24);
req               590 drivers/misc/genwqe/card_dev.c 			*(__be32 *)&req->asiv[28] = cpu_to_be32(crc);
req               593 drivers/misc/genwqe/card_dev.c 			*(__be64 *)&req->asiv[80] = cpu_to_be64(load->slu_id);
req               594 drivers/misc/genwqe/card_dev.c 			*(__be64 *)&req->asiv[88] = cpu_to_be64(load->app_id);
req               597 drivers/misc/genwqe/card_dev.c 			req->ats = 0x4ULL << 44;
req               598 drivers/misc/genwqe/card_dev.c 			req->asiv_length = 40; /* bytes included in crc calc */
req               600 drivers/misc/genwqe/card_dev.c 		req->asv_length  = 8;
req               603 drivers/misc/genwqe/card_dev.c 		*(u64 *)&req->asv[0] = 0ULL;			/* 0x80 */
req               605 drivers/misc/genwqe/card_dev.c 		rc = __genwqe_execute_raw_ddcb(cd, req, filp->f_flags);
req               607 drivers/misc/genwqe/card_dev.c 		load->retc = req->retc;
req               608 drivers/misc/genwqe/card_dev.c 		load->attn = req->attn;
req               609 drivers/misc/genwqe/card_dev.c 		load->progress = req->progress;
req               612 drivers/misc/genwqe/card_dev.c 			ddcb_requ_free(req);
req               616 drivers/misc/genwqe/card_dev.c 		if (req->retc != DDCB_RETC_COMPLETE) {
req               618 drivers/misc/genwqe/card_dev.c 			ddcb_requ_free(req);
req               626 drivers/misc/genwqe/card_dev.c 		ddcb_requ_free(req);
req               826 drivers/misc/genwqe/card_dev.c static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
req               833 drivers/misc/genwqe/card_dev.c 		dma_map = &req->dma_mappings[i];
req               839 drivers/misc/genwqe/card_dev.c 		if (req->sgls[i].sgl != NULL)
req               840 drivers/misc/genwqe/card_dev.c 			genwqe_free_sync_sgl(cd, &req->sgls[i]);
req               853 drivers/misc/genwqe/card_dev.c static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
req               858 drivers/misc/genwqe/card_dev.c 	struct genwqe_ddcb_cmd *cmd = &req->cmd;
req               931 drivers/misc/genwqe/card_dev.c 				m = &req->dma_mappings[i];
req               949 drivers/misc/genwqe/card_dev.c 			rc = genwqe_alloc_sync_sgl(cd, &req->sgls[i],
req               955 drivers/misc/genwqe/card_dev.c 			genwqe_setup_sgl(cd, &req->sgls[i],
req               959 drivers/misc/genwqe/card_dev.c 				cpu_to_be64(req->sgls[i].sgl_dma_addr);
req               971 drivers/misc/genwqe/card_dev.c 	ddcb_cmd_cleanup(cfile, req);
req               988 drivers/misc/genwqe/card_dev.c 	struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
req               990 drivers/misc/genwqe/card_dev.c 	rc = ddcb_cmd_fixups(cfile, req);
req               995 drivers/misc/genwqe/card_dev.c 	ddcb_cmd_cleanup(cfile, req);
req                53 drivers/misc/genwqe/genwqe_driver.h void ddcb_requ_free(struct genwqe_ddcb_cmd *req);
req               138 drivers/misc/mei/bus-fixup.c 	struct mkhi_msg *req;
req               145 drivers/misc/mei/bus-fixup.c 	req = (struct mkhi_msg *)buf;
req               146 drivers/misc/mei/bus-fixup.c 	req->hdr.group_id = MKHI_FWCAPS_GROUP_ID;
req               147 drivers/misc/mei/bus-fixup.c 	req->hdr.command = MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD;
req               149 drivers/misc/mei/bus-fixup.c 	fwcaps = (struct mkhi_fwcaps *)req->data;
req               168 drivers/misc/mei/bus-fixup.c 	struct mkhi_msg *req;
req               174 drivers/misc/mei/bus-fixup.c 	req = (struct mkhi_msg *)buf;
req               175 drivers/misc/mei/bus-fixup.c 	req->hdr.group_id = MKHI_GEN_GROUP_ID;
req               176 drivers/misc/mei/bus-fixup.c 	req->hdr.command = MKHI_GEN_GET_FW_VERSION_CMD;
req               197 drivers/misc/mei/bus-fixup.c 	fwver = (struct mkhi_fw_ver *)req->data;
req              1253 drivers/misc/mei/client.c enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
req              1255 drivers/misc/mei/client.c 	if (req == MEI_HBM_NOTIFICATION_START)
req               297 drivers/misc/mei/hbm.c 	struct hbm_dma_setup_request req;
req               304 drivers/misc/mei/hbm.c 	memset(&req, 0, len);
req               305 drivers/misc/mei/hbm.c 	req.hbm_cmd = MEI_HBM_DMA_SETUP_REQ_CMD;
req               310 drivers/misc/mei/hbm.c 		req.dma_dscr[i].addr_hi = upper_32_bits(paddr);
req               311 drivers/misc/mei/hbm.c 		req.dma_dscr[i].addr_lo = lower_32_bits(paddr);
req               312 drivers/misc/mei/hbm.c 		req.dma_dscr[i].size = dev->dr_dscr[i].size;
req               317 drivers/misc/mei/hbm.c 	ret = mei_hbm_write_message(dev, &mei_hdr, &req);
req               439 drivers/misc/mei/hbm.c 			      struct hbm_add_client_request *req)
req               447 drivers/misc/mei/hbm.c 	ret = mei_hbm_me_cl_add(dev, (struct hbm_props_response *)req);
req               454 drivers/misc/mei/hbm.c 	return mei_hbm_add_cl_resp(dev, req->me_addr, status);
req               471 drivers/misc/mei/hbm.c 	struct hbm_notification_request req;
req               476 drivers/misc/mei/hbm.c 	mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, &req, len);
req               478 drivers/misc/mei/hbm.c 	req.start = start;
req               480 drivers/misc/mei/hbm.c 	ret = mei_hbm_write_message(dev, &mei_hdr, &req);
req               630 drivers/misc/mei/hbm.c 	struct hbm_power_gate req;
req               639 drivers/misc/mei/hbm.c 	memset(&req, 0, len);
req               640 drivers/misc/mei/hbm.c 	req.hbm_cmd = pg_cmd;
req               642 drivers/misc/mei/hbm.c 	ret = mei_hbm_write_message(dev, &mei_hdr, &req);
req               659 drivers/misc/mei/hbm.c 	struct hbm_host_stop_request req;
req               664 drivers/misc/mei/hbm.c 	memset(&req, 0, len);
req               665 drivers/misc/mei/hbm.c 	req.hbm_cmd = HOST_STOP_REQ_CMD;
req               666 drivers/misc/mei/hbm.c 	req.reason = DRIVER_STOP_REQUEST;
req               668 drivers/misc/mei/hbm.c 	return mei_hbm_write_message(dev, &mei_hdr, &req);
req               681 drivers/misc/mei/hbm.c 	struct hbm_flow_control req;
req               685 drivers/misc/mei/hbm.c 				&req, sizeof(req));
req               759 drivers/misc/mei/hbm.c 	struct hbm_client_connect_request req;
req               762 drivers/misc/mei/hbm.c 				&req, sizeof(req));
req               812 drivers/misc/mei/hbm.c 	struct hbm_client_connect_request req;
req               815 drivers/misc/mei/hbm.c 				&req, sizeof(req));
req               157 drivers/misc/mei/hw-txe.c static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req)
req               161 drivers/misc/mei/hw-txe.c 	bool do_req = hw->aliveness != req;
req               164 drivers/misc/mei/hw-txe.c 				hw->aliveness, req);
req               167 drivers/misc/mei/hw-txe.c 		mei_txe_br_reg_write(hw, SICR_HOST_ALIVENESS_REQ_REG, req);
req               292 drivers/misc/mei/hw-txe.c int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req)
req               294 drivers/misc/mei/hw-txe.c 	if (mei_txe_aliveness_set(dev, req))
req               295 drivers/misc/mei/hw-txe.c 		return mei_txe_aliveness_wait(dev, req);
req                60 drivers/misc/mei/hw-txe.h int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req);
req              1640 drivers/misc/mic/scif/scif_dma.c 	struct scif_rma_req req;
req              1703 drivers/misc/mic/scif/scif_dma.c 		req.out_window = &local_window;
req              1704 drivers/misc/mic/scif/scif_dma.c 		req.nr_bytes = ALIGN(len + (addr & ~PAGE_MASK),
req              1706 drivers/misc/mic/scif/scif_dma.c 		req.va_for_temp = addr & PAGE_MASK;
req              1707 drivers/misc/mic/scif/scif_dma.c 		req.prot = (dir == SCIF_LOCAL_TO_REMOTE ?
req              1712 drivers/misc/mic/scif/scif_dma.c 			req.head = &mmn->tc_reg_list;
req              1713 drivers/misc/mic/scif/scif_dma.c 			err = scif_query_tcw(ep, &req);
req              1717 drivers/misc/mic/scif/scif_dma.c 			err = scif_register_temp(epd, req.va_for_temp,
req              1718 drivers/misc/mic/scif/scif_dma.c 						 req.nr_bytes, req.prot,
req              1740 drivers/misc/mic/scif/scif_dma.c 		req.out_window = &local_window;
req              1741 drivers/misc/mic/scif/scif_dma.c 		req.offset = loffset;
req              1746 drivers/misc/mic/scif/scif_dma.c 		req.prot = dir == SCIF_LOCAL_TO_REMOTE ? VM_READ : VM_WRITE;
req              1747 drivers/misc/mic/scif/scif_dma.c 		req.nr_bytes = len;
req              1748 drivers/misc/mic/scif/scif_dma.c 		req.type = SCIF_WINDOW_PARTIAL;
req              1749 drivers/misc/mic/scif/scif_dma.c 		req.head = &ep->rma_info.reg_list;
req              1751 drivers/misc/mic/scif/scif_dma.c 		err = scif_query_window(&req);
req               103 drivers/misc/mic/scif/scif_fd.c 		struct scifioctl_connect req;
req               106 drivers/misc/mic/scif/scif_fd.c 		if (copy_from_user(&req, argp, sizeof(req)))
req               109 drivers/misc/mic/scif/scif_fd.c 		err = __scif_connect(priv, &req.peer, non_block);
req               113 drivers/misc/mic/scif/scif_fd.c 		req.self.node = ep->port.node;
req               114 drivers/misc/mic/scif/scif_fd.c 		req.self.port = ep->port.port;
req               116 drivers/misc/mic/scif/scif_fd.c 		if (copy_to_user(argp, &req, sizeof(req)))
req               298 drivers/misc/mic/scif/scif_fence.c 	struct scif_rma_req req;
req               303 drivers/misc/mic/scif/scif_fence.c 	req.out_window = &window;
req               304 drivers/misc/mic/scif/scif_fence.c 	req.offset = offset;
req               305 drivers/misc/mic/scif/scif_fence.c 	req.nr_bytes = sizeof(u64);
req               306 drivers/misc/mic/scif/scif_fence.c 	req.prot = SCIF_PROT_WRITE;
req               307 drivers/misc/mic/scif/scif_fence.c 	req.type = SCIF_WINDOW_SINGLE;
req               309 drivers/misc/mic/scif/scif_fence.c 		req.head = &ep->rma_info.reg_list;
req               311 drivers/misc/mic/scif/scif_fence.c 		req.head = &ep->rma_info.remote_reg_list;
req               313 drivers/misc/mic/scif/scif_fence.c 	err = scif_query_window(&req);
req                24 drivers/misc/mic/scif/scif_mmap.c 	struct scif_rma_req req;
req                31 drivers/misc/mic/scif/scif_mmap.c 	req.out_window = &window;
req                32 drivers/misc/mic/scif/scif_mmap.c 	req.offset = recv_window->offset;
req                33 drivers/misc/mic/scif/scif_mmap.c 	req.prot = recv_window->prot;
req                34 drivers/misc/mic/scif/scif_mmap.c 	req.nr_bytes = recv_window->nr_pages << PAGE_SHIFT;
req                35 drivers/misc/mic/scif/scif_mmap.c 	req.type = SCIF_WINDOW_FULL;
req                36 drivers/misc/mic/scif/scif_mmap.c 	req.head = &ep->rma_info.reg_list;
req                41 drivers/misc/mic/scif/scif_mmap.c 	if (scif_query_window(&req)) {
req               216 drivers/misc/mic/scif/scif_mmap.c 	struct scif_rma_req req;
req               235 drivers/misc/mic/scif/scif_mmap.c 	req.out_window = &window;
req               236 drivers/misc/mic/scif/scif_mmap.c 	req.offset = offset;
req               237 drivers/misc/mic/scif/scif_mmap.c 	req.prot = 0;
req               238 drivers/misc/mic/scif/scif_mmap.c 	req.nr_bytes = len;
req               239 drivers/misc/mic/scif/scif_mmap.c 	req.type = SCIF_WINDOW_SINGLE;
req               240 drivers/misc/mic/scif/scif_mmap.c 	req.head = &ep->rma_info.remote_reg_list;
req               244 drivers/misc/mic/scif/scif_mmap.c 	err = scif_query_window(&req);
req               548 drivers/misc/mic/scif/scif_mmap.c 	struct scif_rma_req req;
req               562 drivers/misc/mic/scif/scif_mmap.c 	req.out_window = &window;
req               563 drivers/misc/mic/scif/scif_mmap.c 	req.offset = offset;
req               564 drivers/misc/mic/scif/scif_mmap.c 	req.nr_bytes = vma->vm_end - vma->vm_start;
req               565 drivers/misc/mic/scif/scif_mmap.c 	req.prot = vma->vm_flags & (VM_READ | VM_WRITE);
req               566 drivers/misc/mic/scif/scif_mmap.c 	req.type = SCIF_WINDOW_PARTIAL;
req               567 drivers/misc/mic/scif/scif_mmap.c 	req.head = &ep->rma_info.remote_reg_list;
req               571 drivers/misc/mic/scif/scif_mmap.c 	err = scif_query_window(&req);
req               604 drivers/misc/mic/scif/scif_mmap.c 	struct scif_rma_req req;
req               634 drivers/misc/mic/scif/scif_mmap.c 	req.out_window = &window;
req               635 drivers/misc/mic/scif/scif_mmap.c 	req.offset = start_offset;
req               636 drivers/misc/mic/scif/scif_mmap.c 	req.nr_bytes = vma->vm_end - vma->vm_start;
req               637 drivers/misc/mic/scif/scif_mmap.c 	req.prot = vma->vm_flags & (VM_READ | VM_WRITE);
req               638 drivers/misc/mic/scif/scif_mmap.c 	req.type = SCIF_WINDOW_PARTIAL;
req               639 drivers/misc/mic/scif/scif_mmap.c 	req.head = &ep->rma_info.remote_reg_list;
req               643 drivers/misc/mic/scif/scif_mmap.c 	err = scif_query_window(&req);
req              1176 drivers/misc/mic/scif/scif_rma.c 	struct scif_rma_req req;
req              1184 drivers/misc/mic/scif/scif_rma.c 	req.out_window = &window;
req              1185 drivers/misc/mic/scif/scif_rma.c 	req.offset = recv_window->offset;
req              1186 drivers/misc/mic/scif/scif_rma.c 	req.prot = 0;
req              1187 drivers/misc/mic/scif/scif_rma.c 	req.nr_bytes = recv_window->nr_pages << PAGE_SHIFT;
req              1188 drivers/misc/mic/scif/scif_rma.c 	req.type = SCIF_WINDOW_FULL;
req              1189 drivers/misc/mic/scif/scif_rma.c 	req.head = &ep->rma_info.remote_reg_list;
req              1194 drivers/misc/mic/scif/scif_rma.c 	if (scif_query_window(&req)) {
req              1697 drivers/misc/mic/scif/scif_rma.c 	struct scif_rma_req req;
req              1722 drivers/misc/mic/scif/scif_rma.c 	req.out_window = &window;
req              1723 drivers/misc/mic/scif/scif_rma.c 	req.offset = offset;
req              1724 drivers/misc/mic/scif/scif_rma.c 	req.prot = 0;
req              1725 drivers/misc/mic/scif/scif_rma.c 	req.nr_bytes = len;
req              1726 drivers/misc/mic/scif/scif_rma.c 	req.type = SCIF_WINDOW_FULL;
req              1727 drivers/misc/mic/scif/scif_rma.c 	req.head = &ep->rma_info.reg_list;
req              1736 drivers/misc/mic/scif/scif_rma.c 	err = scif_query_window(&req);
req                76 drivers/misc/mic/scif/scif_rma_list.c int scif_query_tcw(struct scif_endpt *ep, struct scif_rma_req *req)
req                78 drivers/misc/mic/scif/scif_rma_list.c 	struct list_head *item, *temp, *head = req->head;
req                80 drivers/misc/mic/scif/scif_rma_list.c 	u64 start_va_window, start_va_req = req->va_for_temp;
req                81 drivers/misc/mic/scif/scif_rma_list.c 	u64 end_va_window, end_va_req = start_va_req + req->nr_bytes;
req                83 drivers/misc/mic/scif/scif_rma_list.c 	if (!req->nr_bytes)
req               106 drivers/misc/mic/scif/scif_rma_list.c 		if ((window->prot & req->prot) == req->prot) {
req               109 drivers/misc/mic/scif/scif_rma_list.c 				*req->out_window = window;
req               114 drivers/misc/mic/scif/scif_rma_list.c 				req->nr_bytes +=
req               116 drivers/misc/mic/scif/scif_rma_list.c 				req->va_for_temp = start_va_window;
req               119 drivers/misc/mic/scif/scif_rma_list.c 				req->nr_bytes += end_va_window - end_va_req;
req               135 drivers/misc/mic/scif/scif_rma_list.c int scif_query_window(struct scif_rma_req *req)
req               139 drivers/misc/mic/scif/scif_rma_list.c 	s64 end_offset, offset = req->offset;
req               140 drivers/misc/mic/scif/scif_rma_list.c 	u64 tmp_min, nr_bytes_left = req->nr_bytes;
req               142 drivers/misc/mic/scif/scif_rma_list.c 	if (!req->nr_bytes)
req               145 drivers/misc/mic/scif/scif_rma_list.c 	list_for_each(item, req->head) {
req               155 drivers/misc/mic/scif/scif_rma_list.c 		if ((window->prot & req->prot) != req->prot)
req               157 drivers/misc/mic/scif/scif_rma_list.c 		if (nr_bytes_left == req->nr_bytes)
req               159 drivers/misc/mic/scif/scif_rma_list.c 			*req->out_window = window;
req               169 drivers/misc/mic/scif/scif_rma_list.c 			if (req->type == SCIF_WINDOW_PARTIAL ||
req               170 drivers/misc/mic/scif/scif_rma_list.c 			    req->type == SCIF_WINDOW_SINGLE)
req               179 drivers/misc/mic/scif/scif_rma_list.c 		if (req->type == SCIF_WINDOW_SINGLE)
req               765 drivers/misc/sgi-gru/grufault.c 	struct gru_unload_context_req req;
req               768 drivers/misc/sgi-gru/grufault.c 	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
req               771 drivers/misc/sgi-gru/grufault.c 	gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);
req               773 drivers/misc/sgi-gru/grufault.c 	if (!req.gseg)
req               776 drivers/misc/sgi-gru/grufault.c 	gts = gru_find_lock_gts(req.gseg);
req               794 drivers/misc/sgi-gru/grufault.c 	struct gru_flush_tlb_req req;
req               798 drivers/misc/sgi-gru/grufault.c 	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
req               801 drivers/misc/sgi-gru/grufault.c 	gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
req               802 drivers/misc/sgi-gru/grufault.c 		req.vaddr, req.len);
req               804 drivers/misc/sgi-gru/grufault.c 	gts = gru_find_lock_gts(req.gseg);
req               810 drivers/misc/sgi-gru/grufault.c 	gru_flush_tlb_range(gms, req.vaddr, req.len);
req               821 drivers/misc/sgi-gru/grufault.c 	struct gru_get_gseg_statistics_req req;
req               823 drivers/misc/sgi-gru/grufault.c 	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
req               831 drivers/misc/sgi-gru/grufault.c 	gts = gru_find_lock_gts(req.gseg);
req               833 drivers/misc/sgi-gru/grufault.c 		memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
req               836 drivers/misc/sgi-gru/grufault.c 		memset(&req.stats, 0, sizeof(gts->ustats));
req               839 drivers/misc/sgi-gru/grufault.c 	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
req               852 drivers/misc/sgi-gru/grufault.c 	struct gru_set_context_option_req req;
req               856 drivers/misc/sgi-gru/grufault.c 	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
req               858 drivers/misc/sgi-gru/grufault.c 	gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);
req               860 drivers/misc/sgi-gru/grufault.c 	gts = gru_find_lock_gts(req.gseg);
req               862 drivers/misc/sgi-gru/grufault.c 		gts = gru_alloc_locked_gts(req.gseg);
req               867 drivers/misc/sgi-gru/grufault.c 	switch (req.op) {
req               870 drivers/misc/sgi-gru/grufault.c 		if (req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB ||
req               871 drivers/misc/sgi-gru/grufault.c 		    req.val1 < -1 || req.val1 >= GRU_MAX_BLADES ||
req               872 drivers/misc/sgi-gru/grufault.c 		    (req.val1 >= 0 && !gru_base[req.val1])) {
req               875 drivers/misc/sgi-gru/grufault.c 			gts->ts_user_blade_id = req.val1;
req               876 drivers/misc/sgi-gru/grufault.c 			gts->ts_user_chiplet_id = req.val0;
req               886 drivers/misc/sgi-gru/grufault.c 		gts->ts_cch_req_slice = req.val1 & 3;
req               122 drivers/misc/sgi-gru/grufile.c 	struct gru_create_context_req req;
req               127 drivers/misc/sgi-gru/grufile.c 	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
req               130 drivers/misc/sgi-gru/grufile.c 	if (req.data_segment_bytes > max_user_dsr_bytes)
req               132 drivers/misc/sgi-gru/grufile.c 	if (req.control_blocks > max_user_cbrs || !req.maximum_thread_count)
req               135 drivers/misc/sgi-gru/grufile.c 	if (!(req.options & GRU_OPT_MISS_MASK))
req               136 drivers/misc/sgi-gru/grufile.c 		req.options |= GRU_OPT_MISS_FMM_INTR;
req               139 drivers/misc/sgi-gru/grufile.c 	vma = gru_find_vma(req.gseg);
req               142 drivers/misc/sgi-gru/grufile.c 		vdata->vd_user_options = req.options;
req               144 drivers/misc/sgi-gru/grufile.c 		    GRU_DS_BYTES_TO_AU(req.data_segment_bytes);
req               145 drivers/misc/sgi-gru/grufile.c 		vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks);
req               146 drivers/misc/sgi-gru/grufile.c 		vdata->vd_tlb_preload_count = req.tlb_preload_count;
req               183 drivers/misc/sgi-gru/grufile.c static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
req               188 drivers/misc/sgi-gru/grufile.c 	gru_dbg(grudev, "file %p, req 0x%x, 0x%lx\n", file, req, arg);
req               190 drivers/misc/sgi-gru/grufile.c 	switch (req) {
req               178 drivers/misc/sgi-gru/grukdump.c 	struct gru_dump_chiplet_state_req req;
req               183 drivers/misc/sgi-gru/grukdump.c 	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
req               187 drivers/misc/sgi-gru/grukdump.c 	if (req.gid >= gru_max_gids)
req               189 drivers/misc/sgi-gru/grukdump.c 	req.gid = array_index_nospec(req.gid, gru_max_gids);
req               191 drivers/misc/sgi-gru/grukdump.c 	gru = GID_TO_GRU(req.gid);
req               192 drivers/misc/sgi-gru/grukdump.c 	ubuf = req.buf;
req               193 drivers/misc/sgi-gru/grukdump.c 	ubufend = req.buf + req.buflen;
req               206 drivers/misc/sgi-gru/grukdump.c 		if (req.ctxnum == ctxnum || req.ctxnum < 0) {
req               208 drivers/misc/sgi-gru/grukdump.c 						req.data_opt, req.lock_cch,
req               209 drivers/misc/sgi-gru/grukdump.c 						req.flush_cbrs);
req               217 drivers/misc/sgi-gru/grukdump.c 	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
req                77 drivers/mmc/core/block.c #define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
req                78 drivers/mmc/core/block.c 				  (rq_data_dir(req) == WRITE))
req               233 drivers/mmc/core/block.c 	struct request *req;
req               246 drivers/mmc/core/block.c 	req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, 0);
req               247 drivers/mmc/core/block.c 	if (IS_ERR(req)) {
req               248 drivers/mmc/core/block.c 		count = PTR_ERR(req);
req               251 drivers/mmc/core/block.c 	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
req               252 drivers/mmc/core/block.c 	blk_execute_rq(mq->queue, NULL, req, 0);
req               253 drivers/mmc/core/block.c 	ret = req_to_mmc_queue_req(req)->drv_op_result;
req               254 drivers/mmc/core/block.c 	blk_put_request(req);
req               653 drivers/mmc/core/block.c 	struct request *req;
req               671 drivers/mmc/core/block.c 	req = blk_get_request(mq->queue,
req               673 drivers/mmc/core/block.c 	if (IS_ERR(req)) {
req               674 drivers/mmc/core/block.c 		err = PTR_ERR(req);
req               678 drivers/mmc/core/block.c 	req_to_mmc_queue_req(req)->drv_op =
req               680 drivers/mmc/core/block.c 	req_to_mmc_queue_req(req)->drv_op_data = idatas;
req               681 drivers/mmc/core/block.c 	req_to_mmc_queue_req(req)->ioc_count = 1;
req               682 drivers/mmc/core/block.c 	blk_execute_rq(mq->queue, NULL, req, 0);
req               683 drivers/mmc/core/block.c 	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
req               685 drivers/mmc/core/block.c 	blk_put_request(req);
req               703 drivers/mmc/core/block.c 	struct request *req;
req               741 drivers/mmc/core/block.c 	req = blk_get_request(mq->queue,
req               743 drivers/mmc/core/block.c 	if (IS_ERR(req)) {
req               744 drivers/mmc/core/block.c 		err = PTR_ERR(req);
req               747 drivers/mmc/core/block.c 	req_to_mmc_queue_req(req)->drv_op =
req               749 drivers/mmc/core/block.c 	req_to_mmc_queue_req(req)->drv_op_data = idata;
req               750 drivers/mmc/core/block.c 	req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
req               751 drivers/mmc/core/block.c 	blk_execute_rq(mq->queue, NULL, req, 0);
req               752 drivers/mmc/core/block.c 	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
req               758 drivers/mmc/core/block.c 	blk_put_request(req);
req              1027 drivers/mmc/core/block.c static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
req              1039 drivers/mmc/core/block.c 	mq_rq = req_to_mmc_queue_req(req);
req              1083 drivers/mmc/core/block.c 	blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
req              1086 drivers/mmc/core/block.c static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
req              1099 drivers/mmc/core/block.c 	from = blk_rq_pos(req);
req              1100 drivers/mmc/core/block.c 	nr = blk_rq_sectors(req);
req              1120 drivers/mmc/core/block.c 	blk_mq_end_request(req, status);
req              1124 drivers/mmc/core/block.c 				       struct request *req)
req              1137 drivers/mmc/core/block.c 	from = blk_rq_pos(req);
req              1138 drivers/mmc/core/block.c 	nr = blk_rq_sectors(req);
req              1190 drivers/mmc/core/block.c 	blk_mq_end_request(req, status);
req              1193 drivers/mmc/core/block.c static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
req              1200 drivers/mmc/core/block.c 	blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
req              1212 drivers/mmc/core/block.c 				    struct request *req)
req              1216 drivers/mmc/core/block.c 		if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))
req              1287 drivers/mmc/core/block.c 	struct request *req = mmc_queue_req_to_req(mqrq);
req              1294 drivers/mmc/core/block.c 	do_rel_wr = (req->cmd_flags & REQ_FUA) &&
req              1295 drivers/mmc/core/block.c 		    rq_data_dir(req) == WRITE &&
req              1301 drivers/mmc/core/block.c 	brq->mrq.tag = req->tag;
req              1306 drivers/mmc/core/block.c 	if (rq_data_dir(req) == READ) {
req              1315 drivers/mmc/core/block.c 	brq->data.blocks = blk_rq_sectors(req);
req              1316 drivers/mmc/core/block.c 	brq->data.blk_addr = blk_rq_pos(req);
req              1339 drivers/mmc/core/block.c 		if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) &&
req              1340 drivers/mmc/core/block.c 		    (blk_rq_pos(req) + blk_rq_sectors(req) ==
req              1358 drivers/mmc/core/block.c 						(rq_data_dir(req) == READ) ?
req              1364 drivers/mmc/core/block.c 		mmc_apply_rel_rw(brq, card, req);
req              1373 drivers/mmc/core/block.c 		      (req->cmd_flags & REQ_META) &&
req              1374 drivers/mmc/core/block.c 		      (rq_data_dir(req) == WRITE) &&
req              1390 drivers/mmc/core/block.c 	if (brq->data.blocks != blk_rq_sectors(req)) {
req              1414 drivers/mmc/core/block.c static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
req              1416 drivers/mmc/core/block.c 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
req              1418 drivers/mmc/core/block.c 	struct request_queue *q = req->q;
req              1420 drivers/mmc/core/block.c 	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
req              1436 drivers/mmc/core/block.c 			blk_mq_requeue_request(req, true);
req              1438 drivers/mmc/core/block.c 			blk_mq_end_request(req, BLK_STS_IOERR);
req              1440 drivers/mmc/core/block.c 		if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered))
req              1441 drivers/mmc/core/block.c 			blk_mq_requeue_request(req, true);
req              1443 drivers/mmc/core/block.c 			__blk_mq_end_request(req, BLK_STS_OK);
req              1445 drivers/mmc/core/block.c 		blk_mq_end_request(req, BLK_STS_OK);
req              1486 drivers/mmc/core/block.c 	struct request *req = mmc_queue_req_to_req(mqrq);
req              1487 drivers/mmc/core/block.c 	struct request_queue *q = req->q;
req              1495 drivers/mmc/core/block.c 		mmc_blk_cqe_complete_rq(mq, req);
req              1497 drivers/mmc/core/block.c 		blk_mq_complete_request(req);
req              1509 drivers/mmc/core/block.c 						 struct request *req)
req              1516 drivers/mmc/core/block.c 	brq->mrq.tag = req->tag;
req              1521 drivers/mmc/core/block.c static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
req              1523 drivers/mmc/core/block.c 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
req              1524 drivers/mmc/core/block.c 	struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);
req              1536 drivers/mmc/core/block.c static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
req              1538 drivers/mmc/core/block.c 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
req              1552 drivers/mmc/core/block.c 	struct request *req = mmc_queue_req_to_req(mqrq);
req              1560 drivers/mmc/core/block.c 	brq->cmd.arg = blk_rq_pos(req);
req              1570 drivers/mmc/core/block.c 		    rq_data_dir(req) == READ)
req              1579 drivers/mmc/core/block.c 	brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd;
req              1627 drivers/mmc/core/block.c static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
req              1629 drivers/mmc/core/block.c 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
req              1648 drivers/mmc/core/block.c static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
req              1650 drivers/mmc/core/block.c 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
req              1671 drivers/mmc/core/block.c 			err = mmc_blk_fix_state(card, req);
req              1689 drivers/mmc/core/block.c 	} while (blk_update_request(req, error, 512));
req              1695 drivers/mmc/core/block.c 	blk_update_request(req, BLK_STS_IOERR, 512);
req              1715 drivers/mmc/core/block.c static bool mmc_blk_status_error(struct request *req, u32 status)
req              1717 drivers/mmc/core/block.c 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
req              1719 drivers/mmc/core/block.c 	struct mmc_queue *mq = req->q->queuedata;
req              1730 drivers/mmc/core/block.c 	       (rq_data_dir(req) == WRITE && !mmc_blk_in_tran_state(status));
req              1755 drivers/mmc/core/block.c static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
req              1757 drivers/mmc/core/block.c 	int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
req              1758 drivers/mmc/core/block.c 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
req              1771 drivers/mmc/core/block.c 	if (err || mmc_blk_status_error(req, status))
req              1793 drivers/mmc/core/block.c 		err = mmc_blk_fix_state(mq->card, req);
req              1800 drivers/mmc/core/block.c 	    rq_data_dir(req) == WRITE) {
req              1810 drivers/mmc/core/block.c 		pr_err("%s: recovery failed!\n", req->rq_disk->disk_name);
req              1837 drivers/mmc/core/block.c 	if (!mmc_large_sector(card) && rq_data_dir(req) == READ &&
req              1840 drivers/mmc/core/block.c 		mmc_blk_read_single(mq, req);
req              1853 drivers/mmc/core/block.c static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
req              1855 drivers/mmc/core/block.c 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
req              1859 drivers/mmc/core/block.c 	if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
req              1881 drivers/mmc/core/block.c 					    struct request *req)
req              1883 drivers/mmc/core/block.c 	int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
req              1888 drivers/mmc/core/block.c static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
req              1890 drivers/mmc/core/block.c 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
req              1894 drivers/mmc/core/block.c 		if (blk_update_request(req, BLK_STS_OK, nr_bytes))
req              1895 drivers/mmc/core/block.c 			blk_mq_requeue_request(req, true);
req              1897 drivers/mmc/core/block.c 			__blk_mq_end_request(req, BLK_STS_OK);
req              1898 drivers/mmc/core/block.c 	} else if (!blk_rq_bytes(req)) {
req              1899 drivers/mmc/core/block.c 		__blk_mq_end_request(req, BLK_STS_IOERR);
req              1901 drivers/mmc/core/block.c 		blk_mq_requeue_request(req, true);
req              1904 drivers/mmc/core/block.c 			req->rq_flags |= RQF_QUIET;
req              1905 drivers/mmc/core/block.c 		blk_mq_end_request(req, BLK_STS_IOERR);
req              1924 drivers/mmc/core/block.c void mmc_blk_mq_complete(struct request *req)
req              1926 drivers/mmc/core/block.c 	struct mmc_queue *mq = req->q->queuedata;
req              1929 drivers/mmc/core/block.c 		mmc_blk_cqe_complete_rq(mq, req);
req              1931 drivers/mmc/core/block.c 		mmc_blk_mq_complete_rq(mq, req);
req              1935 drivers/mmc/core/block.c 				       struct request *req)
req              1937 drivers/mmc/core/block.c 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
req              1941 drivers/mmc/core/block.c 	    mmc_blk_card_busy(mq->card, req)) {
req              1942 drivers/mmc/core/block.c 		mmc_blk_mq_rw_recovery(mq, req);
req              1944 drivers/mmc/core/block.c 		mmc_blk_rw_reset_success(mq, req);
req              1951 drivers/mmc/core/block.c static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
req              1958 drivers/mmc/core/block.c 	mq->in_flight[mmc_issue_type(mq, req)] -= 1;
req              1968 drivers/mmc/core/block.c static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
req              1970 drivers/mmc/core/block.c 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
req              1981 drivers/mmc/core/block.c 		mmc_blk_mq_complete_rq(mq, req);
req              1983 drivers/mmc/core/block.c 		blk_mq_complete_request(req);
req              1985 drivers/mmc/core/block.c 	mmc_blk_mq_dec_in_flight(mq, req);
req              1990 drivers/mmc/core/block.c 	struct request *req = mq->recovery_req;
req              1992 drivers/mmc/core/block.c 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
req              1999 drivers/mmc/core/block.c 		mmc_blk_mq_rw_recovery(mq, req);
req              2004 drivers/mmc/core/block.c 	mmc_blk_mq_post_req(mq, req);
req              2043 drivers/mmc/core/block.c 	struct request *req = mmc_queue_req_to_req(mqrq);
req              2044 drivers/mmc/core/block.c 	struct request_queue *q = req->q;
req              2059 drivers/mmc/core/block.c 		mq->complete_req = req;
req              2083 drivers/mmc/core/block.c 		mq->recovery_req = req;
req              2090 drivers/mmc/core/block.c 	mmc_blk_rw_reset_success(mq, req);
req              2095 drivers/mmc/core/block.c 	mmc_blk_mq_post_req(mq, req);
req              2133 drivers/mmc/core/block.c 				  struct request *req)
req              2135 drivers/mmc/core/block.c 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
req              2179 drivers/mmc/core/block.c enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
req              2190 drivers/mmc/core/block.c 	switch (mmc_issue_type(mq, req)) {
req              2195 drivers/mmc/core/block.c 		switch (req_op(req)) {
req              2198 drivers/mmc/core/block.c 			mmc_blk_issue_drv_op(mq, req);
req              2201 drivers/mmc/core/block.c 			mmc_blk_issue_discard_rq(mq, req);
req              2204 drivers/mmc/core/block.c 			mmc_blk_issue_secdiscard_rq(mq, req);
req              2207 drivers/mmc/core/block.c 			mmc_blk_issue_flush(mq, req);
req              2216 drivers/mmc/core/block.c 		switch (req_op(req)) {
req              2218 drivers/mmc/core/block.c 			ret = mmc_blk_cqe_issue_flush(mq, req);
req              2223 drivers/mmc/core/block.c 				ret = mmc_blk_cqe_issue_rw_rq(mq, req);
req              2225 drivers/mmc/core/block.c 				ret = mmc_blk_mq_issue_rw_rq(mq, req);
req              2711 drivers/mmc/core/block.c 	struct request *req;
req              2715 drivers/mmc/core/block.c 	req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
req              2716 drivers/mmc/core/block.c 	if (IS_ERR(req))
req              2717 drivers/mmc/core/block.c 		return PTR_ERR(req);
req              2718 drivers/mmc/core/block.c 	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
req              2719 drivers/mmc/core/block.c 	blk_execute_rq(mq->queue, NULL, req, 0);
req              2720 drivers/mmc/core/block.c 	ret = req_to_mmc_queue_req(req)->drv_op_result;
req              2725 drivers/mmc/core/block.c 	blk_put_request(req);
req              2740 drivers/mmc/core/block.c 	struct request *req;
req              2751 drivers/mmc/core/block.c 	req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
req              2752 drivers/mmc/core/block.c 	if (IS_ERR(req)) {
req              2753 drivers/mmc/core/block.c 		err = PTR_ERR(req);
req              2756 drivers/mmc/core/block.c 	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
req              2757 drivers/mmc/core/block.c 	req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
req              2758 drivers/mmc/core/block.c 	blk_execute_rq(mq->queue, NULL, req, 0);
req              2759 drivers/mmc/core/block.c 	err = req_to_mmc_queue_req(req)->drv_op_result;
req              2760 drivers/mmc/core/block.c 	blk_put_request(req);
req                12 drivers/mmc/core/block.h enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req);
req                13 drivers/mmc/core/block.h void mmc_blk_mq_complete(struct request *req);
req                46 drivers/mmc/core/queue.c 					      struct request *req)
req                48 drivers/mmc/core/queue.c 	switch (req_op(req)) {
req                61 drivers/mmc/core/queue.c enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
req                66 drivers/mmc/core/queue.c 		return mmc_cqe_issue_type(host, req);
req                68 drivers/mmc/core/queue.c 	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
req                86 drivers/mmc/core/queue.c 	struct request *req = mmc_queue_req_to_req(mqrq);
req                87 drivers/mmc/core/queue.c 	struct request_queue *q = req->q;
req                96 drivers/mmc/core/queue.c static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
req                98 drivers/mmc/core/queue.c 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
req               100 drivers/mmc/core/queue.c 	struct mmc_queue *mq = req->q->queuedata;
req               102 drivers/mmc/core/queue.c 	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
req               121 drivers/mmc/core/queue.c static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
req               124 drivers/mmc/core/queue.c 	struct request_queue *q = req->q;
req               133 drivers/mmc/core/queue.c 	return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
req               204 drivers/mmc/core/queue.c static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
req               207 drivers/mmc/core/queue.c 	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
req               218 drivers/mmc/core/queue.c static void mmc_exit_request(struct request_queue *q, struct request *req)
req               220 drivers/mmc/core/queue.c 	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
req               226 drivers/mmc/core/queue.c static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
req               229 drivers/mmc/core/queue.c 	return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
req               232 drivers/mmc/core/queue.c static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
req               237 drivers/mmc/core/queue.c 	mmc_exit_request(mq->queue, req);
req               243 drivers/mmc/core/queue.c 	struct request *req = bd->rq;
req               244 drivers/mmc/core/queue.c 	struct request_queue *q = req->q;
req               254 drivers/mmc/core/queue.c 		req->rq_flags |= RQF_QUIET;
req               258 drivers/mmc/core/queue.c 	issue_type = mmc_issue_type(mq, req);
req               285 drivers/mmc/core/queue.c 		req->timeout = 600 * HZ;
req               298 drivers/mmc/core/queue.c 	if (!(req->rq_flags & RQF_DONTPREP)) {
req               299 drivers/mmc/core/queue.c 		req_to_mmc_queue_req(req)->retries = 0;
req               300 drivers/mmc/core/queue.c 		req->rq_flags |= RQF_DONTPREP;
req               311 drivers/mmc/core/queue.c 	blk_mq_start_request(req);
req               313 drivers/mmc/core/queue.c 	issued = mmc_blk_mq_issue_rq(mq, req);
req               521 drivers/mmc/core/queue.c 	struct request *req = mmc_queue_req_to_req(mqrq);
req               523 drivers/mmc/core/queue.c 	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
req               109 drivers/mmc/core/queue.h enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req);
req               355 drivers/mmc/host/android-goldfish.c 				      struct mmc_request *req)
req               357 drivers/mmc/host/android-goldfish.c 	struct mmc_data *data = req->data;
req               398 drivers/mmc/host/android-goldfish.c static void goldfish_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
req               404 drivers/mmc/host/android-goldfish.c 	host->mrq = req;
req               405 drivers/mmc/host/android-goldfish.c 	goldfish_mmc_prepare_data(host, req);
req               406 drivers/mmc/host/android-goldfish.c 	goldfish_mmc_start_command(host, req->cmd);
req               412 drivers/mmc/host/android-goldfish.c 	if (req->cmd->opcode == SD_IO_SEND_OP_COND &&
req               413 drivers/mmc/host/android-goldfish.c 	    req->cmd->flags == (MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR))
req               414 drivers/mmc/host/android-goldfish.c 		req->cmd->error = -EINVAL;
req               298 drivers/mmc/host/cavium.c static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
req               302 drivers/mmc/host/cavium.c 	int data_len = req->data->blocks * req->data->blksz;
req               330 drivers/mmc/host/cavium.c 	req->data->bytes_xfered = bytes_xfered;
req               331 drivers/mmc/host/cavium.c 	req->data->error = 0;
req               334 drivers/mmc/host/cavium.c static void do_write(struct mmc_request *req)
req               336 drivers/mmc/host/cavium.c 	req->data->bytes_xfered = req->data->blocks * req->data->blksz;
req               337 drivers/mmc/host/cavium.c 	req->data->error = 0;
req               340 drivers/mmc/host/cavium.c static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
req               353 drivers/mmc/host/cavium.c 		req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
req               354 drivers/mmc/host/cavium.c 		req->cmd->resp[1] = 0;
req               355 drivers/mmc/host/cavium.c 		req->cmd->resp[2] = 0;
req               356 drivers/mmc/host/cavium.c 		req->cmd->resp[3] = 0;
req               359 drivers/mmc/host/cavium.c 		req->cmd->resp[3] = rsp_lo & 0xffffffff;
req               360 drivers/mmc/host/cavium.c 		req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
req               362 drivers/mmc/host/cavium.c 		req->cmd->resp[1] = rsp_hi & 0xffffffff;
req               363 drivers/mmc/host/cavium.c 		req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
req               438 drivers/mmc/host/cavium.c 	struct mmc_request *req;
req               455 drivers/mmc/host/cavium.c 	req = host->current_req;
req               456 drivers/mmc/host/cavium.c 	if (!req)
req               468 drivers/mmc/host/cavium.c 	if (!host->dma_active && req->data &&
req               473 drivers/mmc/host/cavium.c 			do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
req               475 drivers/mmc/host/cavium.c 			do_write(req);
req               483 drivers/mmc/host/cavium.c 	if (!(host_done && req->done))
req               486 drivers/mmc/host/cavium.c 	req->cmd->error = check_status(rsp_sts);
req               488 drivers/mmc/host/cavium.c 	if (host->dma_active && req->data)
req               489 drivers/mmc/host/cavium.c 		if (!finish_dma(host, req->data))
req               492 drivers/mmc/host/cavium.c 	set_cmd_response(host, req, rsp_sts);
req               498 drivers/mmc/host/cavium.c 	req->done(req);
req               516 drivers/mmc/host/davinci_mmc.c mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
req               520 drivers/mmc/host/davinci_mmc.c 	struct mmc_data *data = req->data;
req               585 drivers/mmc/host/davinci_mmc.c static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req)
req               602 drivers/mmc/host/davinci_mmc.c 		req->cmd->error = -ETIMEDOUT;
req               603 drivers/mmc/host/davinci_mmc.c 		mmc_request_done(mmc, req);
req               608 drivers/mmc/host/davinci_mmc.c 	mmc_davinci_prepare_data(host, req);
req               609 drivers/mmc/host/davinci_mmc.c 	mmc_davinci_start_command(host, req->cmd);
req               147 drivers/mmc/host/jz4740_mmc.c 	struct mmc_request *req;
req               408 drivers/mmc/host/jz4740_mmc.c 	struct mmc_request *req;
req               411 drivers/mmc/host/jz4740_mmc.c 	req = host->req;
req               412 drivers/mmc/host/jz4740_mmc.c 	data = req->data;
req               413 drivers/mmc/host/jz4740_mmc.c 	host->req = NULL;
req               417 drivers/mmc/host/jz4740_mmc.c 	mmc_request_done(host->mmc, req);
req               448 drivers/mmc/host/jz4740_mmc.c 			host->req->cmd->error = -ETIMEDOUT;
req               451 drivers/mmc/host/jz4740_mmc.c 			host->req->cmd->error = -EIO;
req               456 drivers/mmc/host/jz4740_mmc.c 			host->req->cmd->error = -ETIMEDOUT;
req               459 drivers/mmc/host/jz4740_mmc.c 			host->req->cmd->error = -EIO;
req               604 drivers/mmc/host/jz4740_mmc.c 	host->req->cmd->error = -ETIMEDOUT;
req               696 drivers/mmc/host/jz4740_mmc.c 	struct mmc_command *cmd = host->req->cmd;
req               712 drivers/mmc/host/jz4740_mmc.c 	struct mmc_command *cmd = host->req->cmd;
req               713 drivers/mmc/host/jz4740_mmc.c 	struct mmc_request *req = host->req;
req               767 drivers/mmc/host/jz4740_mmc.c 		if (!req->stop)
req               770 drivers/mmc/host/jz4740_mmc.c 		jz4740_mmc_send_command(host, req->stop);
req               772 drivers/mmc/host/jz4740_mmc.c 		if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) {
req               814 drivers/mmc/host/jz4740_mmc.c 	if (host->req && cmd && irq_reg) {
req               858 drivers/mmc/host/jz4740_mmc.c static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
req               862 drivers/mmc/host/jz4740_mmc.c 	host->req = req;
req               870 drivers/mmc/host/jz4740_mmc.c 	jz4740_mmc_send_command(host, req->cmd);
req               133 drivers/mmc/host/mxcmmc.c 	struct mmc_request	*req;
req               428 drivers/mmc/host/mxcmmc.c 		struct mmc_request *req)
req               439 drivers/mmc/host/mxcmmc.c 	host->req = NULL;
req               443 drivers/mmc/host/mxcmmc.c 	mmc_request_done(host->mmc, req);
req               605 drivers/mmc/host/mxcmmc.c 	struct mmc_data *data = host->req->data;
req               643 drivers/mmc/host/mxcmmc.c 	if (host->req->stop) {
req               644 drivers/mmc/host/mxcmmc.c 		if (mxcmci_start_cmd(host, host->req->stop, 0)) {
req               645 drivers/mmc/host/mxcmmc.c 			mxcmci_finish_request(host, host->req);
req               649 drivers/mmc/host/mxcmmc.c 		mxcmci_finish_request(host, host->req);
req               655 drivers/mmc/host/mxcmmc.c 	struct mmc_request *req;
req               666 drivers/mmc/host/mxcmmc.c 	if (!host->req) {
req               671 drivers/mmc/host/mxcmmc.c 	req = host->req;
req               672 drivers/mmc/host/mxcmmc.c 	if (!req->stop)
req               673 drivers/mmc/host/mxcmmc.c 		host->req = NULL; /* we will handle finish req below */
req               685 drivers/mmc/host/mxcmmc.c 	if (req->stop) {
req               686 drivers/mmc/host/mxcmmc.c 		if (mxcmci_start_cmd(host, req->stop, 0)) {
req               687 drivers/mmc/host/mxcmmc.c 			mxcmci_finish_request(host, req);
req               691 drivers/mmc/host/mxcmmc.c 		mxcmci_finish_request(host, req);
req               700 drivers/mmc/host/mxcmmc.c 	if (!host->data && host->req) {
req               701 drivers/mmc/host/mxcmmc.c 		mxcmci_finish_request(host, host->req);
req               755 drivers/mmc/host/mxcmmc.c static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
req               761 drivers/mmc/host/mxcmmc.c 	WARN_ON(host->req != NULL);
req               763 drivers/mmc/host/mxcmmc.c 	host->req = req;
req               769 drivers/mmc/host/mxcmmc.c 	if (req->data) {
req               770 drivers/mmc/host/mxcmmc.c 		error = mxcmci_setup_data(host, req->data);
req               772 drivers/mmc/host/mxcmmc.c 			req->cmd->error = error;
req               779 drivers/mmc/host/mxcmmc.c 		if (req->data->flags & MMC_DATA_WRITE)
req               783 drivers/mmc/host/mxcmmc.c 	error = mxcmci_start_cmd(host, req->cmd, cmdat);
req               787 drivers/mmc/host/mxcmmc.c 		mxcmci_finish_request(host, req);
req               963 drivers/mmc/host/mxcmmc.c 	struct mmc_request *req = host->req;
req               982 drivers/mmc/host/mxcmmc.c 	host->req = NULL;
req               985 drivers/mmc/host/mxcmmc.c 	mmc_request_done(host->mmc, req);
req               238 drivers/mmc/host/omap.c 				   struct mmc_request *req);
req               912 drivers/mmc/host/omap.c static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
req               923 drivers/mmc/host/omap.c static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
req               929 drivers/mmc/host/omap.c 	timeout = req->data->timeout_ns / cycle_ns;
req               930 drivers/mmc/host/omap.c 	timeout += req->data->timeout_clks;
req               944 drivers/mmc/host/omap.c mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
req               946 drivers/mmc/host/omap.c 	struct mmc_data *data = req->data;
req               957 drivers/mmc/host/omap.c 		set_cmd_timeout(host, req);
req               965 drivers/mmc/host/omap.c 	set_data_timeout(host, req);
req              1066 drivers/mmc/host/omap.c 				   struct mmc_request *req)
req              1070 drivers/mmc/host/omap.c 	host->mrq = req;
req              1073 drivers/mmc/host/omap.c 	mmc_omap_prepare_data(host, req);
req              1074 drivers/mmc/host/omap.c 	mmc_omap_start_command(host, req->cmd);
req              1083 drivers/mmc/host/omap.c static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
req              1092 drivers/mmc/host/omap.c 		slot->mrq = req;
req              1099 drivers/mmc/host/omap.c 	mmc_omap_start_request(host, req);
req              1240 drivers/mmc/host/omap_hsmmc.c 					struct mmc_request *req)
req              1244 drivers/mmc/host/omap_hsmmc.c 	struct mmc_data *data = req->data;
req              1341 drivers/mmc/host/omap_hsmmc.c 	struct mmc_request *req = host->mrq;
req              1344 drivers/mmc/host/omap_hsmmc.c 	if (!req->data)
req              1346 drivers/mmc/host/omap_hsmmc.c 	OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz)
req              1347 drivers/mmc/host/omap_hsmmc.c 				| (req->data->blocks << 16));
req              1348 drivers/mmc/host/omap_hsmmc.c 	set_data_timeout(host, req->data->timeout_ns,
req              1349 drivers/mmc/host/omap_hsmmc.c 				req->data->timeout_clks);
req              1350 drivers/mmc/host/omap_hsmmc.c 	chan = omap_hsmmc_get_dma_chan(host, req->data);
req              1358 drivers/mmc/host/omap_hsmmc.c omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
req              1363 drivers/mmc/host/omap_hsmmc.c 	host->data = req->data;
req              1365 drivers/mmc/host/omap_hsmmc.c 	if (req->data == NULL) {
req              1367 drivers/mmc/host/omap_hsmmc.c 		if (req->cmd->flags & MMC_RSP_BUSY) {
req              1368 drivers/mmc/host/omap_hsmmc.c 			timeout = req->cmd->busy_timeout * NSEC_PER_MSEC;
req              1383 drivers/mmc/host/omap_hsmmc.c 		ret = omap_hsmmc_setup_dma_transfer(host, req);
req              1428 drivers/mmc/host/omap_hsmmc.c static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
req              1438 drivers/mmc/host/omap_hsmmc.c 	host->mrq = req;
req              1440 drivers/mmc/host/omap_hsmmc.c 	err = omap_hsmmc_prepare_data(host, req);
req              1442 drivers/mmc/host/omap_hsmmc.c 		req->cmd->error = err;
req              1443 drivers/mmc/host/omap_hsmmc.c 		if (req->data)
req              1444 drivers/mmc/host/omap_hsmmc.c 			req->data->error = err;
req              1446 drivers/mmc/host/omap_hsmmc.c 		mmc_request_done(mmc, req);
req              1449 drivers/mmc/host/omap_hsmmc.c 	if (req->sbc && !(host->flags & AUTO_CMD23)) {
req              1450 drivers/mmc/host/omap_hsmmc.c 		omap_hsmmc_start_command(host, req->sbc, NULL);
req              1455 drivers/mmc/host/omap_hsmmc.c 	omap_hsmmc_start_command(host, req->cmd, req->data);
req               441 drivers/mmc/host/sunxi-mmc.c 				       struct mmc_request *req)
req               449 drivers/mmc/host/sunxi-mmc.c 	if (req->cmd->opcode == SD_IO_RW_EXTENDED) {
req               452 drivers/mmc/host/sunxi-mmc.c 		      ((req->cmd->arg >> 28) & 0x7);
req               468 drivers/mmc/host/sunxi-mmc.c 		if (req->stop)
req               469 drivers/mmc/host/sunxi-mmc.c 			req->stop->resp[0] = -ETIMEDOUT;
req               471 drivers/mmc/host/sunxi-mmc.c 		if (req->stop)
req               472 drivers/mmc/host/sunxi-mmc.c 			req->stop->resp[0] = mmc_readl(host, REG_RESP0);
req               100 drivers/mmc/host/tifm_sd.c 	struct mmc_request    *req;
req               165 drivers/mmc/host/tifm_sd.c 	struct mmc_data *r_data = host->req->cmd->data;
req               403 drivers/mmc/host/tifm_sd.c 	struct mmc_command *cmd = host->req->cmd;
req               427 drivers/mmc/host/tifm_sd.c 			if (host->req->stop) {
req               435 drivers/mmc/host/tifm_sd.c 					tifm_sd_exec(host, host->req->stop);
req               456 drivers/mmc/host/tifm_sd.c 			if (host->req->stop) {
req               459 drivers/mmc/host/tifm_sd.c 					tifm_sd_exec(host, host->req->stop);
req               485 drivers/mmc/host/tifm_sd.c 	if (host->req) {
req               486 drivers/mmc/host/tifm_sd.c 		r_data = host->req->cmd->data;
req               515 drivers/mmc/host/tifm_sd.c 	if (host->req) {
req               516 drivers/mmc/host/tifm_sd.c 		cmd = host->req->cmd;
req               537 drivers/mmc/host/tifm_sd.c 			if (host->req->stop) {
req               539 drivers/mmc/host/tifm_sd.c 					host->req->stop->error = cmd_error;
req               544 drivers/mmc/host/tifm_sd.c 					tifm_sd_exec(host, host->req->stop);
req               556 drivers/mmc/host/tifm_sd.c 					tifm_sd_fetch_resp(host->req->stop,
req               631 drivers/mmc/host/tifm_sd.c 	if (host->req) {
req               719 drivers/mmc/host/tifm_sd.c 	host->req = mrq;
req               744 drivers/mmc/host/tifm_sd.c 	mrq = host->req;
req               745 drivers/mmc/host/tifm_sd.c 	host->req = NULL;
req               789 drivers/mmc/host/tifm_sd.c 	       dev_name(&host->dev->dev), host->req->cmd->opcode, host->cmd_flags);
req              1007 drivers/mmc/host/tifm_sd.c 	if (host->req) {
req              1011 drivers/mmc/host/tifm_sd.c 		host->req->cmd->error = -ENOMEDIUM;
req              1012 drivers/mmc/host/tifm_sd.c 		if (host->req->stop)
req              1013 drivers/mmc/host/tifm_sd.c 			host->req->stop->error = -ENOMEDIUM;
req               218 drivers/mmc/host/ushc.c 	struct mmc_request *req = ushc->current_req;
req               224 drivers/mmc/host/ushc.c 		req->cmd->error = urb->status;
req               227 drivers/mmc/host/ushc.c 			req->cmd->error = -EIO;
req               229 drivers/mmc/host/ushc.c 			req->cmd->error = -ETIMEDOUT;
req               231 drivers/mmc/host/ushc.c 	if (req->data) {
req               234 drivers/mmc/host/ushc.c 				req->data->error = -EIO;
req               236 drivers/mmc/host/ushc.c 				req->data->error = -ETIMEDOUT;
req               237 drivers/mmc/host/ushc.c 			req->data->bytes_xfered = 0;
req               239 drivers/mmc/host/ushc.c 			req->data->bytes_xfered = req->data->blksz * req->data->blocks;
req               243 drivers/mmc/host/ushc.c 	req->cmd->resp[0] = le32_to_cpu(ushc->csw->response);
req               245 drivers/mmc/host/ushc.c 	mmc_request_done(ushc->mmc, req);
req               248 drivers/mmc/host/ushc.c static void ushc_request(struct mmc_host *mmc, struct mmc_request *req)
req               262 drivers/mmc/host/ushc.c 	if (req->cmd->flags & MMC_RSP_136) {
req               269 drivers/mmc/host/ushc.c 	if (req->data && ushc->clock_freq < 6000000) {
req               274 drivers/mmc/host/ushc.c 	ushc->current_req = req;
req               277 drivers/mmc/host/ushc.c 	ushc->cbw->cmd_idx = cpu_to_le16(req->cmd->opcode);
req               278 drivers/mmc/host/ushc.c 	if (req->data)
req               279 drivers/mmc/host/ushc.c 		ushc->cbw->block_size = cpu_to_le16(req->data->blksz);
req               282 drivers/mmc/host/ushc.c 	ushc->cbw->arg = cpu_to_le32(req->cmd->arg);
req               289 drivers/mmc/host/ushc.c 	if (req->data) {
req               290 drivers/mmc/host/ushc.c 		struct mmc_data *data = req->data;
req               316 drivers/mmc/host/ushc.c 		req->cmd->error = ret;
req               317 drivers/mmc/host/ushc.c 		mmc_request_done(mmc, req);
req               334 drivers/mmc/host/vub300.c 	struct mmc_request *req;
req              1747 drivers/mmc/host/vub300.c 		struct mmc_request *req = vub300->req;
req              1773 drivers/mmc/host/vub300.c 		vub300->req = NULL;
req              1780 drivers/mmc/host/vub300.c 			mmc_request_done(vub300->mmc, req);
req              1788 drivers/mmc/host/vub300.c 			mmc_request_done(vub300->mmc, req);
req              1898 drivers/mmc/host/vub300.c static void vub300_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
req              1900 drivers/mmc/host/vub300.c 	struct mmc_command *cmd = req->cmd;
req              1904 drivers/mmc/host/vub300.c 		mmc_request_done(mmc, req);
req              1907 drivers/mmc/host/vub300.c 		struct mmc_data *data = req->data;
req              1910 drivers/mmc/host/vub300.c 			mmc_request_done(mmc, req);
req              1915 drivers/mmc/host/vub300.c 			mmc_request_done(mmc, req);
req              1920 drivers/mmc/host/vub300.c 			mmc_request_done(mmc, req);
req              1925 drivers/mmc/host/vub300.c 			mmc_request_done(mmc, req);
req              1940 drivers/mmc/host/vub300.c 			mmc_request_done(mmc, req);
req              1944 drivers/mmc/host/vub300.c 			vub300->req = req;
req               201 drivers/mmc/host/wmt-sdmmc.c 	struct mmc_request *req;
req               296 drivers/mmc/host/wmt-sdmmc.c 	struct mmc_request *req;
req               297 drivers/mmc/host/wmt-sdmmc.c 	req = priv->req;
req               299 drivers/mmc/host/wmt-sdmmc.c 	req->data->bytes_xfered = req->data->blksz * req->data->blocks;
req               302 drivers/mmc/host/wmt-sdmmc.c 	if (req->data->flags & MMC_DATA_WRITE)
req               303 drivers/mmc/host/wmt-sdmmc.c 		dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
req               304 drivers/mmc/host/wmt-sdmmc.c 			     req->data->sg_len, DMA_TO_DEVICE);
req               306 drivers/mmc/host/wmt-sdmmc.c 		dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
req               307 drivers/mmc/host/wmt-sdmmc.c 			     req->data->sg_len, DMA_FROM_DEVICE);
req               310 drivers/mmc/host/wmt-sdmmc.c 	if ((req->cmd->error) || (req->data->error))
req               311 drivers/mmc/host/wmt-sdmmc.c 		mmc_request_done(priv->mmc, req);
req               314 drivers/mmc/host/wmt-sdmmc.c 		if (!req->data->stop) {
req               316 drivers/mmc/host/wmt-sdmmc.c 			mmc_request_done(priv->mmc, req);
req               325 drivers/mmc/host/wmt-sdmmc.c 			priv->cmd = req->data->stop;
req               326 drivers/mmc/host/wmt-sdmmc.c 			wmt_mci_send_command(priv->mmc, req->data->stop->opcode,
req               327 drivers/mmc/host/wmt-sdmmc.c 					     7, req->data->stop->arg, 9);
req               345 drivers/mmc/host/wmt-sdmmc.c 		priv->req->data->error = -ETIMEDOUT;
req               350 drivers/mmc/host/wmt-sdmmc.c 	priv->req->data->error = 0;
req               401 drivers/mmc/host/wmt-sdmmc.c 	if ((!priv->req->data) ||
req               402 drivers/mmc/host/wmt-sdmmc.c 	    ((priv->req->data->stop) && (priv->cmd == priv->req->data->stop))) {
req               421 drivers/mmc/host/wmt-sdmmc.c 			mmc_request_done(priv->mmc, priv->req);
req               559 drivers/mmc/host/wmt-sdmmc.c static void wmt_mci_request(struct mmc_host *mmc, struct mmc_request *req)
req               577 drivers/mmc/host/wmt-sdmmc.c 	priv->req = req;
req               584 drivers/mmc/host/wmt-sdmmc.c 	priv->cmd = req->cmd;
req               586 drivers/mmc/host/wmt-sdmmc.c 	command = req->cmd->opcode;
req               587 drivers/mmc/host/wmt-sdmmc.c 	arg = req->cmd->arg;
req               588 drivers/mmc/host/wmt-sdmmc.c 	rsptype = mmc_resp_type(req->cmd);
req               598 drivers/mmc/host/wmt-sdmmc.c 	if (!req->data) {
req               603 drivers/mmc/host/wmt-sdmmc.c 	if (req->data) {
req               611 drivers/mmc/host/wmt-sdmmc.c 		writew((reg_tmp & 0xF800) | (req->data->blksz - 1),
req               615 drivers/mmc/host/wmt-sdmmc.c 		writew(req->data->blocks, priv->sdmmc_base + SDMMC_BLKCNT);
req               619 drivers/mmc/host/wmt-sdmmc.c 		if (req->data->flags & MMC_DATA_WRITE) {
req               620 drivers/mmc/host/wmt-sdmmc.c 			sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
req               621 drivers/mmc/host/wmt-sdmmc.c 					    req->data->sg_len, DMA_TO_DEVICE);
req               623 drivers/mmc/host/wmt-sdmmc.c 			if (req->data->blocks > 1)
req               626 drivers/mmc/host/wmt-sdmmc.c 			sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
req               627 drivers/mmc/host/wmt-sdmmc.c 					    req->data->sg_len, DMA_FROM_DEVICE);
req               629 drivers/mmc/host/wmt-sdmmc.c 			if (req->data->blocks > 1)
req               636 drivers/mmc/host/wmt-sdmmc.c 		for_each_sg(req->data->sg, sg, sg_cnt, i) {
req               639 drivers/mmc/host/wmt-sdmmc.c 				wmt_dma_init_descriptor(desc, req->data->blksz,
req               644 drivers/mmc/host/wmt-sdmmc.c 				offset += req->data->blksz;
req               646 drivers/mmc/host/wmt-sdmmc.c 				if (desc_cnt == req->data->blocks)
req               653 drivers/mmc/host/wmt-sdmmc.c 		if (req->data->flags & MMC_DATA_WRITE)
req                67 drivers/mtd/mtd_blkdevs.c 			       struct request *req)
req                72 drivers/mtd/mtd_blkdevs.c 	block = blk_rq_pos(req) << 9 >> tr->blkshift;
req                73 drivers/mtd/mtd_blkdevs.c 	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
req                75 drivers/mtd/mtd_blkdevs.c 	if (req_op(req) == REQ_OP_FLUSH) {
req                81 drivers/mtd/mtd_blkdevs.c 	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
req                82 drivers/mtd/mtd_blkdevs.c 	    get_capacity(req->rq_disk))
req                85 drivers/mtd/mtd_blkdevs.c 	switch (req_op(req)) {
req                91 drivers/mtd/mtd_blkdevs.c 		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
req                94 drivers/mtd/mtd_blkdevs.c 				kunmap(bio_page(req->bio));
req                98 drivers/mtd/mtd_blkdevs.c 		kunmap(bio_page(req->bio));
req                99 drivers/mtd/mtd_blkdevs.c 		rq_flush_dcache_pages(req);
req               105 drivers/mtd/mtd_blkdevs.c 		rq_flush_dcache_pages(req);
req               106 drivers/mtd/mtd_blkdevs.c 		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
req               109 drivers/mtd/mtd_blkdevs.c 				kunmap(bio_page(req->bio));
req               113 drivers/mtd/mtd_blkdevs.c 		kunmap(bio_page(req->bio));
req               145 drivers/mtd/mtd_blkdevs.c 	struct request *req = NULL;
req               152 drivers/mtd/mtd_blkdevs.c 		if (!req && !(req = mtd_next_request(dev))) {
req               172 drivers/mtd/mtd_blkdevs.c 		res = do_blktrans_request(dev->tr, dev, req);
req               175 drivers/mtd/mtd_blkdevs.c 		if (!blk_update_request(req, res, blk_rq_cur_bytes(req))) {
req               176 drivers/mtd/mtd_blkdevs.c 			__blk_mq_end_request(req, res);
req               177 drivers/mtd/mtd_blkdevs.c 			req = NULL;
req               589 drivers/mtd/mtdchar.c 	struct mtd_write_req req;
req               594 drivers/mtd/mtdchar.c 	if (copy_from_user(&req, argp, sizeof(req)))
req               597 drivers/mtd/mtdchar.c 	usr_data = (const void __user *)(uintptr_t)req.usr_data;
req               598 drivers/mtd/mtdchar.c 	usr_oob = (const void __user *)(uintptr_t)req.usr_oob;
req               603 drivers/mtd/mtdchar.c 	ops.mode = req.mode;
req               604 drivers/mtd/mtdchar.c 	ops.len = (size_t)req.len;
req               605 drivers/mtd/mtdchar.c 	ops.ooblen = (size_t)req.ooblen;
req               626 drivers/mtd/mtdchar.c 	ret = mtd_write_oob(mtd, (loff_t)req.start, &ops);
req              1049 drivers/mtd/nand/raw/atmel/nand-controller.c 	struct atmel_pmecc_user_req req;
req              1072 drivers/mtd/nand/raw/atmel/nand-controller.c 		req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
req              1074 drivers/mtd/nand/raw/atmel/nand-controller.c 		req.ecc.strength = chip->ecc.strength;
req              1076 drivers/mtd/nand/raw/atmel/nand-controller.c 		req.ecc.strength = chip->base.eccreq.strength;
req              1078 drivers/mtd/nand/raw/atmel/nand-controller.c 		req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
req              1081 drivers/mtd/nand/raw/atmel/nand-controller.c 		req.ecc.sectorsize = chip->ecc.size;
req              1083 drivers/mtd/nand/raw/atmel/nand-controller.c 		req.ecc.sectorsize = chip->base.eccreq.step_size;
req              1085 drivers/mtd/nand/raw/atmel/nand-controller.c 		req.ecc.sectorsize = ATMEL_PMECC_SECTOR_SIZE_AUTO;
req              1087 drivers/mtd/nand/raw/atmel/nand-controller.c 	req.pagesize = mtd->writesize;
req              1088 drivers/mtd/nand/raw/atmel/nand-controller.c 	req.oobsize = mtd->oobsize;
req              1091 drivers/mtd/nand/raw/atmel/nand-controller.c 		req.ecc.bytes = 4;
req              1092 drivers/mtd/nand/raw/atmel/nand-controller.c 		req.ecc.ooboffset = 0;
req              1094 drivers/mtd/nand/raw/atmel/nand-controller.c 		req.ecc.bytes = mtd->oobsize - 2;
req              1095 drivers/mtd/nand/raw/atmel/nand-controller.c 		req.ecc.ooboffset = ATMEL_PMECC_OOBOFFSET_AUTO;
req              1098 drivers/mtd/nand/raw/atmel/nand-controller.c 	nand->pmecc = atmel_pmecc_create_user(nc->pmecc, &req);
req              1103 drivers/mtd/nand/raw/atmel/nand-controller.c 	chip->ecc.size = req.ecc.sectorsize;
req              1104 drivers/mtd/nand/raw/atmel/nand-controller.c 	chip->ecc.bytes = req.ecc.bytes / req.ecc.nsectors;
req              1105 drivers/mtd/nand/raw/atmel/nand-controller.c 	chip->ecc.strength = req.ecc.strength;
req               220 drivers/mtd/nand/raw/atmel/pmecc.c atmel_pmecc_create_gf_tables(const struct atmel_pmecc_user_req *req)
req               226 drivers/mtd/nand/raw/atmel/pmecc.c 	if (req->ecc.sectorsize == 512) {
req               255 drivers/mtd/nand/raw/atmel/pmecc.c atmel_pmecc_get_gf_tables(const struct atmel_pmecc_user_req *req)
req               260 drivers/mtd/nand/raw/atmel/pmecc.c 	if (req->ecc.sectorsize == 512)
req               268 drivers/mtd/nand/raw/atmel/pmecc.c 		ret = atmel_pmecc_create_gf_tables(req);
req               278 drivers/mtd/nand/raw/atmel/pmecc.c 					struct atmel_pmecc_user_req *req)
req               282 drivers/mtd/nand/raw/atmel/pmecc.c 	if (req->pagesize <= 0 || req->oobsize <= 0 || req->ecc.bytes <= 0)
req               285 drivers/mtd/nand/raw/atmel/pmecc.c 	if (req->ecc.ooboffset >= 0 &&
req               286 drivers/mtd/nand/raw/atmel/pmecc.c 	    req->ecc.ooboffset + req->ecc.bytes > req->oobsize)
req               289 drivers/mtd/nand/raw/atmel/pmecc.c 	if (req->ecc.sectorsize == ATMEL_PMECC_SECTOR_SIZE_AUTO) {
req               290 drivers/mtd/nand/raw/atmel/pmecc.c 		if (req->ecc.strength != ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH)
req               293 drivers/mtd/nand/raw/atmel/pmecc.c 		if (req->pagesize > 512)
req               294 drivers/mtd/nand/raw/atmel/pmecc.c 			req->ecc.sectorsize = 1024;
req               296 drivers/mtd/nand/raw/atmel/pmecc.c 			req->ecc.sectorsize = 512;
req               299 drivers/mtd/nand/raw/atmel/pmecc.c 	if (req->ecc.sectorsize != 512 && req->ecc.sectorsize != 1024)
req               302 drivers/mtd/nand/raw/atmel/pmecc.c 	if (req->pagesize % req->ecc.sectorsize)
req               305 drivers/mtd/nand/raw/atmel/pmecc.c 	req->ecc.nsectors = req->pagesize / req->ecc.sectorsize;
req               307 drivers/mtd/nand/raw/atmel/pmecc.c 	max_eccbytes = req->ecc.bytes;
req               312 drivers/mtd/nand/raw/atmel/pmecc.c 		if (req->ecc.strength != ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH &&
req               313 drivers/mtd/nand/raw/atmel/pmecc.c 		    strength < req->ecc.strength)
req               316 drivers/mtd/nand/raw/atmel/pmecc.c 		nbytes = DIV_ROUND_UP(strength * fls(8 * req->ecc.sectorsize),
req               318 drivers/mtd/nand/raw/atmel/pmecc.c 		nbytes *= req->ecc.nsectors;
req               326 drivers/mtd/nand/raw/atmel/pmecc.c 		if (req->ecc.strength != ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH)
req               333 drivers/mtd/nand/raw/atmel/pmecc.c 	req->ecc.bytes = eccbytes;
req               334 drivers/mtd/nand/raw/atmel/pmecc.c 	req->ecc.strength = eccstrength;
req               336 drivers/mtd/nand/raw/atmel/pmecc.c 	if (req->ecc.ooboffset < 0)
req               337 drivers/mtd/nand/raw/atmel/pmecc.c 		req->ecc.ooboffset = req->oobsize - eccbytes;
req               344 drivers/mtd/nand/raw/atmel/pmecc.c 			struct atmel_pmecc_user_req *req)
req               350 drivers/mtd/nand/raw/atmel/pmecc.c 	ret = atmel_pmecc_prepare_user_req(pmecc, req);
req               357 drivers/mtd/nand/raw/atmel/pmecc.c 	size += ((2 * req->ecc.strength) + 1) * sizeof(u16) *
req               358 drivers/mtd/nand/raw/atmel/pmecc.c 		(2 + req->ecc.strength + 2);
req               360 drivers/mtd/nand/raw/atmel/pmecc.c 	size += (req->ecc.strength + 1) * sizeof(u16);
req               363 drivers/mtd/nand/raw/atmel/pmecc.c 	size += (req->ecc.strength + 1) * sizeof(s32) * 3;
req               372 drivers/mtd/nand/raw/atmel/pmecc.c 	user->si = user->partial_syn + ((2 * req->ecc.strength) + 1);
req               373 drivers/mtd/nand/raw/atmel/pmecc.c 	user->lmu = user->si + ((2 * req->ecc.strength) + 1);
req               374 drivers/mtd/nand/raw/atmel/pmecc.c 	user->smu = user->lmu + (req->ecc.strength + 1);
req               376 drivers/mtd/nand/raw/atmel/pmecc.c 				    (((2 * req->ecc.strength) + 1) *
req               377 drivers/mtd/nand/raw/atmel/pmecc.c 				     (req->ecc.strength + 2)),
req               379 drivers/mtd/nand/raw/atmel/pmecc.c 	user->dmu = user->mu + req->ecc.strength + 1;
req               380 drivers/mtd/nand/raw/atmel/pmecc.c 	user->delta = user->dmu + req->ecc.strength + 1;
req               382 drivers/mtd/nand/raw/atmel/pmecc.c 	gf_tables = atmel_pmecc_get_gf_tables(req);
req               390 drivers/mtd/nand/raw/atmel/pmecc.c 	user->eccbytes = req->ecc.bytes / req->ecc.nsectors;
req               393 drivers/mtd/nand/raw/atmel/pmecc.c 		if (pmecc->caps->strengths[strength] == req->ecc.strength)
req               398 drivers/mtd/nand/raw/atmel/pmecc.c 			  PMECC_CFG_NSECTORS(req->ecc.nsectors);
req               400 drivers/mtd/nand/raw/atmel/pmecc.c 	if (req->ecc.sectorsize == 1024)
req               403 drivers/mtd/nand/raw/atmel/pmecc.c 	user->cache.sarea = req->oobsize - 1;
req               404 drivers/mtd/nand/raw/atmel/pmecc.c 	user->cache.saddr = req->ecc.ooboffset;
req               405 drivers/mtd/nand/raw/atmel/pmecc.c 	user->cache.eaddr = req->ecc.ooboffset + req->ecc.bytes - 1;
req                57 drivers/mtd/nand/raw/atmel/pmecc.h 			struct atmel_pmecc_user_req *req);
req               203 drivers/mtd/nand/spi/core.c 				const struct nand_page_io_req *req)
req               206 drivers/mtd/nand/spi/core.c 	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
req               213 drivers/mtd/nand/spi/core.c 				      const struct nand_page_io_req *req)
req               223 drivers/mtd/nand/spi/core.c 	if (req->datalen) {
req               229 drivers/mtd/nand/spi/core.c 	if (req->ooblen) {
req               237 drivers/mtd/nand/spi/core.c 	rdesc = spinand->dirmaps[req->pos.plane].rdesc;
req               252 drivers/mtd/nand/spi/core.c 	if (req->datalen)
req               253 drivers/mtd/nand/spi/core.c 		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
req               254 drivers/mtd/nand/spi/core.c 		       req->datalen);
req               256 drivers/mtd/nand/spi/core.c 	if (req->ooblen) {
req               257 drivers/mtd/nand/spi/core.c 		if (req->mode == MTD_OPS_AUTO_OOB)
req               258 drivers/mtd/nand/spi/core.c 			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
req               260 drivers/mtd/nand/spi/core.c 						    req->ooboffs,
req               261 drivers/mtd/nand/spi/core.c 						    req->ooblen);
req               263 drivers/mtd/nand/spi/core.c 			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
req               264 drivers/mtd/nand/spi/core.c 			       req->ooblen);
req               271 drivers/mtd/nand/spi/core.c 				     const struct nand_page_io_req *req)
req               290 drivers/mtd/nand/spi/core.c 	if (req->datalen)
req               291 drivers/mtd/nand/spi/core.c 		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
req               292 drivers/mtd/nand/spi/core.c 		       req->datalen);
req               294 drivers/mtd/nand/spi/core.c 	if (req->ooblen) {
req               295 drivers/mtd/nand/spi/core.c 		if (req->mode == MTD_OPS_AUTO_OOB)
req               296 drivers/mtd/nand/spi/core.c 			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
req               298 drivers/mtd/nand/spi/core.c 						    req->ooboffs,
req               299 drivers/mtd/nand/spi/core.c 						    req->ooblen);
req               301 drivers/mtd/nand/spi/core.c 			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
req               302 drivers/mtd/nand/spi/core.c 			       req->ooblen);
req               305 drivers/mtd/nand/spi/core.c 	wdesc = spinand->dirmaps[req->pos.plane].wdesc;
req               324 drivers/mtd/nand/spi/core.c 			      const struct nand_page_io_req *req)
req               327 drivers/mtd/nand/spi/core.c 	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
req               433 drivers/mtd/nand/spi/core.c 			     const struct nand_page_io_req *req,
req               439 drivers/mtd/nand/spi/core.c 	ret = spinand_load_page_op(spinand, req);
req               447 drivers/mtd/nand/spi/core.c 	ret = spinand_read_from_cache_op(spinand, req);
req               458 drivers/mtd/nand/spi/core.c 			      const struct nand_page_io_req *req)
req               467 drivers/mtd/nand/spi/core.c 	ret = spinand_write_to_cache_op(spinand, req);
req               471 drivers/mtd/nand/spi/core.c 	ret = spinand_program_op(spinand, req);
req               499 drivers/mtd/nand/spi/core.c 		ret = spinand_select_target(spinand, iter.req.pos.target);
req               507 drivers/mtd/nand/spi/core.c 		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
req               520 drivers/mtd/nand/spi/core.c 		ops->retlen += iter.req.datalen;
req               521 drivers/mtd/nand/spi/core.c 		ops->oobretlen += iter.req.ooblen;
req               547 drivers/mtd/nand/spi/core.c 		ret = spinand_select_target(spinand, iter.req.pos.target);
req               555 drivers/mtd/nand/spi/core.c 		ret = spinand_write_page(spinand, &iter.req);
req               559 drivers/mtd/nand/spi/core.c 		ops->retlen += iter.req.datalen;
req               560 drivers/mtd/nand/spi/core.c 		ops->oobretlen += iter.req.ooblen;
req               572 drivers/mtd/nand/spi/core.c 	struct nand_page_io_req req = {
req               581 drivers/mtd/nand/spi/core.c 	spinand_read_page(spinand, &req, false);
req               607 drivers/mtd/nand/spi/core.c 	struct nand_page_io_req req = {
req               624 drivers/mtd/nand/spi/core.c 	return spinand_write_page(spinand, &req);
req               188 drivers/mtd/ubi/block.c 	struct request *req = blk_mq_rq_from_pdu(pdu);
req               189 drivers/mtd/ubi/block.c 	struct ubiblock *dev = req->q->queuedata;
req               191 drivers/mtd/ubi/block.c 	to_read = blk_rq_bytes(req);
req               192 drivers/mtd/ubi/block.c 	pos = blk_rq_pos(req) << 9;
req               296 drivers/mtd/ubi/block.c 	struct request *req = blk_mq_rq_from_pdu(pdu);
req               298 drivers/mtd/ubi/block.c 	blk_mq_start_request(req);
req               305 drivers/mtd/ubi/block.c 	blk_rq_map_sg(req->q, req, pdu->usgl.sg);
req               308 drivers/mtd/ubi/block.c 	rq_flush_dcache_pages(req);
req               310 drivers/mtd/ubi/block.c 	blk_mq_end_request(req, errno_to_blk_status(ret));
req               316 drivers/mtd/ubi/block.c 	struct request *req = bd->rq;
req               318 drivers/mtd/ubi/block.c 	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
req               320 drivers/mtd/ubi/block.c 	switch (req_op(req)) {
req               332 drivers/mtd/ubi/block.c 		struct request *req, unsigned int hctx_idx,
req               335 drivers/mtd/ubi/block.c 	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
req               431 drivers/mtd/ubi/cdev.c 		struct ubi_leb_change_req req;
req               433 drivers/mtd/ubi/cdev.c 		err = copy_from_user(&req, argp,
req               448 drivers/mtd/ubi/cdev.c 		if (!ubi_leb_valid(vol, req.lnum) ||
req               449 drivers/mtd/ubi/cdev.c 		    req.bytes < 0 || req.bytes > vol->usable_leb_size)
req               456 drivers/mtd/ubi/cdev.c 		err = ubi_start_leb_change(ubi, vol, &req);
req               457 drivers/mtd/ubi/cdev.c 		if (req.bytes == 0)
req               496 drivers/mtd/ubi/cdev.c 		struct ubi_map_req req;
req               498 drivers/mtd/ubi/cdev.c 		err = copy_from_user(&req, argp, sizeof(struct ubi_map_req));
req               503 drivers/mtd/ubi/cdev.c 		err = ubi_leb_map(desc, req.lnum);
req               538 drivers/mtd/ubi/cdev.c 		struct ubi_set_vol_prop_req req;
req               540 drivers/mtd/ubi/cdev.c 		err = copy_from_user(&req, argp,
req               546 drivers/mtd/ubi/cdev.c 		switch (req.property) {
req               549 drivers/mtd/ubi/cdev.c 			desc->vol->direct_writes = !!req.value;
req               594 drivers/mtd/ubi/cdev.c 			    const struct ubi_mkvol_req *req)
req               598 drivers/mtd/ubi/cdev.c 	if (req->bytes < 0 || req->alignment < 0 || req->vol_type < 0 ||
req               599 drivers/mtd/ubi/cdev.c 	    req->name_len < 0)
req               602 drivers/mtd/ubi/cdev.c 	if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) &&
req               603 drivers/mtd/ubi/cdev.c 	    req->vol_id != UBI_VOL_NUM_AUTO)
req               606 drivers/mtd/ubi/cdev.c 	if (req->alignment == 0)
req               609 drivers/mtd/ubi/cdev.c 	if (req->bytes == 0)
req               612 drivers/mtd/ubi/cdev.c 	if (req->vol_type != UBI_DYNAMIC_VOLUME &&
req               613 drivers/mtd/ubi/cdev.c 	    req->vol_type != UBI_STATIC_VOLUME)
req               616 drivers/mtd/ubi/cdev.c 	if (req->flags & ~UBI_VOL_VALID_FLGS)
req               619 drivers/mtd/ubi/cdev.c 	if (req->flags & UBI_VOL_SKIP_CRC_CHECK_FLG &&
req               620 drivers/mtd/ubi/cdev.c 	    req->vol_type != UBI_STATIC_VOLUME)
req               623 drivers/mtd/ubi/cdev.c 	if (req->alignment > ubi->leb_size)
req               626 drivers/mtd/ubi/cdev.c 	n = req->alignment & (ubi->min_io_size - 1);
req               627 drivers/mtd/ubi/cdev.c 	if (req->alignment != 1 && n)
req               630 drivers/mtd/ubi/cdev.c 	if (!req->name[0] || !req->name_len)
req               633 drivers/mtd/ubi/cdev.c 	if (req->name_len > UBI_VOL_NAME_MAX) {
req               638 drivers/mtd/ubi/cdev.c 	n = strnlen(req->name, req->name_len + 1);
req               639 drivers/mtd/ubi/cdev.c 	if (n != req->name_len)
req               646 drivers/mtd/ubi/cdev.c 	ubi_dump_mkvol_req(req);
req               658 drivers/mtd/ubi/cdev.c 			    const struct ubi_rsvol_req *req)
req               660 drivers/mtd/ubi/cdev.c 	if (req->bytes <= 0)
req               663 drivers/mtd/ubi/cdev.c 	if (req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots)
req               680 drivers/mtd/ubi/cdev.c 			  struct ubi_rnvol_req *req)
req               686 drivers/mtd/ubi/cdev.c 	if (req->count < 0 || req->count > UBI_MAX_RNVOL)
req               689 drivers/mtd/ubi/cdev.c 	if (req->count == 0)
req               693 drivers/mtd/ubi/cdev.c 	for (i = 0; i < req->count; i++) {
req               694 drivers/mtd/ubi/cdev.c 		if (req->ents[i].vol_id < 0 ||
req               695 drivers/mtd/ubi/cdev.c 		    req->ents[i].vol_id >= ubi->vtbl_slots)
req               697 drivers/mtd/ubi/cdev.c 		if (req->ents[i].name_len < 0)
req               699 drivers/mtd/ubi/cdev.c 		if (req->ents[i].name_len > UBI_VOL_NAME_MAX)
req               701 drivers/mtd/ubi/cdev.c 		req->ents[i].name[req->ents[i].name_len] = '\0';
req               702 drivers/mtd/ubi/cdev.c 		n = strlen(req->ents[i].name);
req               703 drivers/mtd/ubi/cdev.c 		if (n != req->ents[i].name_len)
req               708 drivers/mtd/ubi/cdev.c 	for (i = 0; i < req->count - 1; i++) {
req               709 drivers/mtd/ubi/cdev.c 		for (n = i + 1; n < req->count; n++) {
req               710 drivers/mtd/ubi/cdev.c 			if (req->ents[i].vol_id == req->ents[n].vol_id) {
req               712 drivers/mtd/ubi/cdev.c 					req->ents[i].vol_id);
req               715 drivers/mtd/ubi/cdev.c 			if (!strcmp(req->ents[i].name, req->ents[n].name)) {
req               717 drivers/mtd/ubi/cdev.c 					req->ents[i].name);
req               725 drivers/mtd/ubi/cdev.c 	for (i = 0; i < req->count; i++) {
req               726 drivers/mtd/ubi/cdev.c 		int vol_id = req->ents[i].vol_id;
req               727 drivers/mtd/ubi/cdev.c 		int name_len = req->ents[i].name_len;
req               728 drivers/mtd/ubi/cdev.c 		const char *name = req->ents[i].name;
req               850 drivers/mtd/ubi/cdev.c 		struct ubi_mkvol_req req;
req               853 drivers/mtd/ubi/cdev.c 		err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
req               859 drivers/mtd/ubi/cdev.c 		err = verify_mkvol_req(ubi, &req);
req               864 drivers/mtd/ubi/cdev.c 		err = ubi_create_volume(ubi, &req);
req               869 drivers/mtd/ubi/cdev.c 		err = put_user(req.vol_id, (__user int32_t *)argp);
req               911 drivers/mtd/ubi/cdev.c 		struct ubi_rsvol_req req;
req               914 drivers/mtd/ubi/cdev.c 		err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
req               920 drivers/mtd/ubi/cdev.c 		err = verify_rsvol_req(ubi, &req);
req               924 drivers/mtd/ubi/cdev.c 		desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE);
req               930 drivers/mtd/ubi/cdev.c 		pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1,
req               943 drivers/mtd/ubi/cdev.c 		struct ubi_rnvol_req *req;
req               946 drivers/mtd/ubi/cdev.c 		req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
req               947 drivers/mtd/ubi/cdev.c 		if (!req) {
req               952 drivers/mtd/ubi/cdev.c 		err = copy_from_user(req, argp, sizeof(struct ubi_rnvol_req));
req               955 drivers/mtd/ubi/cdev.c 			kfree(req);
req               959 drivers/mtd/ubi/cdev.c 		err = rename_volumes(ubi, req);
req               960 drivers/mtd/ubi/cdev.c 		kfree(req);
req              1016 drivers/mtd/ubi/cdev.c 		struct ubi_attach_req req;
req              1020 drivers/mtd/ubi/cdev.c 		err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
req              1026 drivers/mtd/ubi/cdev.c 		if (req.mtd_num < 0 ||
req              1027 drivers/mtd/ubi/cdev.c 		    (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
req              1032 drivers/mtd/ubi/cdev.c 		mtd = get_mtd_device(NULL, req.mtd_num);
req              1043 drivers/mtd/ubi/cdev.c 		err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset,
req              1044 drivers/mtd/ubi/cdev.c 					 req.max_beb_per1024);
req               192 drivers/mtd/ubi/debug.c void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
req               197 drivers/mtd/ubi/debug.c 	pr_err("\tvol_id    %d\n",   req->vol_id);
req               198 drivers/mtd/ubi/debug.c 	pr_err("\talignment %d\n",   req->alignment);
req               199 drivers/mtd/ubi/debug.c 	pr_err("\tbytes     %lld\n", (long long)req->bytes);
req               200 drivers/mtd/ubi/debug.c 	pr_err("\tvol_type  %d\n",   req->vol_type);
req               201 drivers/mtd/ubi/debug.c 	pr_err("\tname_len  %d\n",   req->name_len);
req               203 drivers/mtd/ubi/debug.c 	memcpy(nm, req->name, 16);
req                47 drivers/mtd/ubi/debug.h void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req);
req               844 drivers/mtd/ubi/ubi.h int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
req               857 drivers/mtd/ubi/ubi.h 			 const struct ubi_leb_change_req *req);
req               169 drivers/mtd/ubi/upd.c 			 const struct ubi_leb_change_req *req)
req               174 drivers/mtd/ubi/upd.c 		vol->vol_id, req->lnum, req->bytes);
req               175 drivers/mtd/ubi/upd.c 	if (req->bytes == 0)
req               176 drivers/mtd/ubi/upd.c 		return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0);
req               178 drivers/mtd/ubi/upd.c 	vol->upd_bytes = req->bytes;
req               181 drivers/mtd/ubi/upd.c 	vol->ch_lnum = req->lnum;
req               183 drivers/mtd/ubi/upd.c 	vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
req               144 drivers/mtd/ubi/vmt.c int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
req               146 drivers/mtd/ubi/vmt.c 	int i, err, vol_id = req->vol_id;
req               164 drivers/mtd/ubi/vmt.c 	if (req->flags & UBI_VOL_SKIP_CRC_CHECK_FLG)
req               182 drivers/mtd/ubi/vmt.c 		req->vol_id = vol_id;
req               186 drivers/mtd/ubi/vmt.c 		ubi->ubi_num, vol_id, (unsigned long long)req->bytes,
req               187 drivers/mtd/ubi/vmt.c 		(int)req->vol_type, req->name);
req               199 drivers/mtd/ubi/vmt.c 		    ubi->volumes[i]->name_len == req->name_len &&
req               200 drivers/mtd/ubi/vmt.c 		    !strcmp(ubi->volumes[i]->name, req->name)) {
req               202 drivers/mtd/ubi/vmt.c 				req->name, i);
req               207 drivers/mtd/ubi/vmt.c 	vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment;
req               208 drivers/mtd/ubi/vmt.c 	vol->reserved_pebs = div_u64(req->bytes + vol->usable_leb_size - 1,
req               226 drivers/mtd/ubi/vmt.c 	vol->alignment = req->alignment;
req               228 drivers/mtd/ubi/vmt.c 	vol->vol_type  = req->vol_type;
req               229 drivers/mtd/ubi/vmt.c 	vol->name_len  = req->name_len;
req               230 drivers/mtd/ubi/vmt.c 	memcpy(vol->name, req->name, vol->name_len);
req               555 drivers/net/ethernet/8390/ax88796.c static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
req               565 drivers/net/ethernet/8390/ax88796.c 	return phy_mii_ioctl(phy_dev, req, cmd);
req              3017 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	u64 cur[BNX2X_MCAST_VEC_SZ], req[BNX2X_MCAST_VEC_SZ];
req              3023 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	memset(req, 0, sizeof(u64) * BNX2X_MCAST_VEC_SZ);
req              3035 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		BIT_VEC64_SET_BIT(req, bin);
req              3050 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		bool b_required = !!BIT_VEC64_TEST_BIT(req, i);
req               265 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h 	union vfpf_tlvs req;
req                55 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
req               226 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
req               234 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));
req               241 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->vfdev_info.vf_id = vf_id;
req               242 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->vfdev_info.vf_os = 0;
req               243 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->vfdev_info.fp_hsi_ver = ETH_FP_HSI_VERSION;
req               245 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->resc_request.num_rxqs = rx_count;
req               246 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->resc_request.num_txqs = tx_count;
req               247 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->resc_request.num_sbs = bp->igu_sb_cnt;
req               248 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
req               249 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
req               250 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->resc_request.num_vlan_filters = VF_ACQUIRE_VLAN_FILTERS;
req               253 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->bulletin_addr = bp->pf2vf_bulletin_mapping;
req               256 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
req               260 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN;
req               262 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->vfdev_info.caps |= VF_CAP_SUPPORT_VLAN_FILTER;
req               265 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_add_tlv(bp, req,
req               266 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		      req->first_tlv.tl.length + sizeof(struct channel_tlv),
req               271 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_dp_tlv_list(bp, req);
req               303 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			req->resc_request.num_txqs =
req               304 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 				min(req->resc_request.num_txqs,
req               306 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			req->resc_request.num_rxqs =
req               307 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 				min(req->resc_request.num_rxqs,
req               309 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			req->resc_request.num_sbs =
req               310 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 				min(req->resc_request.num_sbs,
req               312 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			req->resc_request.num_mac_filters =
req               313 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 				min(req->resc_request.num_mac_filters,
req               315 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			req->resc_request.num_vlan_filters =
req               316 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 				min(req->resc_request.num_vlan_filters,
req               318 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			req->resc_request.num_mc_filters =
req               319 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 				min(req->resc_request.num_mc_filters,
req               360 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		bnx2x_vfpf_finalize(bp, &req->first_tlv);
req               392 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
req               398 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
req               403 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
req               410 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->vf_id = vf_id;
req               413 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
req               417 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_dp_tlv_list(bp, req);
req               437 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
req               445 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
req               450 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));
req               454 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
req               458 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->stats_addr = bp->fw_stats_data_mapping +
req               461 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->stats_stride = sizeof(struct per_queue_stats);
req               464 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
req               468 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_dp_tlv_list(bp, req);
req               483 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
req               491 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
req               510 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));
req               512 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->vf_id = vf_id;
req               515 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
req               519 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_dp_tlv_list(bp, req);
req               530 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
req               603 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
req               610 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
req               630 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->vf_qid = fp_idx;
req               631 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;
req               634 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.rcq_addr = fp->rx_comp_mapping;
req               635 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
req               636 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.rxq_addr = fp->rx_desc_mapping;
req               637 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.sge_addr = fp->rx_sge_mapping;
req               638 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.vf_sb = fp_idx;
req               639 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
req               640 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
req               641 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.mtu = bp->dev->mtu;
req               642 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.buf_sz = fp->rx_buf_size;
req               643 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
req               644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.tpa_agg_sz = tpa_agg_size;
req               645 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
req               646 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
req               648 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.flags = flags;
req               649 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.drop_flags = 0;
req               650 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
req               651 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.stat_id = -1; /* No stats at the moment */
req               654 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
req               655 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->txq.vf_sb = fp_idx;
req               656 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
req               657 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
req               658 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->txq.flags = flags;
req               659 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;
req               662 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
req               666 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_dp_tlv_list(bp, req);
req               679 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
req               686 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
req               691 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
req               692 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			sizeof(*req));
req               694 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->vf_qid = qidx;
req               697 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
req               701 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_dp_tlv_list(bp, req);
req               719 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
req               727 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
req               733 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
req               734 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			sizeof(*req));
req               736 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
req               737 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->vf_qid = vf_qid;
req               738 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->n_mac_vlan_filters = 1;
req               740 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
req               742 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		req->filters[0].flags |= VFPF_Q_FILTER_SET;
req               748 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	memcpy(req->filters[0].mac, addr, ETH_ALEN);
req               751 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
req               755 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_dp_tlv_list(bp, req);
req               775 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			memcpy(req->filters[0].mac, bp->dev->dev_addr,
req               792 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
req               802 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
req               806 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
req               807 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			sizeof(*req));
req               810 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
req               813 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
req               814 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
req               815 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
req               816 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rss_key_size = T_ETH_RSS_KEY;
req               817 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rss_result_mask = params->rss_result_mask;
req               821 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		req->rss_flags |= VFPF_RSS_MODE_DISABLED;
req               823 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		req->rss_flags |= VFPF_RSS_MODE_REGULAR;
req               825 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		req->rss_flags |= VFPF_RSS_SET_SRCH;
req               827 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		req->rss_flags |= VFPF_RSS_IPV4;
req               829 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		req->rss_flags |= VFPF_RSS_IPV4_TCP;
req               831 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		req->rss_flags |= VFPF_RSS_IPV4_UDP;
req               833 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		req->rss_flags |= VFPF_RSS_IPV6;
req               835 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		req->rss_flags |= VFPF_RSS_IPV6_TCP;
req               837 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		req->rss_flags |= VFPF_RSS_IPV6_UDP;
req               839 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);
req               842 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_dp_tlv_list(bp, req);
req               861 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
req               869 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
req               880 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
req               881 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			sizeof(*req));
req               898 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
req               902 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->n_multicast = i;
req               903 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
req               904 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->vf_qid = 0;
req               907 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
req               911 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_dp_tlv_list(bp, req);
req               924 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
req               932 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
req               942 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
req               943 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			sizeof(*req));
req               945 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
req               946 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->vf_qid = vf_qid;
req               947 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->n_mac_vlan_filters = 1;
req               949 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->filters[0].flags = VFPF_Q_FILTER_VLAN_TAG_VALID;
req               952 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		req->filters[0].flags |= VFPF_Q_FILTER_SET;
req               963 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->filters[0].vlan_tag = vid;
req               966 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
req               970 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_dp_tlv_list(bp, req);
req               985 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
req               993 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
req               998 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
req               999 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			sizeof(*req));
req              1005 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
req              1010 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
req              1011 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
req              1012 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
req              1014 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
req              1018 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
req              1020 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
req              1021 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->vf_qid = 0;
req              1024 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
req              1028 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_dp_tlv_list(bp, req);
req              1039 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
req              1345 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
req              1386 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
req              1391 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
req              1401 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;
req              1427 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
req              1470 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	struct vfpf_init_tlv *init = &mbx->msg->req.init;
req              1521 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
req              1736 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;
req              1921 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
req              1947 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	int qid = mbx->msg->req.q_op.vf_qid;
req              1983 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
req              2067 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
req              2172 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 				mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
req              2264 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 		mbx->first_tlv = mbx->msg->req.first_tlv;
req              4140 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct input *req = request;
req              4142 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req->req_type = cpu_to_le16(req_type);
req              4143 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req->cmpl_ring = cpu_to_le16(cmpl_ring);
req              4144 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req->target_id = cpu_to_le16(target_id);
req              4145 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	if (bnxt_kong_hwrm_message(bp, req))
req              4146 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
req              4148 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
req              4181 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct input *req = msg;
req              4203 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	if (bnxt_hwrm_kong_chnl(bp, req)) {
req              4212 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	cp_ring_id = le16_to_cpu(req->cmpl_ring);
req              4215 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
req              4218 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
req              4231 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		memcpy(short_cmd_req, req, msg_len);
req              4236 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		short_input.req_type = req->req_type;
req              4297 drivers/net/ethernet/broadcom/bnxt/bnxt.c 					   le16_to_cpu(req->req_type));
req              4325 drivers/net/ethernet/broadcom/bnxt/bnxt.c 					   le16_to_cpu(req->req_type),
req              4326 drivers/net/ethernet/broadcom/bnxt/bnxt.c 					   le16_to_cpu(req->seq_id), len);
req              4344 drivers/net/ethernet/broadcom/bnxt/bnxt.c 					   le16_to_cpu(req->req_type),
req              4345 drivers/net/ethernet/broadcom/bnxt/bnxt.c 					   le16_to_cpu(req->seq_id), len,
req              4399 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_func_drv_rgtr_input req = {0};
req              4404 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
req              4406 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.enables =
req              4426 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.async_event_fwd[i] |= cpu_to_le32(events[i]);
req              4428 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              4434 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_func_drv_rgtr_input req = {0};
req              4438 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
req              4440 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.enables =
req              4444 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
req              4450 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.flags = cpu_to_le32(flags);
req              4451 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.ver_maj_8b = DRV_VER_MAJ;
req              4452 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.ver_min_8b = DRV_VER_MIN;
req              4453 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.ver_upd_8b = DRV_VER_UPD;
req              4454 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
req              4455 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.ver_min = cpu_to_le16(DRV_VER_MIN);
req              4456 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.ver_upd = cpu_to_le16(DRV_VER_UPD);
req              4473 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req.vf_req_fwd[i] = cpu_to_le32(data[i]);
req              4475 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.enables |=
req              4480 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.flags |= cpu_to_le32(
req              4484 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              4494 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_func_drv_unrgtr_input req = {0};
req              4496 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
req              4497 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              4503 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_tunnel_dst_port_free_input req = {0};
req              4505 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
req              4506 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.tunnel_type = tunnel_type;
req              4510 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
req              4513 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
req              4519 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              4530 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_tunnel_dst_port_alloc_input req = {0};
req              4533 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
req              4535 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.tunnel_type = tunnel_type;
req              4536 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.tunnel_dst_port_val = port;
req              4539 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              4564 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
req              4567 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
req              4568 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
req              4570 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
req              4571 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
req              4572 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.mask = cpu_to_le32(vnic->rx_mask);
req              4573 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              4580 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_cfa_ntuple_filter_free_input req = {0};
req              4582 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
req              4583 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.ntuple_filter_id = fltr->filter_id;
req              4584 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              4609 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
req              4616 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
req              4617 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
req              4621 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.rfs_ring_tbl_idx = cpu_to_le16(fltr->rxq);
req              4626 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
req              4627 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS | dst_ena);
req              4629 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.ethertype = htons(ETH_P_IP);
req              4630 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
req              4631 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
req              4632 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.ip_protocol = keys->basic.ip_proto;
req              4637 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.ethertype = htons(ETH_P_IPV6);
req              4638 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.ip_addr_type =
req              4640 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		*(struct in6_addr *)&req.src_ipaddr[0] =
req              4642 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		*(struct in6_addr *)&req.dst_ipaddr[0] =
req              4645 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
req              4646 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
req              4649 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.src_ipaddr[0] = keys->addrs.v4addrs.src;
req              4650 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
req              4651 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
req              4652 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
req              4655 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
req              4656 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.tunnel_type =
req              4660 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.src_port = keys->ports.src;
req              4661 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.src_port_mask = cpu_to_be16(0xffff);
req              4662 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.dst_port = keys->ports.dst;
req              4663 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.dst_port_mask = cpu_to_be16(0xffff);
req              4666 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              4668 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		resp = bnxt_get_hwrm_resp_addr(bp, &req);
req              4680 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_cfa_l2_filter_alloc_input req = {0};
req              4683 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
req              4684 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
req              4686 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.flags |=
req              4688 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
req              4689 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.enables =
req              4693 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	memcpy(req.l2_addr, mac_addr, ETH_ALEN);
req              4694 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.l2_addr_mask[0] = 0xff;
req              4695 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.l2_addr_mask[1] = 0xff;
req              4696 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.l2_addr_mask[2] = 0xff;
req              4697 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.l2_addr_mask[3] = 0xff;
req              4698 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.l2_addr_mask[4] = 0xff;
req              4699 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.l2_addr_mask[5] = 0xff;
req              4702 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              4721 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			struct hwrm_cfa_l2_filter_free_input req = {0};
req              4723 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			bnxt_hwrm_cmd_hdr_init(bp, &req,
req              4726 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req.l2_filter_id = vnic->fw_l2_filter_id[j];
req              4728 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			rc = _hwrm_send_message(bp, &req, sizeof(req),
req              4742 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_vnic_tpa_cfg_input req = {0};
req              4747 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
req              4761 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.flags = cpu_to_le32(flags);
req              4763 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.enables =
req              4787 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.max_agg_segs = cpu_to_le16(segs);
req              4788 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.max_aggs = cpu_to_le16(max_aggs);
req              4790 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.min_agg_len = cpu_to_le32(512);
req              4792 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
req              4794 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              4835 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_vnic_rss_cfg_input req = {0};
req              4841 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
req              4843 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
req              4844 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
req              4861 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
req              4862 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.hash_key_tbl_addr =
req              4865 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
req              4866 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              4874 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_vnic_rss_cfg_input req = {0};
req              4876 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
req              4877 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
req              4879 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              4882 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
req              4883 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
req              4884 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
req              4885 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
req              4891 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.ring_table_pair_index = i;
req              4892 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
req              4907 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              4917 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_vnic_plcmodes_cfg_input req = {0};
req              4919 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
req              4920 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
req              4923 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.enables =
req              4927 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
req              4928 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
req              4929 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
req              4930 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              4936 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
req              4938 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
req              4939 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.rss_cos_lb_ctx_id =
req              4942 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              4964 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
req              4968 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
req              4972 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              4992 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_vnic_cfg_input req = {0};
req              4995 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
req              5000 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.default_rx_ring_id =
req              5002 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.default_cmpl_ring_id =
req              5004 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.enables =
req              5009 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
req              5012 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
req              5013 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
req              5016 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.rss_rule =
req              5018 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
req              5020 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
req              5022 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.rss_rule = cpu_to_le16(0xffff);
req              5027 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
req              5028 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
req              5030 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.cos_rule = cpu_to_le16(0xffff);
req              5041 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
req              5042 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.lb_rule = cpu_to_le16(0xffff);
req              5044 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
req              5047 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
req              5053 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
req              5055 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
req              5057 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              5065 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct hwrm_vnic_free_input req = {0};
req              5067 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
req              5068 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.vnic_id =
req              5071 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              5091 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_vnic_alloc_input req = {0};
req              5113 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
req              5115 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
req              5118 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              5128 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_vnic_qcaps_input req = {0};
req              5136 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
req              5138 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              5167 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct hwrm_ring_grp_alloc_input req = {0};
req              5172 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
req              5174 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
req              5175 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
req              5176 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
req              5177 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
req              5179 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rc = _hwrm_send_message(bp, &req, sizeof(req),
req              5195 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_ring_grp_free_input req = {0};
req              5200 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
req              5206 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.ring_group_id =
req              5209 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rc = _hwrm_send_message(bp, &req, sizeof(req),
req              5222 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_ring_alloc_input req = {0};
req              5228 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
req              5230 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.enables = 0;
req              5232 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
req              5234 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.page_size = BNXT_PAGE_SHIFT;
req              5235 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.page_tbl_depth = 1;
req              5237 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.page_tbl_addr =  cpu_to_le64(rmem->dma_arr[0]);
req              5239 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.fbo = 0;
req              5241 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.logical_id = cpu_to_le16(map_index);
req              5249 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
req              5252 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
req              5253 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.length = cpu_to_le32(bp->tx_ring_mask + 1);
req              5254 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
req              5255 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.queue_id = cpu_to_le16(ring->queue_id);
req              5259 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
req              5260 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.length = cpu_to_le32(bp->rx_ring_mask + 1);
req              5266 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
req              5267 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
req              5268 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req.enables |= cpu_to_le32(
req              5272 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req.flags = cpu_to_le16(flags);
req              5277 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
req              5280 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
req              5281 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
req              5282 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
req              5283 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req.enables |= cpu_to_le32(
req              5287 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
req              5289 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
req              5292 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
req              5293 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
req              5297 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
req              5298 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req.cq_handle = cpu_to_le64(ring->handle);
req              5299 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req.enables |= cpu_to_le32(
req              5302 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
req              5306 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
req              5307 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
req              5309 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
req              5318 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              5337 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct hwrm_func_cfg_input req = {0};
req              5339 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req              5340 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.fid = cpu_to_le16(0xffff);
req              5341 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
req              5342 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.async_event_cr = cpu_to_le16(idx);
req              5343 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              5345 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct hwrm_func_vf_cfg_input req = {0};
req              5347 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
req              5348 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.enables =
req              5350 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.async_event_cr = cpu_to_le16(idx);
req              5351 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              5525 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_ring_free_input req = {0};
req              5532 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
req              5533 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.ring_type = ring_type;
req              5534 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.ring_id = cpu_to_le16(ring->fw_ring_id);
req              5537 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              5657 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_func_qcfg_input req = {0};
req              5663 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
req              5664 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.fid = cpu_to_le16(0xffff);
req              5666 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              5710 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_func_qcfg_input req = {0};
req              5716 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
req              5717 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.fid = cpu_to_le16(fid);
req              5718 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              5728 drivers/net/ethernet/broadcom/bnxt/bnxt.c __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
req              5734 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
req              5735 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req->fid = cpu_to_le16(0xffff);
req              5737 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req->num_tx_rings = cpu_to_le16(tx_rings);
req              5756 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->num_rx_rings = cpu_to_le16(rx_rings);
req              5758 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
req              5759 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req->num_msix = cpu_to_le16(cp_rings);
req              5760 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req->num_rsscos_ctxs =
req              5763 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req->num_cmpl_rings = cpu_to_le16(cp_rings);
req              5764 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req->num_hw_ring_grps = cpu_to_le16(ring_grps);
req              5765 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req->num_rsscos_ctxs = cpu_to_le16(1);
req              5768 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				req->num_rsscos_ctxs =
req              5771 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->num_stat_ctxs = cpu_to_le16(stats);
req              5772 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->num_vnics = cpu_to_le16(vnics);
req              5774 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req->enables = cpu_to_le32(enables);
req              5779 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			     struct hwrm_func_vf_cfg_input *req, int tx_rings,
req              5785 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
req              5802 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
req              5803 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req->num_tx_rings = cpu_to_le16(tx_rings);
req              5804 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req->num_rx_rings = cpu_to_le16(rx_rings);
req              5806 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
req              5807 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
req              5809 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->num_cmpl_rings = cpu_to_le16(cp_rings);
req              5810 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->num_hw_ring_grps = cpu_to_le16(ring_grps);
req              5811 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
req              5813 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req->num_stat_ctxs = cpu_to_le16(stats);
req              5814 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req->num_vnics = cpu_to_le16(vnics);
req              5816 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req->enables = cpu_to_le32(enables);
req              5823 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_func_cfg_input req = {0};
req              5826 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
req              5828 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	if (!req.enables)
req              5831 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              5846 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_func_vf_cfg_input req = {0};
req              5854 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
req              5856 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              6019 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_func_vf_cfg_input req = {0};
req              6026 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
req              6037 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.flags = cpu_to_le32(flags);
req              6038 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              6046 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_func_cfg_input req = {0};
req              6050 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
req              6065 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.flags = cpu_to_le32(flags);
req              6066 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              6090 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_ring_aggint_qcaps_input req = {0};
req              6106 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
req              6108 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              6140 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
req              6152 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req->num_cmpl_aggr_int = cpu_to_le16(val);
req              6155 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req->num_cmpl_dma_aggr = cpu_to_le16(val);
req              6159 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
req              6163 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req->int_lat_tmr_max = cpu_to_le16(tmr);
req              6169 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->int_lat_tmr_min = cpu_to_le16(val);
req              6170 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
req              6175 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
req              6182 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
req              6183 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->enables |=
req              6192 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req->flags = cpu_to_le16(flags);
req              6193 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
req              6200 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
req              6209 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
req              6211 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
req              6212 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.flags =
req              6217 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.int_lat_tmr_min = cpu_to_le16(tmr);
req              6218 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
req              6219 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              6254 drivers/net/ethernet/broadcom/bnxt/bnxt.c 							   req_tx = {0}, *req;
req              6270 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req = &req_rx;
req              6273 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req = &req_tx;
req              6277 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->ring_id = cpu_to_le16(ring_id);
req              6279 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rc = _hwrm_send_message(bp, req, sizeof(*req),
req              6288 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req = &req_tx;
req              6290 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req->ring_id = cpu_to_le16(ring_id);
req              6291 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			rc = _hwrm_send_message(bp, req, sizeof(*req),
req              6309 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_stat_ctx_free_input req = {0};
req              6317 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
req              6325 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
req              6327 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			rc = _hwrm_send_message(bp, &req, sizeof(req),
req              6340 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_stat_ctx_alloc_input req = {0};
req              6346 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
req              6348 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
req              6349 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
req              6356 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
req              6358 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rc = _hwrm_send_message(bp, &req, sizeof(req),
req              6373 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_func_qcfg_input req = {0};
req              6378 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
req              6379 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.fid = cpu_to_le16(0xffff);
req              6381 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              6430 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_func_backing_store_qcaps_input req = {0};
req              6438 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
req              6440 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              6531 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_func_backing_store_cfg_input req = {0};
req              6544 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
req              6545 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.enables = cpu_to_le32(enables);
req              6549 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
req              6550 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
req              6551 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
req              6552 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
req              6554 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				      &req.qpc_pg_size_qpc_lvl,
req              6555 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				      &req.qpc_page_dir);
req              6559 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
req              6560 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
req              6561 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
req              6563 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				      &req.srq_pg_size_srq_lvl,
req              6564 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				      &req.srq_page_dir);
req              6568 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
req              6569 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
req              6570 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
req              6571 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
req              6572 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				      &req.cq_page_dir);
req              6576 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.vnic_num_vnic_entries =
req              6578 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.vnic_num_ring_table_entries =
req              6580 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
req              6582 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				      &req.vnic_pg_size_vnic_lvl,
req              6583 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				      &req.vnic_page_dir);
req              6587 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
req              6588 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
req              6590 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				      &req.stat_pg_size_stat_lvl,
req              6591 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				      &req.stat_page_dir);
req              6595 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
req              6599 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
req              6601 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				      &req.mrav_pg_size_mrav_lvl,
req              6602 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				      &req.mrav_page_dir);
req              6606 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
req              6607 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
req              6609 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				      &req.tim_pg_size_tim_lvl,
req              6610 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				      &req.tim_page_dir);
req              6612 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	for (i = 0, num_entries = &req.tqm_sp_num_entries,
req              6613 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	     pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
req              6614 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	     pg_dir = &req.tqm_sp_page_dir,
req              6620 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
req              6625 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.flags = cpu_to_le32(flags);
req              6626 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              6878 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_func_resource_qcaps_input req = {0};
req              6882 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
req              6883 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.fid = cpu_to_le16(0xffff);
req              6886 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
req              6935 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_func_qcaps_input req = {0};
req              6940 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
req              6941 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.fid = cpu_to_le16(0xffff);
req              6944 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              7036 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
req              7045 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
req              7048 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              7093 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_error_recovery_qcfg_input req = {0};
req              7099 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
req              7101 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              7153 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_func_reset_input req = {0};
req              7155 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
req              7156 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.enables = 0;
req              7158 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
req              7164 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_queue_qportcfg_input req = {0};
req              7169 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
req              7172 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              7212 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_ver_get_input req = {0};
req              7215 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
req              7216 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
req              7217 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.hwrm_intf_min = HWRM_VERSION_MINOR;
req              7218 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
req              7220 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
req              7304 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_fw_set_time_input req = {0};
req              7313 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
req              7314 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.year = cpu_to_le16(1900 + tm.tm_year);
req              7315 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.month = 1 + tm.tm_mon;
req              7316 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.day = tm.tm_mday;
req              7317 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.hour = tm.tm_hour;
req              7318 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.minute = tm.tm_min;
req              7319 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.second = tm.tm_sec;
req              7320 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              7327 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_port_qstats_input req = {0};
req              7332 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
req              7333 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.port_id = cpu_to_le16(pf->port_id);
req              7334 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
req              7335 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
req              7336 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              7344 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_port_qstats_ext_input req = {0};
req              7352 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
req              7353 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.port_id = cpu_to_le16(pf->port_id);
req              7354 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
req              7355 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
req              7358 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.tx_stat_size = cpu_to_le16(tx_stat_size);
req              7359 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
req              7361 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              7412 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_pcie_qstats_input req = {0};
req              7417 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
req              7418 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
req              7419 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
req              7420 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              7498 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_func_cfg_input req = {0};
req              7501 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req              7502 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.fid = cpu_to_le16(0xffff);
req              7503 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
req              7505 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
req              7507 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
req              7510 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              7516 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_func_cfg_input req = {0};
req              7522 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req              7523 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.fid = cpu_to_le16(0xffff);
req              7524 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
req              7525 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
req              7527 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
req              7529 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              8408 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_port_phy_qcaps_input req = {0};
req              8418 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
req              8421 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              8455 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_port_phy_qcfg_input req = {0};
req              8460 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
req              8463 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              8599 drivers/net/ethernet/broadcom/bnxt/bnxt.c bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
req              8603 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req->auto_pause =
req              8606 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
req              8608 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
req              8609 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->enables |=
req              8613 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
req              8615 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
req              8616 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->enables |=
req              8619 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req->auto_pause = req->force_pause;
req              8620 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req->enables |= cpu_to_le32(
req              8627 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				      struct hwrm_port_phy_cfg_input *req)
req              8634 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->auto_mode |=
req              8637 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->enables |= cpu_to_le32(
req              8639 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->auto_link_speed_mask = cpu_to_le16(advertising);
req              8641 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
req              8642 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->flags |=
req              8645 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->force_link_speed = cpu_to_le16(fw_link_speed);
req              8646 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
req              8650 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
req              8655 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_port_phy_cfg_input req = {0};
req              8658 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
req              8659 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_set_pause_common(bp, &req);
req              8663 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bnxt_hwrm_set_link_common(bp, &req);
req              8666 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              8684 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			      struct hwrm_port_phy_cfg_input *req)
req              8697 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->flags |= cpu_to_le32(flags);
req              8699 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
req              8700 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
req              8702 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
req              8708 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_port_phy_cfg_input req = {0};
req              8710 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
req              8712 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bnxt_hwrm_set_pause_common(bp, &req);
req              8714 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_set_link_common(bp, &req);
req              8717 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bnxt_hwrm_set_eee(bp, &req);
req              8718 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              8723 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_port_phy_cfg_input req = {0};
req              8731 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
req              8732 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
req              8733 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              8741 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_func_drv_if_change_input req = {0};
req              8749 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
req              8751 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
req              8753 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              8813 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_port_led_qcaps_input req = {0};
req              8821 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
req              8822 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.port_id = cpu_to_le16(pf->port_id);
req              8824 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              8852 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_wol_filter_alloc_input req = {0};
req              8856 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
req              8857 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.port_id = cpu_to_le16(bp->pf.port_id);
req              8858 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
req              8859 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
req              8860 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
req              8862 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              8871 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_wol_filter_free_input req = {0};
req              8874 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
req              8875 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.port_id = cpu_to_le16(bp->pf.port_id);
req              8876 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
req              8877 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.wol_filter_id = bp->wol_filter_id;
req              8878 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              8884 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_wol_filter_qcfg_input req = {0};
req              8889 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
req              8890 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.port_id = cpu_to_le16(bp->pf.port_id);
req              8891 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.handle = cpu_to_le16(handle);
req              8893 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              8925 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_temp_monitor_query_input req = {0};
req              8931 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
req              8933 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
req              9341 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_port_phy_mdio_read_input req = {0};
req              9347 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
req              9348 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.port_id = cpu_to_le16(bp->pf.port_id);
req              9349 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.phy_addr = phy_addr;
req              9350 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.reg_addr = cpu_to_le16(reg & 0x1f);
req              9352 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.cl45_mdio = 1;
req              9353 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.phy_addr = mdio_phy_id_prtad(phy_addr);
req              9354 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.dev_addr = mdio_phy_id_devad(phy_addr);
req              9355 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.reg_addr = cpu_to_le16(reg);
req              9359 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              9369 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_port_phy_mdio_write_input req = {0};
req              9374 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
req              9375 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.port_id = cpu_to_le16(bp->pf.port_id);
req              9376 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.phy_addr = phy_addr;
req              9377 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.reg_addr = cpu_to_le16(reg & 0x1f);
req              9379 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.cl45_mdio = 1;
req              9380 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.phy_addr = mdio_phy_id_prtad(phy_addr);
req              9381 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.dev_addr = mdio_phy_id_devad(phy_addr);
req              9382 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.reg_addr = cpu_to_le16(reg);
req              9384 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.reg_data = cpu_to_le16(val);
req              9386 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              9624 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct hwrm_cfa_l2_filter_free_input req = {0};
req              9626 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
req              9629 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.l2_filter_id = vnic->fw_l2_filter_id[i];
req              9631 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rc = _hwrm_send_message(bp, &req, sizeof(req),
req              9867 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct hwrm_dbg_ring_info_get_input req = {0};
req              9870 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
req              9871 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.ring_type = ring_type;
req              9872 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.fw_ring_id = cpu_to_le32(ring_id);
req              9874 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              10632 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct hwrm_fw_reset_input req = {0};
req              10635 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
req              10636 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
req              10637 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
req              10638 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
req              10639 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
req              10640 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              1926 drivers/net/ethernet/broadcom/bnxt/bnxt.h static inline bool bnxt_kong_hwrm_message(struct bnxt *bp, struct input *req)
req              1929 drivers/net/ethernet/broadcom/bnxt/bnxt.h 		bnxt_cfa_hwrm_message(le16_to_cpu(req->req_type)));
req              1932 drivers/net/ethernet/broadcom/bnxt/bnxt.h static inline bool bnxt_hwrm_kong_chnl(struct bnxt *bp, struct input *req)
req              1935 drivers/net/ethernet/broadcom/bnxt/bnxt.h 		req->resp_addr == cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr));
req              1938 drivers/net/ethernet/broadcom/bnxt/bnxt.h static inline void *bnxt_get_hwrm_resp_addr(struct bnxt *bp, void *req)
req              1940 drivers/net/ethernet/broadcom/bnxt/bnxt.h 	if (bnxt_hwrm_kong_chnl(bp, (struct input *)req))
req                41 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	struct hwrm_queue_pri2cos_cfg_input req = {0};
req                45 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1);
req                46 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
req                49 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	pri2cos = &req.pri0_cos_queue_id;
req                53 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 		req.enables |= cpu_to_le32(
req                59 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req                66 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	struct hwrm_queue_pri2cos_qcfg_input req = {0};
req                69 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
req                70 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
req                73 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req                94 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	struct hwrm_queue_cos2bw_cfg_input req = {0};
req                99 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
req               103 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 		req.enables |= cpu_to_le32(
req               124 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 		data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4);
req               127 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 			req.queue_id0 = cos2bw.queue_id;
req               128 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 			req.unused_0 = 0;
req               131 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               138 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	struct hwrm_queue_cos2bw_qcfg_input req = {0};
req               143 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
req               146 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               234 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	struct hwrm_queue_pfcenable_cfg_input req = {0};
req               272 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
req               273 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	req.flags = cpu_to_le32(pri_mask);
req               274 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               284 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	struct hwrm_queue_pfcenable_qcfg_input req = {0};
req               288 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
req               291 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               389 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	struct hwrm_queue_dscp_qcaps_input req = {0};
req               396 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1);
req               398 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               412 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	struct hwrm_queue_dscp2pri_cfg_input req = {0};
req               420 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP2PRI_CFG, -1, -1);
req               426 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	req.src_data_addr = cpu_to_le64(mapping);
req               433 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	req.entry_cnt = cpu_to_le16(1);
req               434 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               295 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c 	struct hwrm_nvm_get_variable_input *req = msg;
req               325 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c 	req->dest_data_addr = cpu_to_le64(data_dma_addr);
req               326 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c 	req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
req               327 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c 	req->option_num = cpu_to_le16(nvm_param.offset);
req               328 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c 	req->index_0 = cpu_to_le16(idx);
req               330 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c 		req->dimensions = cpu_to_le16(1);
req               332 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c 	if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
req               360 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c 	struct hwrm_nvm_get_variable_input req = {0};
req               364 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
req               365 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c 	rc = bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
req               376 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c 	struct hwrm_nvm_set_variable_input req = {0};
req               379 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1);
req               384 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c 	return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
req              1723 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	struct hwrm_nvm_write_input req = {0};
req              1727 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);
req              1729 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.dir_type = cpu_to_le16(dir_type);
req              1730 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.dir_ordinal = cpu_to_le16(dir_ordinal);
req              1731 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.dir_ext = cpu_to_le16(dir_ext);
req              1732 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.dir_attr = cpu_to_le16(dir_attr);
req              1733 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.dir_data_length = cpu_to_le32(data_len);
req              1743 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.host_src_addr = cpu_to_le64(dma_handle);
req              1745 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
req              1756 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	struct hwrm_fw_reset_input req = {0};
req              1760 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
req              1768 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
req              1770 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
req              1774 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
req              1776 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
req              1780 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		req.embedded_proc_type =
req              1785 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
req              1788 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
req              1789 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
req              1792 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP;
req              1798 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              2118 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	struct hwrm_nvm_get_dir_info_input req = {0};
req              2121 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);
req              2124 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              2155 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	struct hwrm_nvm_get_dir_entries_input req = {0};
req              2178 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
req              2179 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.host_dest_addr = cpu_to_le64(dma_handle);
req              2180 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              2194 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	struct hwrm_nvm_read_input req = {0};
req              2206 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
req              2207 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.host_dest_addr = cpu_to_le64(dma_handle);
req              2208 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.dir_idx = cpu_to_le16(index);
req              2209 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.offset = cpu_to_le32(offset);
req              2210 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.len = cpu_to_le32(length);
req              2212 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              2225 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	struct hwrm_nvm_find_dir_entry_input req = {0};
req              2228 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
req              2229 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.enables = 0;
req              2230 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.dir_idx = 0;
req              2231 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.dir_type = cpu_to_le16(type);
req              2232 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.dir_ordinal = cpu_to_le16(ordinal);
req              2233 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.dir_ext = cpu_to_le16(ext);
req              2234 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
req              2236 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              2341 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	struct hwrm_nvm_erase_dir_entry_input req = {0};
req              2343 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
req              2344 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.dir_idx = cpu_to_le16(index);
req              2345 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              2467 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	struct hwrm_port_phy_i2c_read_input req = {0};
req              2471 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
req              2472 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.i2c_slave_addr = i2c_addr;
req              2473 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.page_number = cpu_to_le16(page_number);
req              2474 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.port_id = cpu_to_le16(bp->pf.port_id);
req              2480 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		req.page_offset = cpu_to_le16(start_addr + byte_offset);
req              2481 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		req.data_length = xfer_size;
req              2482 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		req.enables = cpu_to_le32(start_addr + byte_offset ?
req              2485 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		rc = _hwrm_send_message(bp, &req, sizeof(req),
req              2600 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	struct hwrm_port_led_cfg_input req = {0};
req              2620 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1);
req              2621 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.port_id = cpu_to_le16(pf->port_id);
req              2622 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.num_leds = bp->num_leds;
req              2623 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
req              2625 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		req.enables |= BNXT_LED_DFLT_ENABLES(i);
req              2632 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              2638 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	struct hwrm_selftest_irq_input req = {0};
req              2640 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1);
req              2641 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              2661 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	struct hwrm_port_mac_cfg_input req = {0};
req              2663 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
req              2665 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
req              2667 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
req              2669 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
req              2670 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              2676 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	struct hwrm_port_phy_qcaps_input req = {0};
req              2679 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
req              2681 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              2690 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 				    struct hwrm_port_phy_cfg_input *req)
req              2716 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req->force_link_speed = cpu_to_le16(fw_speed);
req              2717 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
req              2719 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT);
req              2720 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req->flags = 0;
req              2721 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req->force_link_speed = cpu_to_le16(0);
req              2727 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	struct hwrm_port_phy_cfg_input req = {0};
req              2729 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
req              2732 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		bnxt_disable_an_for_lpbk(bp, &req);
req              2734 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 			req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
req              2736 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 			req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
req              2738 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
req              2740 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
req              2741 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              2860 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	struct hwrm_selftest_exec_input req = {0};
req              2863 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
req              2866 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.flags = test_mask;
req              2867 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout);
req              3093 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	struct hwrm_dbg_coredump_list_input req = {0};
req              3097 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1);
req              3104 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
req              3116 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	struct hwrm_dbg_coredump_initiate_input req = {0};
req              3118 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1);
req              3119 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.component_id = cpu_to_le16(component_id);
req              3120 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.segment_id = cpu_to_le16(segment_id);
req              3122 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_COREDUMP_TIMEOUT);
req              3129 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	struct hwrm_dbg_coredump_retrieve_input req = {0};
req              3133 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1);
req              3134 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.component_id = cpu_to_le16(component_id);
req              3135 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	req.segment_id = cpu_to_le16(segment_id);
req              3148 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
req              3367 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	struct hwrm_selftest_qlist_input req = {0};
req              3379 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1);
req              3381 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req                28 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	struct hwrm_fwd_async_event_cmpl_input req = {0};
req                32 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
req                34 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
req                37 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.encap_async_event_target_id = cpu_to_le16(0xffff);
req                38 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
req                42 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req                68 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	struct hwrm_func_cfg_input req = {0};
req                95 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req                96 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.fid = cpu_to_le16(vf->fw_fid);
req                97 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.flags = cpu_to_le32(func_flags);
req                98 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               111 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	struct hwrm_func_qcfg_input req = {0};
req               114 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
req               115 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.fid = cpu_to_le16(vf->fw_fid);
req               117 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               138 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	struct hwrm_func_cfg_input req = {0};
req               144 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req               145 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.fid = cpu_to_le16(vf->fw_fid);
req               147 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
req               149 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
req               150 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               211 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	struct hwrm_func_cfg_input req = {0};
req               229 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req               230 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.fid = cpu_to_le16(vf->fw_fid);
req               231 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
req               232 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
req               233 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               239 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	struct hwrm_func_cfg_input req = {0};
req               266 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req               267 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.fid = cpu_to_le16(vf->fw_fid);
req               268 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.dflt_vlan = cpu_to_le16(vlan_tag);
req               269 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
req               270 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               279 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	struct hwrm_func_cfg_input req = {0};
req               304 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req               305 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.fid = cpu_to_le16(vf->fw_fid);
req               306 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
req               307 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.max_bw = cpu_to_le32(max_tx_rate);
req               308 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
req               309 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.min_bw = cpu_to_le32(min_tx_rate);
req               310 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               368 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	struct hwrm_func_vf_resc_free_input req = {0};
req               370 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);
req               374 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.vf_id = cpu_to_le16(i);
req               375 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		rc = _hwrm_send_message(bp, &req, sizeof(req),
req               453 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	struct hwrm_func_buf_rgtr_input req = {0};
req               455 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);
req               457 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
req               458 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
req               459 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
req               460 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
req               461 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
req               462 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
req               463 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
req               465 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               471 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	struct hwrm_func_cfg_input req = {0};
req               475 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req               476 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.fid = cpu_to_le16(vf->fw_fid);
req               479 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
req               480 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN);
req               483 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
req               484 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.dflt_vlan = cpu_to_le16(vf->vlan);
req               487 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
req               488 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.max_bw = cpu_to_le32(vf->max_tx_rate);
req               490 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
req               491 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.min_bw = cpu_to_le32(vf->min_tx_rate);
req               495 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
req               497 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               505 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	struct hwrm_func_vf_resource_cfg_input req = {0};
req               513 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
req               531 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
req               532 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
req               535 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.min_rsscos_ctx = cpu_to_le16(min);
req               539 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.min_cmpl_rings = cpu_to_le16(min);
req               540 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.min_tx_rings = cpu_to_le16(min);
req               541 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.min_rx_rings = cpu_to_le16(min);
req               542 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.min_l2_ctxs = cpu_to_le16(min);
req               543 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.min_vnics = cpu_to_le16(min);
req               544 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.min_stat_ctx = cpu_to_le16(min);
req               546 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 			req.min_hw_ring_grps = cpu_to_le16(min);
req               555 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
req               556 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.min_tx_rings = cpu_to_le16(vf_tx_rings);
req               557 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.min_rx_rings = cpu_to_le16(vf_rx_rings);
req               558 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
req               559 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.min_vnics = cpu_to_le16(vf_vnics);
req               560 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
req               561 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
req               563 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
req               564 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.max_tx_rings = cpu_to_le16(vf_tx_rings);
req               565 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.max_rx_rings = cpu_to_le16(vf_rx_rings);
req               566 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
req               567 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.max_vnics = cpu_to_le16(vf_vnics);
req               568 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
req               569 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
req               571 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.max_msix = cpu_to_le16(vf_msix / num_vfs);
req               578 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
req               579 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		rc = _hwrm_send_message(bp, &req, sizeof(req),
req               590 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
req               591 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
req               592 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
req               594 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
req               596 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
req               597 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
req               614 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	struct hwrm_func_cfg_input req = {0};
req               619 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req               635 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
req               647 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.mru = cpu_to_le16(mtu);
req               648 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.mtu = cpu_to_le16(mtu);
req               650 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.num_rsscos_ctxs = cpu_to_le16(1);
req               651 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
req               652 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
req               653 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
req               654 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
req               655 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.num_l2_ctxs = cpu_to_le16(4);
req               657 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.num_vnics = cpu_to_le16(vf_vnics);
req               659 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
req               665 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		req.fid = cpu_to_le16(pf->first_vf_id + i);
req               666 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		rc = _hwrm_send_message(bp, &req, sizeof(req),
req               671 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
req               898 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	struct hwrm_fwd_resp_input req = {0};
req               903 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
req               906 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.target_id = cpu_to_le16(vf->fw_fid);
req               907 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
req               908 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.encap_resp_len = cpu_to_le16(msg_size);
req               909 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.encap_resp_addr = encap_resp_addr;
req               910 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.encap_resp_cmpl_ring = encap_resp_cpr;
req               911 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	memcpy(req.encap_resp, encap_resp, msg_size);
req               913 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               923 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	struct hwrm_reject_fwd_resp_input req = {0};
req               928 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
req               930 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.target_id = cpu_to_le16(vf->fw_fid);
req               931 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
req               932 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
req               934 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               944 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	struct hwrm_exec_fwd_resp_input req = {0};
req               949 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
req               951 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.target_id = cpu_to_le16(vf->fw_fid);
req               952 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
req               953 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
req               955 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               964 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	struct hwrm_func_vf_cfg_input *req =
req               970 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
req               973 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		if (is_valid_ether_addr(req->dflt_mac_addr) &&
req               975 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
req               976 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 			ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
req               987 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	struct hwrm_cfa_l2_filter_alloc_input *req =
req               991 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	if (!is_valid_ether_addr((const u8 *)req->l2_addr))
req              1002 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
req              1005 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 		if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
req              1122 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	struct hwrm_func_qcaps_input req = {0};
req              1125 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
req              1126 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.fid = cpu_to_le16(0xffff);
req              1129 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
req              1151 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	struct hwrm_func_vf_cfg_input req = {0};
req              1162 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
req              1163 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
req              1164 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
req              1165 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               309 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	struct hwrm_cfa_flow_free_input req = { 0 };
req               312 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
req               314 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		req.ext_flow_handle = flow_node->ext_flow_handle;
req               316 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		req.flow_handle = flow_node->flow_handle;
req               318 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               394 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	struct hwrm_cfa_flow_alloc_input req = { 0 };
req               399 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);
req               401 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	req.src_fid = cpu_to_le16(flow->src_fid);
req               402 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	req.ref_flow_handle = ref_flow_handle;
req               406 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		req.tunnel_handle = tunnel_handle;
req               411 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	req.ethertype = flow->l2_key.ether_type;
req               412 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	req.ip_proto = flow->l4_key.ip_proto;
req               415 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
req               416 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
req               425 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
req               438 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 			req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
req               439 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 			req.ip_dst_mask_len =
req               441 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 			req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
req               442 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 			req.ip_src_mask_len =
req               445 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 			memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
req               446 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 			       sizeof(req.ip_dst));
req               447 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 			req.ip_dst_mask_len =
req               449 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 			memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
req               450 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 			       sizeof(req.ip_src));
req               451 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 			req.ip_src_mask_len =
req               457 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		req.l4_src_port = flow->l4_key.ports.sport;
req               458 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		req.l4_src_port_mask = flow->l4_mask.ports.sport;
req               459 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		req.l4_dst_port = flow->l4_key.ports.dport;
req               460 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		req.l4_dst_port_mask = flow->l4_mask.ports.dport;
req               463 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		req.l4_src_port = htons(flow->l4_key.icmp.type);
req               464 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
req               465 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		req.l4_dst_port = htons(flow->l4_key.icmp.code);
req               466 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
req               468 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	req.flags = cpu_to_le16(flow_flags);
req               475 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 			req.dst_fid = cpu_to_le16(actions->dst_fid);
req               480 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 			req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
req               481 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 			req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
req               482 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
req               483 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
req               489 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 			req.l2_rewrite_vlan_tpid = 0;
req               490 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
req               491 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
req               494 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	req.action_flags = cpu_to_le16(action_flags);
req               497 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               499 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		resp = bnxt_get_hwrm_resp_addr(bp, &req);
req               525 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
req               531 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);
req               533 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
req               536 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
req               537 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;
req               542 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
req               547 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		ether_addr_copy(req.dst_macaddr, l2_info->dmac);
req               551 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		req.t_ivlan_vid = l2_info->inner_vlan_tci;
req               555 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	req.ethertype = htons(ETH_P_IP);
req               561 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
req               562 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
req               563 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		req.src_ipaddr[0] = tun_key->u.ipv4.src;
req               568 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		req.dst_port = tun_key->tp_dst;
req               574 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
req               575 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	req.enables = cpu_to_le32(enables);
req               578 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               580 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		resp = bnxt_get_hwrm_resp_addr(bp, &req);
req               593 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	struct hwrm_cfa_decap_filter_free_input req = { 0 };
req               596 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
req               597 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	req.decap_filter_id = decap_filter_handle;
req               599 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               611 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	struct hwrm_cfa_encap_record_alloc_input req = { 0 };
req               614 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 			(struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
req               619 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);
req               621 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;
req               643 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req               645 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		resp = bnxt_get_hwrm_resp_addr(bp, &req);
req               658 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	struct hwrm_cfa_encap_record_free_input req = { 0 };
req               661 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
req               662 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	req.encap_record_id = encap_record_handle;
req               664 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              1409 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	struct hwrm_cfa_flow_stats_input req = { 0 };
req              1411 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	__le16 *req_flow_handles = &req.flow_handle_0;
req              1412 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	__le32 *req_flow_ids = &req.flow_id_0;
req              1415 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
req              1416 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	req.num_flows = cpu_to_le16(num_flows);
req              1425 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              1430 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		resp = bnxt_get_hwrm_resp_addr(bp, &req);
req               230 drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c 	struct input *req;
req               237 drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c 	req = fw_msg->msg;
req               238 drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c 	req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
req                31 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 	struct hwrm_cfa_vfr_alloc_input req = { 0 };
req                34 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_ALLOC, -1, -1);
req                35 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 	req.vf_id = cpu_to_le16(vf_idx);
req                36 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 	sprintf(req.vfr_name, "vfr%d", vf_idx);
req                39 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req                55 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 	struct hwrm_cfa_vfr_free_input req = { 0 };
req                58 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_FREE, -1, -1);
req                59 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 	sprintf(req.vfr_name, "vfr%d", vf_idx);
req                61 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req                71 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 	struct hwrm_func_qcfg_input req = {0};
req                75 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
req                76 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 	req.fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid);
req                80 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req              1944 drivers/net/ethernet/broadcom/cnic.c 	struct iscsi_kwqe_conn_update *req =
req              1948 drivers/net/ethernet/broadcom/cnic.c 	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
req              1961 drivers/net/ethernet/broadcom/cnic.c 			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
req              1994 drivers/net/ethernet/broadcom/cnic.c 	struct iscsi_kwqe_conn_destroy *req =
req              1996 drivers/net/ethernet/broadcom/cnic.c 	u32 l5_cid = req->reserved0;
req              2031 drivers/net/ethernet/broadcom/cnic.c 	kcqe.iscsi_conn_context_id = req->context_id;
req              2204 drivers/net/ethernet/broadcom/cnic.c 	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
req              2210 drivers/net/ethernet/broadcom/cnic.c 			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
req              2216 drivers/net/ethernet/broadcom/cnic.c 	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
req              2222 drivers/net/ethernet/broadcom/cnic.c 			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
req              2227 drivers/net/ethernet/broadcom/cnic.c 	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
req              2232 drivers/net/ethernet/broadcom/cnic.c 	kcqe.pg_host_opaque = req->host_opaque;
req              2233 drivers/net/ethernet/broadcom/cnic.c 	kcqe.pg_cid = req->host_opaque;
req              2242 drivers/net/ethernet/broadcom/cnic.c 	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
req              2247 drivers/net/ethernet/broadcom/cnic.c 	kcqe.pg_host_opaque = req->pg_host_opaque;
req              2248 drivers/net/ethernet/broadcom/cnic.c 	kcqe.pg_cid = req->pg_cid;
req              2257 drivers/net/ethernet/broadcom/cnic.c 	struct fcoe_kwqe_stat *req;
req              2265 drivers/net/ethernet/broadcom/cnic.c 	req = (struct fcoe_kwqe_stat *) kwqe;
req              2273 drivers/net/ethernet/broadcom/cnic.c 	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
req              2434 drivers/net/ethernet/broadcom/cnic.c 	struct fcoe_kwqe_conn_enable_disable *req;
req              2441 drivers/net/ethernet/broadcom/cnic.c 	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
req              2442 drivers/net/ethernet/broadcom/cnic.c 	cid = req->context_id;
req              2443 drivers/net/ethernet/broadcom/cnic.c 	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
req              2454 drivers/net/ethernet/broadcom/cnic.c 	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
req              2462 drivers/net/ethernet/broadcom/cnic.c 	struct fcoe_kwqe_conn_enable_disable *req;
req              2469 drivers/net/ethernet/broadcom/cnic.c 	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
req              2470 drivers/net/ethernet/broadcom/cnic.c 	cid = req->context_id;
req              2471 drivers/net/ethernet/broadcom/cnic.c 	l5_cid = req->conn_id;
req              2486 drivers/net/ethernet/broadcom/cnic.c 	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
req              2494 drivers/net/ethernet/broadcom/cnic.c 	struct fcoe_kwqe_conn_destroy *req;
req              2503 drivers/net/ethernet/broadcom/cnic.c 	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
req              2504 drivers/net/ethernet/broadcom/cnic.c 	cid = req->context_id;
req              2505 drivers/net/ethernet/broadcom/cnic.c 	l5_cid = req->conn_id;
req              2531 drivers/net/ethernet/broadcom/cnic.c 	kcqe.fcoe_conn_id = req->conn_id;
req              2600 drivers/net/ethernet/broadcom/cnic.c 			struct fcoe_kwqe_conn_enable_disable *req;
req              2602 drivers/net/ethernet/broadcom/cnic.c 			req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
req              2604 drivers/net/ethernet/broadcom/cnic.c 			cid = req->context_id;
req              2605 drivers/net/ethernet/broadcom/cnic.c 			l5_cid = req->conn_id;
req               715 drivers/net/ethernet/broadcom/tg3.c 	u32 status, req, gnt, bit;
req               743 drivers/net/ethernet/broadcom/tg3.c 		req = TG3_APE_LOCK_REQ;
req               746 drivers/net/ethernet/broadcom/tg3.c 		req = TG3_APE_PER_LOCK_REQ;
req               752 drivers/net/ethernet/broadcom/tg3.c 	tg3_ape_write32(tp, req + off, bit);
req               267 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	struct bfi_msgq_i2h_cmdq_copy_req *req =
req               271 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	cmdq->offset = ntohs(req->offset);
req               272 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	cmdq->bytes_to_copy = ntohs(req->len);
req               182 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;
req               184 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
req               185 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	req->mh.num_entries = htons(
req               187 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	ether_addr_copy(req->mac_addr, mac->addr);
req               189 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		sizeof(struct bfi_enet_ucast_req), &req->mh);
req               196 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	struct bfi_enet_mcast_add_req *req =
req               199 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
req               201 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	req->mh.num_entries = htons(
req               203 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	ether_addr_copy(req->mac_addr, mac->addr);
req               205 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		sizeof(struct bfi_enet_mcast_add_req), &req->mh);
req               212 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	struct bfi_enet_mcast_del_req *req =
req               215 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
req               217 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	req->mh.num_entries = htons(
req               219 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	req->handle = htons(handle);
req               221 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		sizeof(struct bfi_enet_mcast_del_req), &req->mh);
req               228 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
req               230 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
req               232 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	req->mh.num_entries = htons(
req               234 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	req->enable = status;
req               236 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		sizeof(struct bfi_enet_enable_req), &req->mh);
req               243 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
req               245 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
req               247 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	req->mh.num_entries = htons(
req               249 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	req->enable = status;
req               251 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		sizeof(struct bfi_enet_enable_req), &req->mh);
req               258 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
req               262 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
req               264 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	req->mh.num_entries = htons(
req               266 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	req->block_idx = block_idx;
req               270 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 			req->bit_mask[i] =
req               273 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 			req->bit_mask[i] = 0xFFFFFFFF;
req               276 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
req               283 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
req               285 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
req               287 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	req->mh.num_entries = htons(
req               289 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	req->enable = rxf->vlan_strip_status;
req               291 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		sizeof(struct bfi_enet_enable_req), &req->mh);
req               298 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;
req               300 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
req               302 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	req->mh.num_entries = htons(
req               304 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	req->size = htons(rxf->rit_size);
req               305 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	memcpy(&req->table[0], rxf->rit, rxf->rit_size);
req               307 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		sizeof(struct bfi_enet_rit_req), &req->mh);
req               314 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
req               317 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
req               319 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	req->mh.num_entries = htons(
req               321 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	req->cfg.type = rxf->rss_cfg.hash_type;
req               322 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	req->cfg.mask = rxf->rss_cfg.hash_mask;
req               324 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		req->cfg.key[i] =
req               327 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
req               334 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
req               336 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
req               338 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	req->mh.num_entries = htons(
req               340 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	req->enable = rxf->rss_status;
req               342 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		sizeof(struct bfi_enet_enable_req), &req->mh);
req               597 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	struct bfi_enet_mcast_add_req *req =
req               602 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
req              1717 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;
req              1719 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
req              1721 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	req->mh.num_entries = htons(
req              1724 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		&req->mh);
req              3137 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;
req              3139 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
req              3141 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	req->mh.num_entries = htons(
req              3144 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		&req->mh);
req               486 drivers/net/ethernet/brocade/bna/bna_types.h 		struct bfi_enet_req		req;
req               706 drivers/net/ethernet/brocade/bna/bna_types.h 		struct bfi_enet_enable_req req;
req               794 drivers/net/ethernet/brocade/bna/bna_types.h 		struct bfi_enet_req		req;
req                56 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 			     void *req, int req_size,
req                75 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 	memcpy(sc_req, req, req_size);
req              1979 drivers/net/ethernet/cavium/thunder/nicvf_main.c static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
req              1983 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		return nicvf_config_hwtstamp(netdev, req);
req               818 drivers/net/ethernet/chelsio/cxgb/cxgb2.c static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
req               823 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	return mdio_mii_ioctl(mdio, if_mii(req), cmd);
req               466 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		struct cpl_smt_write_req *req;
req               468 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
req               474 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		req = __skb_put_zero(skb, sizeof(*req));
req               475 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req               476 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
req               477 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		req->mtu_idx = NMTUS - 1;
req               478 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		req->iff = i;
req               489 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		struct cpl_l2t_write_req *req;
req               491 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
req               497 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		req = __skb_put_zero(skb, sizeof(*req));
req               498 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req               499 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
req               500 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		req->params = htonl(V_L2T_W_IDX(i));
req               511 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		struct cpl_rte_write_req *req;
req               513 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
req               519 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		req = __skb_put_zero(skb, sizeof(*req));
req               520 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req               521 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
req               522 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		req->l2t_idx = htonl(V_L2T_W_IDX(i));
req               905 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	struct cpl_smt_write_req *req;
req               907 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
req               912 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	req = __skb_put(skb, sizeof(*req));
req               913 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req               914 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
req               915 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
req               916 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	req->iff = idx;
req               917 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
req               918 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
req               946 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	struct mngt_pktsched_wr *req;
req               949 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
req               955 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	req = skb_put(skb, sizeof(*req));
req               956 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
req               957 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
req               958 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	req->sched = sched;
req               959 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	req->idx = qidx;
req               960 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	req->min = lo;
req               961 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	req->max = hi;
req               962 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	req->binding = port;
req              2531 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
req              2533 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	struct mii_ioctl_data *data = if_mii(req);
req              2551 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		return cxgb_extension_ioctl(dev, req->ifr_data);
req               203 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
req               211 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	switch (req) {
req               275 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
req               279 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	switch (req) {
req               373 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
req               385 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	switch (req) {
req               438 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		return cxgb_ulp_iscsi_ctl(adapter, req, data);
req               448 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		return cxgb_rdma_ctl(adapter, req, data);
req               552 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct cpl_tid_release *req;
req               555 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	req = __skb_put(skb, sizeof(*req));
req               556 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req               557 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
req               797 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct cpl_pass_accept_req *req = cplhdr(skb);
req               798 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
req               801 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	unsigned int tid = GET_TID(req);
req               856 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		struct cpl_abort_req_rss *req = cplhdr(skb);
req               859 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		unsigned int tid = GET_TID(req);
req               860 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		u8 cmd = req->status;
req               862 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
req               863 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
req               891 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct cpl_act_establish *req = cplhdr(skb);
req               892 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
req               895 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	unsigned int tid = GET_TID(req);
req              1091 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct cpl_set_tcb_field *req;
req              1093 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
req              1099 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	req = skb_put(skb, sizeof(*req));
req              1100 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req              1101 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
req              1102 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	req->reply = 0;
req              1103 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	req->cpu_idx = 0;
req              1104 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	req->word = htons(W_TCB_L2T_IX);
req              1105 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
req              1106 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
req                90 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	struct cpl_l2t_write_req *req;
req                94 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
req                99 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	req = __skb_put(skb, sizeof(*req));
req               100 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req               101 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
req               102 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
req               106 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
req                60 drivers/net/ethernet/chelsio/cxgb3/t3cdev.h 	int (*ctl)(struct t3cdev *dev, unsigned int req, void *data);
req               258 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h 	u32 req[4];
req              3447 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	PRINT_CH_STATS("tp_cpl_requests:", req);
req                60 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	struct cpl_set_tcb_field *req;
req                67 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req));
req                68 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid);
req                69 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req->reply_ctrl = htons(REPLY_CHAN_V(0) |
req                72 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(ftid));
req                73 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req->mask = cpu_to_be64(mask);
req                74 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req->val = cpu_to_be64(val);
req               121 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 			   struct cpl_set_tcb_field *req,
req               125 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
req               129 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	txpkt->len = htonl(DIV_ROUND_UP(sizeof(*req), 16));
req               131 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	sc->len = htonl(sizeof(*req) - sizeof(struct work_request_hdr));
req               132 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
req               133 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req->reply_ctrl = htons(NO_REPLY_V(no_reply) | REPLY_CHAN_V(0) |
req               135 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(cookie));
req               136 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req->mask = cpu_to_be64(mask);
req               137 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req->val = cpu_to_be64(val);
req               138 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	sc = (struct ulptx_idata *)(req + 1);
req              1014 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	struct cpl_act_open_req6 *req = NULL;
req              1018 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req = (struct cpl_act_open_req6 *)t6req;
req              1019 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_filterid));
req              1020 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req->local_port = cpu_to_be16(f->fs.val.lport);
req              1021 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req->peer_port = cpu_to_be16(f->fs.val.fport);
req              1022 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req->local_ip_hi = *(__be64 *)(&f->fs.val.lip);
req              1023 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req->local_ip_lo = *(((__be64 *)&f->fs.val.lip) + 1);
req              1024 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req->peer_ip_hi = *(__be64 *)(&f->fs.val.fip);
req              1025 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req->peer_ip_lo = *(((__be64 *)&f->fs.val.fip) + 1);
req              1026 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
req              1055 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	struct cpl_act_open_req *req = NULL;
req              1059 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req = (struct cpl_act_open_req *)t6req;
req              1060 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid));
req              1061 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req->local_port = cpu_to_be16(f->fs.val.lport);
req              1062 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req->peer_port = cpu_to_be16(f->fs.val.fport);
req              1063 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	memcpy(&req->local_ip, f->fs.val.lip, 4);
req              1064 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	memcpy(&req->peer_ip, f->fs.val.fip, 4);
req              1065 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
req              1457 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	struct cpl_set_tcb_field *req;
req              1487 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	wrlen = roundup(sizeof(*wr) + (sizeof(*req) + sizeof(*aligner))
req              1495 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
req              1496 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	INIT_ULPTX_WR(req, wrlen, 0, 0);
req              1497 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	wr = (struct work_request_hdr *)req;
req              1499 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req = (struct cpl_set_tcb_field *)wr;
req              1500 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	mk_set_tcb_ulp(f, req, TCB_RSS_INFO_W, TCB_RSS_INFO_V(TCB_RSS_INFO_M),
req              1502 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	aligner = (struct ulptx_idata *)(req + 1);
req              1353 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct cpl_tid_release *req;
req              1356 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req = __skb_put(skb, sizeof(*req));
req              1357 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	INIT_TP_WR(req, tid);
req              1358 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
req              1532 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct cpl_pass_open_req *req;
req              1535 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
req              1540 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req = __skb_put(skb, sizeof(*req));
req              1541 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	INIT_TP_WR(req, 0);
req              1542 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
req              1543 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req->local_port = sport;
req              1544 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req->peer_port = htons(0);
req              1545 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req->local_ip = sip;
req              1546 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req->peer_ip = htonl(0);
req              1548 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
req              1549 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
req              1573 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct cpl_pass_open_req6 *req;
req              1576 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
req              1581 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req = __skb_put(skb, sizeof(*req));
req              1582 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	INIT_TP_WR(req, 0);
req              1583 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
req              1584 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req->local_port = sport;
req              1585 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req->peer_port = htons(0);
req              1586 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
req              1587 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
req              1588 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req->peer_ip_hi = cpu_to_be64(0);
req              1589 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req->peer_ip_lo = cpu_to_be64(0);
req              1591 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
req              1592 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
req              1604 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct cpl_close_listsvr_req *req;
req              1609 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
req              1613 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req = __skb_put(skb, sizeof(*req));
req              1614 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	INIT_TP_WR(req, 0);
req              1615 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
req              1616 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
req              2616 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
req              2622 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
req              2651 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		return copy_to_user(req->ifr_data, &pi->tstamp_config,
req              2655 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		if (copy_from_user(&pi->tstamp_config, req->ifr_data,
req              2722 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		return copy_to_user(req->ifr_data, &pi->tstamp_config,
req               143 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	struct cpl_l2t_write_req *req;
req               145 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
req               149 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	req = __skb_put(skb, sizeof(*req));
req               150 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	INIT_TP_WR(req, 0);
req               152 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
req               155 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
req               156 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	req->l2t_idx = htons(l2t_idx);
req               157 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	req->vlan = htons(e->vlan);
req               160 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
req              2165 drivers/net/ethernet/chelsio/cxgb4/sge.c 	struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
req              2166 drivers/net/ethernet/chelsio/cxgb4/sge.c 	unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));
req               141 drivers/net/ethernet/chelsio/cxgb4/smt.c 	struct cpl_smt_write_req *req;
req               147 drivers/net/ethernet/chelsio/cxgb4/smt.c 		size = sizeof(*req);
req               154 drivers/net/ethernet/chelsio/cxgb4/smt.c 		req = (struct cpl_smt_write_req *)__skb_put(skb, size);
req               155 drivers/net/ethernet/chelsio/cxgb4/smt.c 		INIT_TP_WR(req, 0);
req               162 drivers/net/ethernet/chelsio/cxgb4/smt.c 			req->pfvf1 = 0x0;
req               163 drivers/net/ethernet/chelsio/cxgb4/smt.c 			memcpy(req->src_mac1, e->src_mac, ETH_ALEN);
req               168 drivers/net/ethernet/chelsio/cxgb4/smt.c 			req->pfvf0 = 0x0;
req               169 drivers/net/ethernet/chelsio/cxgb4/smt.c 			memcpy(req->src_mac0, s->smtab[e->idx - 1].src_mac,
req               172 drivers/net/ethernet/chelsio/cxgb4/smt.c 			req->pfvf0 = 0x0;
req               173 drivers/net/ethernet/chelsio/cxgb4/smt.c 			memcpy(req->src_mac0, e->src_mac, ETH_ALEN);
req               178 drivers/net/ethernet/chelsio/cxgb4/smt.c 			req->pfvf1 = 0x0;
req               179 drivers/net/ethernet/chelsio/cxgb4/smt.c 			memcpy(req->src_mac1, s->smtab[e->idx + 1].src_mac,
req               190 drivers/net/ethernet/chelsio/cxgb4/smt.c 		req = (struct cpl_smt_write_req *)t6req;
req               193 drivers/net/ethernet/chelsio/cxgb4/smt.c 		req->pfvf0 = 0x0;
req               194 drivers/net/ethernet/chelsio/cxgb4/smt.c 		memcpy(req->src_mac0, s->smtab[e->idx].src_mac, ETH_ALEN);
req               198 drivers/net/ethernet/chelsio/cxgb4/smt.c 	OPCODE_TID(req) =
req               201 drivers/net/ethernet/chelsio/cxgb4/smt.c 	req->params = htonl(SMTW_NORPL_V(0) |
req                71 drivers/net/ethernet/chelsio/cxgb4/srq.c 	struct cpl_srq_table_req *req;
req                83 drivers/net/ethernet/chelsio/cxgb4/srq.c 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
req                86 drivers/net/ethernet/chelsio/cxgb4/srq.c 	req = (struct cpl_srq_table_req *)
req                87 drivers/net/ethernet/chelsio/cxgb4/srq.c 		__skb_put_zero(skb, sizeof(*req));
req                88 drivers/net/ethernet/chelsio/cxgb4/srq.c 	INIT_TP_WR(req, 0);
req                89 drivers/net/ethernet/chelsio/cxgb4/srq.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SRQ_TABLE_REQ,
req                92 drivers/net/ethernet/chelsio/cxgb4/srq.c 	req->idx = srq_idx;
req               154 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
req               157 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		req |= ENABLE_F;
req               159 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		req |= T6_ENABLE_F;
req               162 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		req |= LOCALCFG_F;
req               164 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
req              3902 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	u32 cfg, val, req, rsp;
req              3909 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	req = POLADBGWRPTR_G(val);
req              3912 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		*pif_req_wrptr = req;
req              3918 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
req              3922 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			req++;
req              3925 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		req = (req + 2) & POLADBGRDPTR_M;
req              5678 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	t4_tp_mib_read(adap, st->req, nchan, TP_MIB_CPL_IN_REQ_0_A, sleep_ok);
req                41 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c cxgb_get_4tuple(struct cpl_pass_accept_req *req, enum chip_type type,
req                46 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c 		      ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
req                47 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c 		      T6_ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
req                49 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c 		     IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
req                50 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c 		     T6_IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
req                51 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c 	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
req                52 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c 	struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
req                54 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c 			     ((u8 *)(req + 1) + eth_len + ip_len);
req                91 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	struct cpl_tid_release *req;
req                93 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	req = __skb_put_zero(skb, len);
req                95 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	INIT_TP_WR(req, tid);
req                96 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
req               104 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	struct cpl_close_con_req *req;
req               106 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	req = __skb_put_zero(skb, len);
req               108 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	INIT_TP_WR(req, tid);
req               109 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
req               118 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	struct cpl_abort_req *req;
req               120 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	req = __skb_put_zero(skb, len);
req               122 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	INIT_TP_WR(req, tid);
req               123 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
req               124 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	req->cmd = CPL_ABORT_SEND_RST;
req               146 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	struct cpl_rx_data_ack *req;
req               148 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	req = __skb_put_zero(skb, len);
req               150 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	INIT_TP_WR(req, tid);
req               151 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, tid));
req               152 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	req->credit_dack = cpu_to_be32(credit_dack);
req               411 drivers/net/ethernet/davicom/dm9000.c static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
req               418 drivers/net/ethernet/davicom/dm9000.c 	return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
req               995 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_eq_create *req;
req              1003 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              1005 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              1006 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
req              1013 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.version = ver;
req              1014 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
req              1016 drivers/net/ethernet/emulex/benet/be_cmds.c 	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
req              1018 drivers/net/ethernet/emulex/benet/be_cmds.c 	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
req              1019 drivers/net/ethernet/emulex/benet/be_cmds.c 	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
req              1021 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_dws_cpu_to_le(req->context, sizeof(req->context));
req              1023 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
req              1044 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_mac_query *req;
req              1054 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              1056 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              1057 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
req              1059 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->type = MAC_ADDRESS_TYPE_NETWORK;
req              1061 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->permanent = 1;
req              1063 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->if_id = cpu_to_le16((u16)if_handle);
req              1064 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->pmac_id = cpu_to_le32(pmac_id);
req              1065 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->permanent = 0;
req              1085 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_pmac_add *req;
req              1095 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              1097 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              1098 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
req              1101 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.domain = domain;
req              1102 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->if_id = cpu_to_le32(if_id);
req              1103 drivers/net/ethernet/emulex/benet/be_cmds.c 	memcpy(req->mac_address, mac_addr, ETH_ALEN);
req              1125 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_pmac_del *req;
req              1138 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              1140 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              1141 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
req              1144 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.domain = dom;
req              1145 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->if_id = cpu_to_le32(if_id);
req              1146 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->pmac_id = cpu_to_le32(pmac_id);
req              1160 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_cq_create *req;
req              1169 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              1170 drivers/net/ethernet/emulex/benet/be_cmds.c 	ctxt = &req->context;
req              1172 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              1173 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
req              1176 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
req              1189 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->hdr.version = 2;
req              1190 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->page_size = 1; /* 1 for 4K */
req              1207 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
req              1209 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
req              1238 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_mcc_ext_create *req;
req              1247 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              1248 drivers/net/ethernet/emulex/benet/be_cmds.c 	ctxt = &req->context;
req              1250 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              1251 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
req              1254 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
req              1261 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->hdr.version = 1;
req              1262 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->cq_id = cpu_to_le16(cq->id);
req              1276 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->async_event_bitmap[0] =
req              1282 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
req              1284 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
req              1303 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_mcc_create *req;
req              1312 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              1313 drivers/net/ethernet/emulex/benet/be_cmds.c 	ctxt = &req->context;
req              1315 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              1316 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
req              1319 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
req              1326 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
req              1328 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
req              1360 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_eth_tx_create *req;
req              1366 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(&wrb);
req              1367 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
req              1368 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
req              1371 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->hdr.version = 1;
req              1374 drivers/net/ethernet/emulex/benet/be_cmds.c 			req->hdr.version = 2;
req              1376 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->hdr.version = 2;
req              1379 drivers/net/ethernet/emulex/benet/be_cmds.c 	if (req->hdr.version > 0)
req              1380 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->if_id = cpu_to_le16(adapter->if_handle);
req              1381 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
req              1382 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->ulp_num = BE_ULP1_NUM;
req              1383 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
req              1384 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->cq_id = cpu_to_le16(cq->id);
req              1385 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->queue_size = be_encoded_q_len(txq->len);
req              1386 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
req              1387 drivers/net/ethernet/emulex/benet/be_cmds.c 	ver = req->hdr.version;
req              1410 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_eth_rx_create *req;
req              1421 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              1423 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
req              1424 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
req              1426 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->cq_id = cpu_to_le16(cq_id);
req              1427 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->frag_size = fls(frag_size) - 1;
req              1428 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->num_pages = 2;
req              1429 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
req              1430 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->interface_id = cpu_to_le32(if_id);
req              1431 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
req              1432 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->rss_queue = cpu_to_le32(rss);
req              1455 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_q_destroy *req;
req              1463 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              1490 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
req              1492 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->id = cpu_to_le16(q->id);
req              1505 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_q_destroy *req;
req              1515 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              1517 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
req              1518 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
req              1519 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->id = cpu_to_le16(q->id);
req              1536 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_if_create *req;
req              1539 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(&wrb);
req              1540 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              1542 drivers/net/ethernet/emulex/benet/be_cmds.c 			       sizeof(*req), &wrb, NULL);
req              1543 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.domain = domain;
req              1544 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->capability_flags = cpu_to_le32(cap_flags);
req              1545 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->enable_flags = cpu_to_le32(en_flags);
req              1546 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->pmac_invalid = true;
req              1565 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_if_destroy *req;
req              1571 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(&wrb);
req              1573 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              1575 drivers/net/ethernet/emulex/benet/be_cmds.c 			       sizeof(*req), &wrb, NULL);
req              1576 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.domain = domain;
req              1577 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->interface_id = cpu_to_le32(interface_id);
req              1630 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct lancer_cmd_req_pport_stats *req;
req              1644 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = nonemb_cmd->va;
req              1646 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
req              1650 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
req              1651 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->cmd_params.params.reset_stats = 0;
req              1694 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_link_status *req;
req              1707 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              1709 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              1711 drivers/net/ethernet/emulex/benet/be_cmds.c 			       sizeof(*req), wrb, NULL);
req              1715 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->hdr.version = 1;
req              1717 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.domain = dom;
req              1744 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_get_cntl_addnl_attribs *req;
req              1754 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              1756 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              1758 drivers/net/ethernet/emulex/benet/be_cmds.c 			       sizeof(*req), wrb, NULL);
req              1770 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_get_fat *req;
req              1773 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(&wrb);
req              1775 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              1776 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_MANAGE_FAT, sizeof(*req),
req              1778 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->fat_operation = cpu_to_le32(QUERY_FAT);
req              1794 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_get_fat *req;
req              1822 drivers/net/ethernet/emulex/benet/be_cmds.c 		req = get_fat_cmd.va;
req              1825 drivers/net/ethernet/emulex/benet/be_cmds.c 		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              1829 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
req              1830 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->read_log_offset = cpu_to_le32(log_offset);
req              1831 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->read_log_length = cpu_to_le32(buf_size);
req              1832 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->data_buffer_size = cpu_to_le32(buf_size);
req              1859 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_get_fw_version *req;
req              1870 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              1872 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              1873 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
req              1896 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_modify_eq_delay *req;
req              1906 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              1908 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              1909 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
req              1912 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->num_eq = cpu_to_le32(num);
req              1914 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
req              1915 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->set_eqd[i].phase = 0;
req              1916 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->set_eqd[i].delay_multiplier =
req              1946 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_vlan_config *req;
req              1956 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              1958 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              1959 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
req              1961 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.domain = domain;
req              1963 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->interface_id = if_id;
req              1964 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
req              1965 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->num_vlan = num;
req              1966 drivers/net/ethernet/emulex/benet/be_cmds.c 	memcpy(req->normal_vlan, vtag_array,
req              1967 drivers/net/ethernet/emulex/benet/be_cmds.c 	       req->num_vlan * sizeof(vtag_array[0]));
req              1979 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_rx_filter *req = mem->va;
req              1989 drivers/net/ethernet/emulex/benet/be_cmds.c 	memset(req, 0, sizeof(*req));
req              1990 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              1991 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
req              1994 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->if_id = cpu_to_le32(adapter->if_handle);
req              1995 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->if_flags_mask = cpu_to_le32(flags);
req              1996 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->if_flags = (value == ON) ? req->if_flags_mask : 0;
req              2004 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->if_flags_mask |=
req              2007 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->mcast_num = cpu_to_le32(adapter->mc_count);
req              2009 drivers/net/ethernet/emulex/benet/be_cmds.c 			ether_addr_copy(req->mcast_mac[i].byte,
req              2039 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_set_flow_control *req;
req              2053 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              2055 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              2056 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
req              2059 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.version = 1;
req              2060 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
req              2061 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->rx_flow_control = cpu_to_le16((u16)rx_fc);
req              2078 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_get_flow_control *req;
req              2092 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              2094 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              2095 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
req              2116 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_query_fw_cfg *req;
req              2123 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              2125 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              2127 drivers/net/ethernet/emulex/benet/be_cmds.c 			       sizeof(*req), wrb, NULL);
req              2150 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_hdr *req;
req              2167 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              2169 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
req              2170 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
req              2183 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_rss_config *req;
req              2196 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              2198 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
req              2199 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
req              2201 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->if_id = cpu_to_le32(adapter->if_handle);
req              2202 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->enable_rss = cpu_to_le16(rss_hash_opts);
req              2203 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
req              2206 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->hdr.version = 1;
req              2208 drivers/net/ethernet/emulex/benet/be_cmds.c 	memcpy(req->cpu_table, rsstable, table_size);
req              2209 drivers/net/ethernet/emulex/benet/be_cmds.c 	memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
req              2210 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_dws_cpu_to_le(req->hash, sizeof(req->hash));
req              2223 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_enable_disable_beacon *req;
req              2233 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              2235 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              2237 drivers/net/ethernet/emulex/benet/be_cmds.c 			       sizeof(*req), wrb, NULL);
req              2239 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->port_num = port_num;
req              2240 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->beacon_state = state;
req              2241 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->beacon_duration = bcn;
req              2242 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->status_duration = sts;
req              2255 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_get_beacon_state *req;
req              2265 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              2267 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              2268 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
req              2271 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->port_num = port_num;
req              2292 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_port_type *req;
req              2313 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = cmd.va;
req              2315 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              2319 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->port = cpu_to_le32(adapter->hba_port_num);
req              2320 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->page_num = cpu_to_le32(page_num);
req              2340 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct lancer_cmd_req_write_object *req;
req              2354 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              2356 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              2361 drivers/net/ethernet/emulex/benet/be_cmds.c 	ctxt = &req->context;
req              2372 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
req              2373 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->write_offset = cpu_to_le32(data_offset);
req              2374 drivers/net/ethernet/emulex/benet/be_cmds.c 	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
req              2375 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->descriptor_count = cpu_to_le32(1);
req              2376 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->buf_len = cpu_to_le32(data_size);
req              2377 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->addr_low = cpu_to_le32((cmd->dma +
req              2380 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
req              2456 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct lancer_cmd_req_delete_object *req;
req              2468 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              2470 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              2472 drivers/net/ethernet/emulex/benet/be_cmds.c 			       sizeof(*req), wrb, NULL);
req              2474 drivers/net/ethernet/emulex/benet/be_cmds.c 	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
req              2487 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct lancer_cmd_req_read_object *req;
req              2499 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              2501 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              2506 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->desired_read_len = cpu_to_le32(data_size);
req              2507 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->read_offset = cpu_to_le32(data_offset);
req              2508 drivers/net/ethernet/emulex/benet/be_cmds.c 	strcpy(req->object_name, obj_name);
req              2509 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->descriptor_count = cpu_to_le32(1);
req              2510 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->buf_len = cpu_to_le32(data_size);
req              2511 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
req              2512 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
req              2534 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_write_flashrom *req;
req              2545 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = cmd->va;
req              2547 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              2551 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->params.op_type = cpu_to_le32(flash_type);
req              2553 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->params.offset = cpu_to_le32(img_offset);
req              2555 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->params.op_code = cpu_to_le32(flash_opcode);
req              2556 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->params.data_buf_size = cpu_to_le32(buf_size);
req              2580 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_read_flash_crc *req;
req              2591 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              2593 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              2594 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
req              2597 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->params.op_type = cpu_to_le32(img_optype);
req              2599 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->params.offset = cpu_to_le32(img_offset + crc_offset);
req              2601 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->params.offset = cpu_to_le32(crc_offset);
req              2603 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
req              2604 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->params.data_buf_size = cpu_to_le32(0x4);
req              2608 drivers/net/ethernet/emulex/benet/be_cmds.c 		memcpy(flashed_crc, req->crc, 4);
req              2690 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_write_flashrom *req = flash_cmd->va;
req              2710 drivers/net/ethernet/emulex/benet/be_cmds.c 		memcpy(req->data_buf, img, num_bytes);
req              3214 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_acpi_wol_magic_config *req;
req              3224 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = nonemb_cmd->va;
req              3226 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
req              3227 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
req              3229 drivers/net/ethernet/emulex/benet/be_cmds.c 	memcpy(req->magic_mac, mac, ETH_ALEN);
req              3242 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_set_lmode *req;
req              3257 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              3259 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
req              3260 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
req              3263 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->src_port = port_num;
req              3264 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->dest_port = port_num;
req              3265 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->loopback_type = loopback_type;
req              3266 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->loopback_state = enable;
req              3290 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_loopback_test *req;
req              3306 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              3308 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
req              3309 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
req              3312 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.timeout = cpu_to_le32(15);
req              3313 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->pattern = cpu_to_le64(pattern);
req              3314 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->src_port = cpu_to_le32(port_num);
req              3315 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->dest_port = cpu_to_le32(port_num);
req              3316 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->pkt_size = cpu_to_le32(pkt_size);
req              3317 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->num_pkts = cpu_to_le32(num_pkts);
req              3318 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->loopback_type = cpu_to_le32(loopback_type);
req              3340 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_ddrdma_test *req;
req              3355 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = cmd->va;
req              3356 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
req              3360 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->pattern = cpu_to_le64(pattern);
req              3361 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->byte_count = cpu_to_le32(byte_cnt);
req              3363 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->snd_buff[i] = (u8)(pattern >> (j*8));
req              3375 drivers/net/ethernet/emulex/benet/be_cmds.c 		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
req              3390 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_seeprom_read *req;
req              3400 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = nonemb_cmd->va;
req              3402 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              3403 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
req              3416 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_get_phy_info *req;
req              3440 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = cmd.va;
req              3442 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              3443 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
req              3476 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_set_qos *req;
req              3487 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              3489 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              3490 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
req              3492 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.domain = domain;
req              3493 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
req              3494 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->max_bps_nic = cpu_to_le32(bps);
req              3506 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_cntl_attribs *req;
req              3509 drivers/net/ethernet/emulex/benet/be_cmds.c 	int payload_len = max(sizeof(*req), sizeof(*resp));
req              3533 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = attribs_cmd.va;
req              3535 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              3566 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_set_func_cap *req;
req              3578 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              3580 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              3582 drivers/net/ethernet/emulex/benet/be_cmds.c 			       sizeof(*req), wrb, NULL);
req              3584 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
req              3586 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
req              3608 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_get_fn_privileges *req;
req              3619 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              3621 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              3622 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
req              3625 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.domain = domain;
req              3652 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_set_fn_privileges *req;
req              3663 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              3664 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              3665 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
req              3667 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.domain = domain;
req              3669 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->privileges_lancer = cpu_to_le32(privileges);
req              3671 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->privileges = cpu_to_le32(privileges);
req              3688 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_get_mac_list *req;
req              3715 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = get_mac_list_cmd.va;
req              3717 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              3720 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.domain = domain;
req              3721 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
req              3723 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->mac_id = cpu_to_le32(*pmac_id);
req              3724 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->iface_id = cpu_to_le16(if_handle);
req              3725 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->perm_override = 0;
req              3727 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->perm_override = 1;
req              3820 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_set_mac_list *req;
req              3839 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = cmd.va;
req              3840 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              3841 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
req              3844 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.domain = domain;
req              3845 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->mac_count = mac_count;
req              3847 drivers/net/ethernet/emulex/benet/be_cmds.c 		memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
req              3881 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_set_hsw_config *req;
req              3897 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              3898 drivers/net/ethernet/emulex/benet/be_cmds.c 	ctxt = &req->context;
req              3900 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              3901 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
req              3904 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.domain = domain;
req              3926 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_dws_cpu_to_le(req->context, sizeof(req->context));
req              3939 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_get_hsw_config *req;
req              3952 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              3953 drivers/net/ethernet/emulex/benet/be_cmds.c 	ctxt = &req->context;
req              3955 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              3956 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
req              3959 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.domain = domain;
req              3969 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_dws_cpu_to_le(req->context, sizeof(req->context));
req              4016 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_acpi_wol_magic_config_v1 *req;
req              4046 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = cmd.va;
req              4048 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
req              4050 drivers/net/ethernet/emulex/benet/be_cmds.c 			       sizeof(*req), wrb, &cmd);
req              4052 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.version = 1;
req              4053 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->query_options = BE_GET_WOL_CAP;
req              4154 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_get_ext_fat_caps *req;
req              4170 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = cmd->va;
req              4171 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              4174 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->parameter_type = cpu_to_le32(1);
req              4187 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_set_ext_fat_caps *req;
req              4198 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = cmd->va;
req              4199 drivers/net/ethernet/emulex/benet/be_cmds.c 	memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
req              4200 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              4212 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_get_port_name *req;
req              4220 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              4222 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              4223 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
req              4226 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->hdr.version = 1;
req              4342 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_get_func_config *req;
req              4365 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = cmd.va;
req              4367 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              4372 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->hdr.version = 1;
req              4437 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_get_profile_config *req;
req              4454 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = cmd.va;
req              4455 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              4460 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->hdr.version = 1;
req              4461 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->type = profile_type;
req              4462 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.domain = domain;
req              4469 drivers/net/ethernet/emulex/benet/be_cmds.c 		req->type |= QUERY_MODIFIABLE_FIELDS_TYPE;
req              4529 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_set_profile_config *req;
req              4541 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = cmd.va;
req              4542 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              4545 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.version = version;
req              4546 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.domain = domain;
req              4547 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->desc_count = cpu_to_le32(count);
req              4548 drivers/net/ethernet/emulex/benet/be_cmds.c 	memcpy(req->desc, desc, size);
req              4678 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_manage_iface_filters *req;
req              4691 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              4693 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              4694 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
req              4696 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->op = op;
req              4697 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->target_iface_id = cpu_to_le32(iface);
req              4731 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_get_iface_list *req;
req              4742 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              4744 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              4747 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.domain = vf_num + 1;
req              4751 drivers/net/ethernet/emulex/benet/be_cmds.c 		resp = (struct be_cmd_resp_get_iface_list *)req;
req              4844 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_enable_disable_vf *req;
req              4858 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              4860 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              4861 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
req              4864 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.domain = domain;
req              4865 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->enable = 1;
req              4875 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_intr_set *req;
req              4883 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              4885 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              4886 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
req              4889 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->intr_enabled = intr_enable;
req              4900 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_get_active_profile *req;
req              4913 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              4915 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              4916 drivers/net/ethernet/emulex/benet/be_cmds.c 			       OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
req              4936 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_set_ll_link *req;
req              4949 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              4951 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              4953 drivers/net/ethernet/emulex/benet/be_cmds.c 			       sizeof(*req), wrb, NULL);
req              4955 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.version = version;
req              4956 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->hdr.domain = domain;
req              4965 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->link_config = cpu_to_le32(link_config);
req              4996 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_set_features *req;
req              5009 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              5011 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              5013 drivers/net/ethernet/emulex/benet/be_cmds.c 			       sizeof(*req), wrb, NULL);
req              5015 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->features = cpu_to_le32(BE_FEATURE_UE_RECOVERY);
req              5016 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->parameter_len = cpu_to_le32(sizeof(struct be_req_ue_recovery));
req              5017 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->parameter.req.uer = cpu_to_le32(BE_UE_RECOVERY_UER_MASK);
req              5049 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_cmd_req_hdr *req;
req              5060 drivers/net/ethernet/emulex/benet/be_cmds.c 	req = embedded_payload(wrb);
req              5063 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
req              5065 drivers/net/ethernet/emulex/benet/be_cmds.c 	memcpy(req, wrb_payload, wrb_payload_size);
req              5066 drivers/net/ethernet/emulex/benet/be_cmds.c 	be_dws_cpu_to_le(req, wrb_payload_size);
req              2334 drivers/net/ethernet/emulex/benet/be_cmds.h 		struct be_req_ue_recovery req;
req               115 drivers/net/ethernet/google/gve/gve.h 	u32 req; /* driver tracked head pointer */
req               152 drivers/net/ethernet/google/gve/gve_ethtool.c 			data[i++] = tx->req;
req               154 drivers/net/ethernet/google/gve/gve_tx.c 	gve_clean_tx_done(priv, tx, tx->req, false);
req               282 drivers/net/ethernet/google/gve/gve_tx.c 	return tx->mask + 1 - (tx->req - tx->done);
req               414 drivers/net/ethernet/google/gve/gve_tx.c 	u32 idx = tx->req & tx->mask;
req               456 drivers/net/ethernet/google/gve/gve_tx.c 		next_idx = (tx->req + 1 + i - payload_iov) & tx->mask;
req               490 drivers/net/ethernet/google/gve/gve_tx.c 		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
req               499 drivers/net/ethernet/google/gve/gve_tx.c 	tx->req += nsegs;
req               504 drivers/net/ethernet/google/gve/gve_tx.c 	gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
req               524 drivers/net/ethernet/google/gve/gve_tx.c 			   tx->q_num, __func__, idx, tx->req, tx->done);
req               391 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	struct hclge_firmware_compat_cmd *req;
req               397 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	req = (struct hclge_firmware_compat_cmd *)desc.data;
req               401 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	req->compat = cpu_to_le32(compat);
req               883 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c 	u32 *req;
req               907 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c 	req = (u32 *)req1->tcam_data;
req               909 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c 		dev_info(&hdev->pdev->dev, "%08x\n", *req++);
req               912 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c 	req = (u32 *)req2->tcam_data;
req               914 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c 		dev_info(&hdev->pdev->dev, "%08x\n", *req++);
req               917 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c 	req = (u32 *)req3->tcam_data;
req               919 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c 		dev_info(&hdev->pdev->dev, "%08x\n", *req++);
req               969 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c 	struct hclge_get_m7_bd_cmd *req;
req               976 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c 	req = (struct hclge_get_m7_bd_cmd *)desc.data;
req               985 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c 	bd_num = le32_to_cpu(req->bd_num);
req              1707 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c 	struct hclge_query_ppu_pf_other_int_dfx_cmd *req;
req              1716 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c 	req = (struct hclge_query_ppu_pf_other_int_dfx_cmd *)desc.data;
req              1717 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c 	*vf_id = le16_to_cpu(req->over_8bd_no_fe_vf_id);
req              1718 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c 	*q_id = le16_to_cpu(req->over_8bd_no_fe_qid);
req               840 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_func_status_cmd *req;
req               846 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_func_status_cmd *)desc.data;
req               857 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		if (req->pf_state)
req               862 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	ret = hclge_parse_func_status(hdev, req);
req               869 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_pf_res_cmd *req;
req               881 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_pf_res_cmd *)desc.data;
req               882 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
req               883 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
req               885 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	if (req->tx_buf_size)
req               887 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
req               893 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	if (req->dv_buf_size)
req               895 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
req               903 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
req               906 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
req               919 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
req              1199 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_cfg_param_cmd *req;
req              1204 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
req              1207 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
req              1210 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
req              1212 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
req              1216 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
req              1219 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
req              1222 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
req              1226 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
req              1227 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
req              1233 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
req              1236 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
req              1243 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
req              1244 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
req              1246 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
req              1249 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
req              1263 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_cfg_param_cmd *req;
req              1270 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
req              1278 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->offset = cpu_to_le32(offset);
req              1404 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_cfg_tso_status_cmd *req;
req              1410 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
req              1415 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->tso_mss_min = cpu_to_le16(tso_mss);
req              1420 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->tso_mss_max = cpu_to_le16(tso_mss);
req              1427 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_cfg_gro_status_cmd *req;
req              1435 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
req              1437 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->gro_en = cpu_to_le16(en ? 1 : 0);
req              1479 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_tqp_map_cmd *req;
req              1485 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_tqp_map_cmd *)desc.data;
req              1486 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->tqp_id = cpu_to_le16(tqp_pid);
req              1487 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->tqp_vf = func_id;
req              1488 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
req              1490 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
req              1491 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->tqp_vid = cpu_to_le16(tqp_vid);
req              1682 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_tx_buff_alloc_cmd *req;
req              1687 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
req              1693 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->tx_pkt_buff[i] =
req              2067 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_rx_priv_buff_cmd *req;
req              2073 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
req              2079 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->buf_num[i] =
req              2081 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->buf_num[i] |=
req              2085 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->shared_buf =
req              2100 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_rx_priv_wl_buf *req;
req              2109 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
req              2121 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			req->tc_wl[j].high =
req              2123 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			req->tc_wl[j].high |=
req              2125 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			req->tc_wl[j].low =
req              2127 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			req->tc_wl[j].low |=
req              2145 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_rx_com_thrd *req;
req              2154 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
req              2165 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			req->com_thrd[j].high =
req              2167 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			req->com_thrd[j].high |=
req              2169 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			req->com_thrd[j].low =
req              2171 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			req->com_thrd[j].low |=
req              2188 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_rx_com_wl *req;
req              2194 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_rx_com_wl *)desc.data;
req              2195 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
req              2196 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
req              2198 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
req              2199 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
req              2356 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_config_mac_speed_dup_cmd *req;
req              2360 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
req              2365 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
req              2369 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
req              2373 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
req              2377 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
req              2381 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
req              2385 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
req              2389 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
req              2393 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
req              2397 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
req              2405 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
req              2449 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_config_auto_neg_cmd *req;
req              2456 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
req              2459 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
req              2526 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_config_fec_cmd *req;
req              2532 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_config_fec_cmd *)desc.data;
req              2534 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
req              2536 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
req              2539 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
req              2666 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_link_status_cmd *req;
req              2679 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_link_status_cmd *)desc.data;
req              2680 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	link_status = req->status & HCLGE_LINK_STATUS_UP_M;
req              3221 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_vf_rst_cmd *req;
req              3224 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_vf_rst_cmd *)desc.data;
req              3226 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->dest_vfid = func_id;
req              3229 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->vf_rst = 0x1;
req              3270 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_pf_rst_sync_cmd *req;
req              3275 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
req              3290 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		} else if (req->all_vf_ready) {
req              3336 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
req              3340 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
req              3341 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->fun_reset_vfid = func_id;
req              3590 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_pf_rst_done_cmd *req;
req              3594 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
req              3596 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
req              4012 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_rss_config_cmd *req;
req              4020 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_rss_config_cmd *)desc.data;
req              4026 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
req              4027 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
req              4030 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		memcpy(req->hash_key,
req              4048 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_rss_indirection_table_cmd *req;
req              4053 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
req              4059 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->start_table_index =
req              4061 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
req              4064 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			req->rss_result[j] =
req              4081 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_rss_tc_mode_cmd *req;
req              4087 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
req              4098 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->rss_tc_mode[i] = cpu_to_le16(mode);
req              4127 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_rss_input_tuple_cmd *req;
req              4133 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
req              4136 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
req              4137 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
req              4138 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
req              4139 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
req              4140 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
req              4141 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
req              4142 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
req              4143 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
req              4256 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_rss_input_tuple_cmd *req;
req              4265 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
req              4268 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
req              4269 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
req              4270 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
req              4271 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
req              4272 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
req              4273 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
req              4274 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
req              4275 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
req              4280 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->ipv4_tcp_en = tuple_sets;
req              4283 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->ipv6_tcp_en = tuple_sets;
req              4286 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->ipv4_udp_en = tuple_sets;
req              4289 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->ipv6_udp_en = tuple_sets;
req              4292 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->ipv4_sctp_en = tuple_sets;
req              4299 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->ipv6_sctp_en = tuple_sets;
req              4302 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
req              4305 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
req              4318 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
req              4319 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
req              4320 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
req              4321 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
req              4322 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
req              4323 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
req              4324 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
req              4325 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
req              4496 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_ctrl_vector_chain_cmd *req =
req              4505 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->int_vector_id = vector_id;
req              4509 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
req              4520 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
req              4522 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
req              4523 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			req->vfid = vport->vport_id;
req              4537 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			req->int_vector_id = vector_id;
req              4542 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->int_cause_num = i;
req              4543 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->vfid = vport->vport_id;
req              4601 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_promisc_cfg_cmd *req;
req              4607 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
req              4608 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->vf_id = param->vf_id;
req              4615 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
req              4664 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_get_fd_mode_cmd *req;
req              4670 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
req              4678 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	*fd_mode = req->mode;
req              4689 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_get_fd_allocation_cmd *req;
req              4695 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
req              4704 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
req              4705 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
req              4706 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
req              4707 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
req              4714 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_set_fd_key_config_cmd *req;
req              4721 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
req              4723 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->stage = stage_num;
req              4724 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->key_select = stage->key_sel;
req              4725 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
req              4726 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
req              4727 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
req              4728 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
req              4729 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
req              4730 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
req              4849 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_fd_ad_config_cmd *req;
req              4856 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
req              4857 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->index = cpu_to_le32(loc);
req              4858 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->stage = stage;
req              4877 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->ad_data = cpu_to_le64(ad_data);
req              6233 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_config_mac_mode_cmd *req =
req              6253 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
req              6264 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_mac_vlan_switch_cmd *req;
req              6270 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
req              6275 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
req              6276 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->func_id = cpu_to_le32(func_id);
req              6287 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->switch_param = (req->switch_param & param_mask) | switch_param;
req              6288 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->param_mask = param_mask;
req              6358 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_config_mac_mode_cmd *req;
req              6363 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
req              6374 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
req              6379 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
req              6398 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_serdes_lb_cmd *req;
req              6403 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_serdes_lb_cmd *)desc.data;
req              6420 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->enable = loop_mode_b;
req              6421 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->mask = loop_mode_b;
req              6423 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->mask = loop_mode_b;
req              6444 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
req              6446 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
req              6449 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
req              6537 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_cfg_com_tqp_queue_cmd *req =
req              6542 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
req              6543 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->stream_id = cpu_to_le16(stream_id);
req              6545 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
req              6861 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
req              6871 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
req              6888 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
req              6901 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		       req,
req              6913 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		       req,
req              6931 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
req              6946 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		memcpy(desc.data, req,
req              6962 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		memcpy(mc_desc[0].data, req,
req              7030 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_umv_spc_alc_cmd *req;
req              7034 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
req              7037 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
req              7039 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->space_size = cpu_to_le32(space_size);
req              7116 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_mac_vlan_tbl_entry_cmd req;
req              7133 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	memset(&req, 0, sizeof(req));
req              7138 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req.egress_port = cpu_to_le16(egress_port);
req              7140 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hclge_prepare_mac_addr(&req, addr, false);
req              7146 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
req              7149 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
req              7187 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_mac_vlan_tbl_entry_cmd req;
req              7199 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	memset(&req, 0, sizeof(req));
req              7200 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
req              7201 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hclge_prepare_mac_addr(&req, addr, false);
req              7202 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
req              7221 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_mac_vlan_tbl_entry_cmd req;
req              7232 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	memset(&req, 0, sizeof(req));
req              7233 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
req              7234 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hclge_prepare_mac_addr(&req, addr, true);
req              7235 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
req              7245 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
req              7265 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_mac_vlan_tbl_entry_cmd req;
req              7277 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	memset(&req, 0, sizeof(req));
req              7278 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
req              7279 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hclge_prepare_mac_addr(&req, addr, true);
req              7280 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
req              7289 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			status = hclge_remove_mac_vlan_tbl(vport, &req);
req              7292 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
req              7447 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
req              7455 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
req              7562 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_vlan_filter_ctrl_cmd *req;
req              7568 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
req              7569 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->vlan_type = vlan_type;
req              7570 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->vlan_fe = filter_en ? fe_type : 0;
req              7571 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->vf_id = vf_id;
req              7700 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_vlan_filter_pf_cfg_cmd *req;
req              7713 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
req              7714 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->vlan_offset = vlan_offset_160;
req              7715 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->vlan_cfg = is_kill;
req              7716 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
req              7777 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_vport_vtag_tx_cfg_cmd *req;
req              7785 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
req              7786 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
req              7787 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
req              7788 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
req              7790 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
req              7792 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
req              7794 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
req              7796 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
req              7798 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
req              7800 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
req              7802 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
req              7805 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->vf_bitmap[bmap_index] =
req              7820 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_vport_vtag_rx_cfg_cmd *req;
req              7828 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
req              7829 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
req              7831 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
req              7833 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
req              7835 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
req              7838 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
req              7841 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->vf_bitmap[bmap_index] =
req              8388 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_config_max_frm_size_cmd *req;
req              8393 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
req              8394 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->max_frm_size = cpu_to_le16(new_mps);
req              8395 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
req              8462 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_reset_tqp_queue_cmd *req;
req              8468 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
req              8469 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
req              8471 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
req              8485 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_reset_tqp_queue_cmd *req;
req              8491 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
req              8492 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
req              8501 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
req              10067 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_set_led_state_cmd *req;
req              10073 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	req = (struct hclge_set_led_state_cmd *)desc.data;
req              10074 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
req               133 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			struct hclge_mbx_vf_to_pf_cmd *req,
req               141 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 	ring_num = req->msg[2];
req               148 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 	hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
req               150 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
req               153 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			req->msg[5]);
req               163 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			      req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
req               168 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			[req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
req               173 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 				req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
req               187 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 					     struct hclge_mbx_vf_to_pf_cmd *req)
req               190 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 	int vector_id = req->msg[1];
req               194 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 	ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
req               206 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 				     struct hclge_mbx_vf_to_pf_cmd *req)
req               208 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 	bool en_bc = req->msg[1] ? true : false;
req               566 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 					   struct hclge_mbx_vf_to_pf_cmd *req)
req               574 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 	if (!req->msg[LINK_STATUS_OFFSET])
req               575 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 		hclge_link_fail_parse(hdev, req->msg[LINK_FAIL_CODE_OFFSET]);
req               597 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 	struct hclge_mbx_vf_to_pf_cmd *req;
req               612 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 		req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
req               618 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 				 req->msg[0]);
req               626 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 		vport = &hdev->vport[req->mbx_src_vfid];
req               628 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 		switch (req->msg[0]) {
req               631 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 								req);
req               635 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 								req);
req               638 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			ret = hclge_set_vf_promisc_mode(vport, req);
req               645 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			ret = hclge_set_vf_uc_mac_addr(vport, req);
req               652 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			ret = hclge_set_vf_mc_mac_addr(vport, req, false);
req               659 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			ret = hclge_set_vf_vlan_cfg(vport, req);
req               666 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			ret = hclge_set_vf_alive(vport, req, false);
req               673 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			ret = hclge_get_vf_queue_info(vport, req, true);
req               680 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			ret = hclge_get_vf_queue_depth(vport, req, true);
req               688 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			ret = hclge_get_vf_tcinfo(vport, req, true);
req               695 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			ret = hclge_get_link_info(vport, req);
req               702 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			hclge_mbx_reset_vf_queue(vport, req);
req               705 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			hclge_reset_vf(vport, req);
req               708 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			hclge_vf_keep_alive(vport, req);
req               711 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			ret = hclge_set_vf_mtu(vport, req);
req               717 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			ret = hclge_get_queue_id_in_pf(vport, req);
req               724 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			ret = hclge_get_rss_key(vport, req);
req               731 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			hclge_get_link_mode(vport, req);
req               743 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			ret = hclge_get_vf_media_type(vport, req);
req               750 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 			hclge_handle_link_change_event(hdev, req);
req               758 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 				req->msg[0]);
req               552 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	struct hclgevf_rss_config_cmd *req;
req               560 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req = (struct hclgevf_rss_config_cmd *)desc.data;
req               567 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
req               568 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		req->hash_config |=
req               572 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		memcpy(req->hash_key,
req               602 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	struct hclgevf_rss_indirection_table_cmd *req;
req               607 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
req               612 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
req               613 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
req               615 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 			req->rss_result[j] =
req               632 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	struct hclgevf_rss_tc_mode_cmd *req;
req               641 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
req               654 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
req               656 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
req               658 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
req               827 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	struct hclgevf_rss_input_tuple_cmd *req;
req               839 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
req               842 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
req               843 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
req               844 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
req               845 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
req               846 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
req               847 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
req               848 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
req               849 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
req               854 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		req->ipv4_tcp_en = tuple_sets;
req               857 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		req->ipv6_tcp_en = tuple_sets;
req               860 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		req->ipv4_udp_en = tuple_sets;
req               863 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		req->ipv6_udp_en = tuple_sets;
req               866 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		req->ipv4_sctp_en = tuple_sets;
req               873 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		req->ipv6_sctp_en = tuple_sets;
req               876 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
req               879 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
req               892 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
req               893 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
req               894 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
req               895 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
req               896 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
req               897 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
req               898 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
req               899 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
req               960 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	struct hclgevf_rss_input_tuple_cmd *req;
req               966 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
req               968 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
req               969 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
req               970 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
req               971 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
req               972 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
req               973 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
req               974 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
req               975 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
req               998 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	struct hclge_mbx_vf_to_pf_cmd *req;
req              1004 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
req              1016 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 			req->msg[0] = type;
req              1017 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 			req->msg[1] = vector_id;
req              1020 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		req->msg[idx_offset] =
req              1022 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		req->msg[idx_offset + 1] = node->tqp_index;
req              1023 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
req              1032 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 			req->msg[2] = i;
req              1045 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 			req->msg[0] = type;
req              1046 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 			req->msg[1] = vector_id;
req              1118 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	struct hclge_mbx_vf_to_pf_cmd *req;
req              1122 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
req              1125 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
req              1126 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req->msg[1] = en_bc_pmc ? 1 : 0;
req              1144 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	struct hclgevf_cfg_com_tqp_queue_cmd *req;
req              1148 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
req              1152 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
req              1153 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req->stream_id = cpu_to_le16(stream_id);
req              1155 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;
req              2047 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	struct hclgevf_cfg_gro_status_cmd *req;
req              2056 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
req              2058 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req->gro_en = cpu_to_le16(en ? 1 : 0);
req              2532 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	struct hclgevf_query_res_cmd *req;
req              2544 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	req = (struct hclgevf_query_res_cmd *)desc.data;
req              2548 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
req              2552 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
req              2565 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
req                86 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c 	struct hclge_mbx_vf_to_pf_cmd *req;
req                90 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c 	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
req               101 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c 	req->mbx_need_resp |= need_resp ? HCLGE_MBX_NEED_RESP_BIT :
req               103 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c 	req->msg[0] = code;
req               104 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c 	req->msg[1] = subcode;
req               106 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c 		memcpy(&req->msg[2], msg_data, msg_len);
req               148 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c 	struct hclge_mbx_pf_to_vf_cmd *req;
req               166 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c 		req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
req               172 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c 				 req->msg[0]);
req               186 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c 		switch (req->msg[0]) {
req               191 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c 					 req->msg[1]);
req               194 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c 			resp->origin_mbx_msg = (req->msg[1] << 16);
req               195 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c 			resp->origin_mbx_msg |= req->msg[2];
req               196 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c 			resp->resp_status = req->msg[3];
req               198 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c 			temp = (u8 *)&req->msg[4];
req               222 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c 					 req->msg[1]);
req               228 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c 			memcpy(&msg_q[0], req->msg,
req               239 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c 				req->msg[0]);
req              1049 drivers/net/ethernet/intel/ice/ice_common.c enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
req              1053 drivers/net/ethernet/intel/ice/ice_common.c 	switch (req) {
req                20 drivers/net/ethernet/intel/ice/ice_common.h enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
req              8853 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
req              8859 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		return ixgbe_ptp_set_ts_config(adapter, req);
req              8861 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		return ixgbe_ptp_get_ts_config(adapter, req);
req              8867 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
req              2753 drivers/net/ethernet/intel/ixgbe/ixgbe_type.h 	struct ixgbe_hic_hdr2_req req;
req               875 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c 		buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
req               876 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c 		buffer.hdr.req.buf_lenh = 0;
req               877 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c 		buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
req               878 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c 		buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
req              1087 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c 	buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
req              1088 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c 	buffer.hdr.req.buf_lenh = 0;
req              1089 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c 	buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
req              1090 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c 	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
req              1175 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c 	buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
req              1176 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c 	buffer.hdr.req.buf_lenh = 0;
req              1177 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c 	buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
req              1178 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c 	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
req              1222 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c 	buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
req              1223 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c 	buffer.req.buf_lenh = 0;
req              1224 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c 	buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
req              1225 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c 	buffer.req.checksum = FW_DEFAULT_CHECKSUM;
req              1971 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	int req, desc_count;
req              1991 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
req              1992 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	desc_count += req;
req              1998 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);
req               325 drivers/net/ethernet/marvell/octeontx2/af/cgx.c static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
req               345 drivers/net/ethernet/marvell/octeontx2/af/cgx.c 	req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);
req               351 drivers/net/ethernet/marvell/octeontx2/af/cgx.c 	cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);
req               373 drivers/net/ethernet/marvell/octeontx2/af/cgx.c static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
req               383 drivers/net/ethernet/marvell/octeontx2/af/cgx.c 	err = cgx_fwi_cmd_send(req, resp, lmac);
req               504 drivers/net/ethernet/marvell/octeontx2/af/cgx.c 	u64 req = 0;
req               508 drivers/net/ethernet/marvell/octeontx2/af/cgx.c 	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_SIZE, req);
req               509 drivers/net/ethernet/marvell/octeontx2/af/cgx.c 	err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
req               519 drivers/net/ethernet/marvell/octeontx2/af/cgx.c 	u64 req = 0;
req               523 drivers/net/ethernet/marvell/octeontx2/af/cgx.c 	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_ADDR, req);
req               524 drivers/net/ethernet/marvell/octeontx2/af/cgx.c 	err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
req               642 drivers/net/ethernet/marvell/octeontx2/af/cgx.c 	u64 req = 0;
req               646 drivers/net/ethernet/marvell/octeontx2/af/cgx.c 		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
req               648 drivers/net/ethernet/marvell/octeontx2/af/cgx.c 		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
req               650 drivers/net/ethernet/marvell/octeontx2/af/cgx.c 	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
req               655 drivers/net/ethernet/marvell/octeontx2/af/cgx.c 	u64 req = 0;
req               657 drivers/net/ethernet/marvell/octeontx2/af/cgx.c 	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
req               658 drivers/net/ethernet/marvell/octeontx2/af/cgx.c 	return cgx_fwi_cmd_generic(req, resp, cgx, 0);
req               880 drivers/net/ethernet/marvell/octeontx2/af/rvu.c static int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
req              1072 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 				       struct rsrc_attach *req, u16 pcifunc)
req              1080 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) {
req              1085 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	} else if (req->npalf) {
req              1093 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) {
req              1098 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	} else if (req->nixlf) {
req              1105 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	if (req->sso) {
req              1108 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		if (req->sso > block->lf.max) {
req              1111 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 				 pcifunc, req->sso, block->lf.max);
req              1117 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		if (req->sso > mappedlfs &&
req              1118 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		    ((req->sso - mappedlfs) > free_lfs))
req              1122 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	if (req->ssow) {
req              1124 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		if (req->ssow > block->lf.max) {
req              1127 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 				 pcifunc, req->sso, block->lf.max);
req              1132 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		if (req->ssow > mappedlfs &&
req              1133 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		    ((req->ssow - mappedlfs) > free_lfs))
req              1137 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	if (req->timlfs) {
req              1139 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		if (req->timlfs > block->lf.max) {
req              1142 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 				 pcifunc, req->timlfs, block->lf.max);
req              1147 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		if (req->timlfs > mappedlfs &&
req              1148 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		    ((req->timlfs - mappedlfs) > free_lfs))
req              1152 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	if (req->cptlfs) {
req              1154 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		if (req->cptlfs > block->lf.max) {
req              1157 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 				 pcifunc, req->cptlfs, block->lf.max);
req              1162 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		if (req->cptlfs > mappedlfs &&
req              1163 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		    ((req->cptlfs - mappedlfs) > free_lfs))
req              1297 drivers/net/ethernet/marvell/octeontx2/af/rvu.c static int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
req              1301 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	u16 pcifunc = req->hdr.pcifunc;
req              1346 drivers/net/ethernet/marvell/octeontx2/af/rvu.c static int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
req              1349 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	u16 pcifunc = req->hdr.pcifunc;
req              1367 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 				struct mbox_msghdr *req)
req              1372 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	if (req->sig != OTX2_MBOX_REQ_SIG)
req              1375 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	switch (req->id) {
req              1394 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 			rsp->hdr.pcifunc = req->pcifunc;		\
req              1399 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 						    (struct _req_type *)req, \
req              1411 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
req               349 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
req               351 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
req               353 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
req               356 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 				      struct cgx_mac_addr_set_or_get *req,
req               359 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 				      struct cgx_mac_addr_set_or_get *req,
req               361 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
req               363 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
req               365 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
req               367 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
req               369 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
req               371 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
req               373 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
req               381 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 				struct npa_aq_enq_req *req,
req               384 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 				       struct hwctx_disable_req *req,
req               387 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 				  struct npa_lf_alloc_req *req,
req               389 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
req               401 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 				  struct nix_lf_alloc_req *req,
req               403 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
req               406 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 				struct nix_aq_enq_req *req,
req               409 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 				       struct hwctx_disable_req *req,
req               412 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 				     struct nix_txsch_alloc_req *req,
req               415 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 				    struct nix_txsch_free_req *req,
req               418 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 				    struct nix_txschq_config *req,
req               420 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
req               423 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 				  struct nix_vtag_config *req,
req               425 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
req               428 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 					 struct nix_rss_flowkey_cfg *req,
req               431 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 				      struct nix_set_mac_addr *req,
req               433 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
req               435 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
req               437 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
req               439 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
req               442 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 					 struct nix_mark_format_cfg  *req,
req               444 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
req               447 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 					struct nix_lso_format_cfg *req,
req               470 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 					  struct npc_mcam_alloc_entry_req *req,
req               473 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 					 struct npc_mcam_free_entry_req *req,
req               476 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 					  struct npc_mcam_write_entry_req *req,
req               479 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 					struct npc_mcam_ena_dis_entry_req *req,
req               482 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 					struct npc_mcam_ena_dis_entry_req *req,
req               485 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 					  struct npc_mcam_shift_entry_req *req,
req               488 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 				struct npc_mcam_alloc_counter_req *req,
req               491 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 		   struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
req               493 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 		struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
req               495 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 		struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp);
req               497 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 			struct npc_mcam_oper_counter_req *req,
req               500 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 			  struct npc_mcam_alloc_and_write_entry_req *req,
req               502 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
req                27 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	struct _req_type *req;						\
req                29 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
req                32 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	if (!req)							\
req                34 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
req                35 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	req->hdr.id = _id;						\
req                36 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	return req;							\
req               355 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
req               358 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
req               362 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
req               365 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
req               369 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
req               372 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	int pf = rvu_get_pf(req->hdr.pcifunc);
req               378 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
req               407 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 				      struct cgx_mac_addr_set_or_get *req,
req               410 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	int pf = rvu_get_pf(req->hdr.pcifunc);
req               415 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
req               421 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 				      struct cgx_mac_addr_set_or_get *req,
req               424 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	int pf = rvu_get_pf(req->hdr.pcifunc);
req               439 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
req               442 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	u16 pcifunc = req->hdr.pcifunc;
req               449 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
req               459 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
req               462 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	u16 pcifunc = req->hdr.pcifunc;
req               469 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
req               503 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
req               506 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
req               510 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
req               513 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
req               517 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
req               523 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	pf = rvu_get_pf(req->hdr.pcifunc);
req               552 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
req               555 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
req               559 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
req               562 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
req               450 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
req               454 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	u16 pcifunc = req->hdr.pcifunc;
req               479 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
req               484 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	switch (req->ctype) {
req               487 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
req               491 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
req               495 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
req               502 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		    (req->qidx >= (256UL << (cfg & 0xF))))
req               509 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		    (req->qidx >= (256UL << (cfg & 0xF))))
req               526 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
req               527 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
req               528 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	     (req->op == NIX_AQ_INSTOP_WRITE &&
req               529 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
req               531 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				     pcifunc, req->sq.smq))
req               537 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	inst.cindex = req->qidx;
req               538 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	inst.ctype = req->ctype;
req               539 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	inst.op = req->op;
req               552 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	switch (req->op) {
req               554 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (req->ctype == NIX_AQ_CTYPE_RQ)
req               555 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			memcpy(mask, &req->rq_mask,
req               557 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
req               558 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			memcpy(mask, &req->sq_mask,
req               560 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
req               561 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			memcpy(mask, &req->cq_mask,
req               563 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
req               564 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			memcpy(mask, &req->rss_mask,
req               566 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
req               567 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			memcpy(mask, &req->mce_mask,
req               571 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (req->ctype == NIX_AQ_CTYPE_RQ)
req               572 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
req               573 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
req               574 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
req               575 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
req               576 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
req               577 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
req               578 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
req               579 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
req               580 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
req               602 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->op == NIX_AQ_INSTOP_INIT) {
req               603 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
req               604 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			__set_bit(req->qidx, pfvf->rq_bmap);
req               605 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
req               606 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			__set_bit(req->qidx, pfvf->sq_bmap);
req               607 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
req               608 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			__set_bit(req->qidx, pfvf->cq_bmap);
req               611 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->op == NIX_AQ_INSTOP_WRITE) {
req               612 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (req->ctype == NIX_AQ_CTYPE_RQ) {
req               613 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			ena = (req->rq.ena & req->rq_mask.ena) |
req               614 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				(test_bit(req->qidx, pfvf->rq_bmap) &
req               615 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				~req->rq_mask.ena);
req               617 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				__set_bit(req->qidx, pfvf->rq_bmap);
req               619 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				__clear_bit(req->qidx, pfvf->rq_bmap);
req               621 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (req->ctype == NIX_AQ_CTYPE_SQ) {
req               622 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			ena = (req->rq.ena & req->sq_mask.ena) |
req               623 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				(test_bit(req->qidx, pfvf->sq_bmap) &
req               624 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				~req->sq_mask.ena);
req               626 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				__set_bit(req->qidx, pfvf->sq_bmap);
req               628 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				__clear_bit(req->qidx, pfvf->sq_bmap);
req               630 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (req->ctype == NIX_AQ_CTYPE_CQ) {
req               631 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			ena = (req->rq.ena & req->cq_mask.ena) |
req               632 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				(test_bit(req->qidx, pfvf->cq_bmap) &
req               633 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				~req->cq_mask.ena);
req               635 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				__set_bit(req->qidx, pfvf->cq_bmap);
req               637 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				__clear_bit(req->qidx, pfvf->cq_bmap);
req               643 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (req->op == NIX_AQ_INSTOP_READ) {
req               644 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			if (req->ctype == NIX_AQ_CTYPE_RQ)
req               647 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			else if (req->ctype == NIX_AQ_CTYPE_SQ)
req               650 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			else if (req->ctype == NIX_AQ_CTYPE_CQ)
req               653 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			else if (req->ctype == NIX_AQ_CTYPE_RSS)
req               656 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			else if (req->ctype == NIX_AQ_CTYPE_MCE)
req               666 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
req               668 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
req               678 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	aq_req.hdr.pcifunc = req->hdr.pcifunc;
req               680 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->ctype == NIX_AQ_CTYPE_CQ) {
req               686 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->ctype == NIX_AQ_CTYPE_SQ) {
req               692 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->ctype == NIX_AQ_CTYPE_RQ) {
req               699 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	aq_req.ctype = req->ctype;
req               710 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				(req->ctype == NIX_AQ_CTYPE_CQ) ?
req               711 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				"CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
req               720 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				struct nix_aq_enq_req *req,
req               723 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	return rvu_nix_aq_enq_inst(rvu, req, rsp);
req               727 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				       struct hwctx_disable_req *req,
req               730 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	return nix_lf_hwctx_disable(rvu, req);
req               734 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				  struct nix_lf_alloc_req *req,
req               739 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	u16 pcifunc = req->hdr.pcifunc;
req               745 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
req               759 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->npa_func) {
req               761 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
req               762 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			req->npa_func = pcifunc;
req               763 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
req               768 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->sso_func) {
req               770 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
req               771 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			req->sso_func = pcifunc;
req               772 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
req               781 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
req               782 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			    !is_power_of_2(req->rss_sz)))
req               785 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->rss_sz &&
req               786 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
req               801 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
req               805 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
req               813 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	cfg = BIT_ULL(36) | (req->rq_cnt - 1);
req               818 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
req               822 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
req               828 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	cfg = BIT_ULL(36) | (req->sq_cnt - 1);
req               833 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
req               837 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
req               843 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	cfg = BIT_ULL(36) | (req->cq_cnt - 1);
req               849 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				 req->rss_sz, req->rss_grps, hwctx_size);
req               888 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->npa_func)
req               889 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		cfg = req->npa_func;
req               890 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->sso_func)
req               891 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		cfg |= (u64)req->sso_func << 16;
req               893 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	cfg |= (u64)req->xqe_sz << 33;
req               897 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
req               937 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
req               941 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	u16 pcifunc = req->hdr.pcifunc;
req               972 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 					 struct nix_mark_format_cfg  *req,
req               975 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	u16 pcifunc = req->hdr.pcifunc;
req               990 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	cfg = (((u32)req->offset & 0x7) << 16) |
req               991 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	      (((u32)req->y_mask & 0xF) << 12) |
req               992 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	      (((u32)req->y_val & 0xF) << 8) |
req               993 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
req              1132 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				     struct nix_txsch_alloc_req *req,
req              1135 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	u16 pcifunc = req->hdr.pcifunc;
req              1156 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		req_schq = req->schq_contig[lvl] + req->schq[lvl];
req              1164 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			if (req->schq_contig[lvl] ||
req              1165 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			    req->schq[lvl] > 2 ||
req              1177 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (req->schq_contig[lvl] &&
req              1178 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
req              1188 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		rsp->schq_contig[lvl] = req->schq_contig[lvl];
req              1190 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		rsp->schq[lvl] = req->schq[lvl];
req              1192 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (!req->schq[lvl] && !req->schq_contig[lvl])
req              1209 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (req->schq_contig[lvl]) {
req              1211 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 						     req->schq_contig[lvl]);
req              1213 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
req              1223 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		for (idx = 0; idx < req->schq[lvl]; idx++) {
req              1320 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			       struct nix_txsch_free_req *req)
req              1324 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	u16 pcifunc = req->hdr.pcifunc;
req              1342 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	lvl = req->schq_lvl;
req              1343 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	schq = req->schq;
req              1387 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				    struct nix_txsch_free_req *req,
req              1390 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->flags & TXSCHQ_FREE_ALL)
req              1391 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		return nix_txschq_free(rvu, req->hdr.pcifunc);
req              1393 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		return nix_txschq_free_one(rvu, req);
req              1490 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				    struct nix_txschq_config *req,
req              1493 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	u16 schq, pcifunc = req->hdr.pcifunc;
req              1503 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
req              1504 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
req              1519 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	txsch = &nix_hw->txsch[req->lvl];
req              1526 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	    req->lvl == NIX_TXSCH_LVL_TL1) {
req              1530 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	for (idx = 0; idx < req->num_regs; idx++) {
req              1531 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		reg = req->reg[idx];
req              1532 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		regval = req->regval[idx];
req              1577 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			   struct nix_vtag_config *req)
req              1579 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	u64 regval = req->vtag_size;
req              1581 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
req              1584 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->rx.capture_vtag)
req              1586 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->rx.strip_vtag)
req              1590 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
req              1595 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				  struct nix_vtag_config *req,
req              1599 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	u16 pcifunc = req->hdr.pcifunc;
req              1610 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->cfg_type) {
req              1611 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
req              1947 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
req              1951 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	u16 pcifunc = req->hdr.pcifunc;
req              2155 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 					 struct nix_rss_flowkey_cfg *req,
req              2159 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	u16 pcifunc = req->hdr.pcifunc;
req              2175 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
req              2179 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 						  req->flowkey_cfg);
req              2184 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
req              2185 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				       alg_idx, req->mcam_index);
req              2259 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				      struct nix_set_mac_addr *req,
req              2263 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	u16 pcifunc = req->hdr.pcifunc;
req              2276 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
req              2279 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				    pfvf->rx_chan_base, req->mac_addr);
req              2286 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
req              2291 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	u16 pcifunc = req->hdr.pcifunc;
req              2304 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->mode & NIX_RX_MODE_PROMISC)
req              2306 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	else if (req->mode & NIX_RX_MODE_ALLMULTI)
req              2323 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			      struct nix_frs_cfg *req, u16 pcifunc)
req              2333 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	pfvf->maxlen = req->maxlen;
req              2334 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->update_minlen)
req              2335 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		pfvf->minlen = req->minlen;
req              2337 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	maxlen = req->maxlen;
req              2338 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	minlen = req->update_minlen ? req->minlen : 0;
req              2348 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (req->update_minlen &&
req              2357 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->update_minlen &&
req              2362 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	req->maxlen = maxlen;
req              2363 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->update_minlen)
req              2364 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		req->minlen = minlen;
req              2367 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
req              2371 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	u16 pcifunc = req->hdr.pcifunc;
req              2387 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
req              2390 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
req              2394 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (!req->update_smq)
req              2404 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
req              2405 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (req->update_minlen)
req              2406 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
req              2413 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->sdp_link) {
req              2433 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	nix_find_link_frs(rvu, req, pcifunc);
req              2437 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
req              2438 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->update_minlen)
req              2439 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		cfg = (cfg & ~0xFFFFULL) | req->minlen;
req              2442 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->sdp_link || pf == 0)
req              2450 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	cfg |=  ((lmac_fifo_len - req->maxlen) / 16) << 12;
req              2457 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
req              2463 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	u16 pcifunc = req->hdr.pcifunc;
req              2516 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
req              2520 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	u16 pcifunc = req->hdr.pcifunc;
req              2538 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->len_verify & BIT(0))
req              2543 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->len_verify & BIT(1))
req              2548 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	if (req->csum_verify & BIT(0))
req              2845 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
req              2848 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	u16 pcifunc = req->hdr.pcifunc;
req              2859 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
req              2862 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	u16 pcifunc = req->hdr.pcifunc;
req              2911 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 					struct nix_lso_format_cfg *req,
req              2914 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	u16 pcifunc = req->hdr.pcifunc;
req              2934 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			if (req->fields[f] != (reg & req->field_mask))
req              2956 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			    req->fields[f]);
req                55 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
req                59 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	u16 pcifunc = req->hdr.pcifunc;
req                69 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize)
req                88 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	inst.cindex = req->aura_id;
req                90 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	inst.ctype = req->ctype;
req                91 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	inst.op = req->op;
req               104 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	switch (req->op) {
req               107 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 		if (req->ctype == NPA_AQ_CTYPE_AURA) {
req               108 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 			memcpy(mask, &req->aura_mask,
req               110 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 			memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
req               112 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 			memcpy(mask, &req->pool_mask,
req               114 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 			memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
req               118 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 		if (req->ctype == NPA_AQ_CTYPE_AURA) {
req               119 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 			if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) {
req               124 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 			req->aura.pool_addr = pfvf->pool_ctx->iova +
req               125 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 			(req->aura.pool_addr * pfvf->pool_ctx->entry_sz);
req               126 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 			memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
req               128 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 			memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
req               154 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	if (req->ctype == NPA_AQ_CTYPE_AURA) {
req               155 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 		if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena)
req               156 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 			__set_bit(req->aura_id, pfvf->aura_bmap);
req               157 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 		if (req->op == NPA_AQ_INSTOP_WRITE) {
req               158 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 			ena = (req->aura.ena & req->aura_mask.ena) |
req               159 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 				(test_bit(req->aura_id, pfvf->aura_bmap) &
req               160 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 				~req->aura_mask.ena);
req               162 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 				__set_bit(req->aura_id, pfvf->aura_bmap);
req               164 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 				__clear_bit(req->aura_id, pfvf->aura_bmap);
req               169 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	if (req->ctype == NPA_AQ_CTYPE_POOL) {
req               170 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 		if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
req               171 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 			__set_bit(req->aura_id, pfvf->pool_bmap);
req               172 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 		if (req->op == NPA_AQ_INSTOP_WRITE) {
req               173 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 			ena = (req->pool.ena & req->pool_mask.ena) |
req               174 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 				(test_bit(req->aura_id, pfvf->pool_bmap) &
req               175 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 				~req->pool_mask.ena);
req               177 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 				__set_bit(req->aura_id, pfvf->pool_bmap);
req               179 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 				__clear_bit(req->aura_id, pfvf->pool_bmap);
req               186 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 		if (req->op == NPA_AQ_INSTOP_READ) {
req               187 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 			if (req->ctype == NPA_AQ_CTYPE_AURA)
req               199 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
req               201 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
req               211 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	aq_req.hdr.pcifunc = req->hdr.pcifunc;
req               213 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	if (req->ctype == NPA_AQ_CTYPE_POOL) {
req               218 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	} else if (req->ctype == NPA_AQ_CTYPE_AURA) {
req               225 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	aq_req.ctype = req->ctype;
req               236 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 				(req->ctype == NPA_AQ_CTYPE_AURA) ?
req               245 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 				struct npa_aq_enq_req *req,
req               248 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	return rvu_npa_aq_enq_inst(rvu, req, rsp);
req               252 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 				       struct hwctx_disable_req *req,
req               255 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	return npa_lf_hwctx_disable(rvu, req);
req               277 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 				  struct npa_lf_alloc_req *req,
req               282 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	u16 pcifunc = req->hdr.pcifunc;
req               288 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	if (req->aura_sz > NPA_AURA_SZ_MAX ||
req               289 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	    req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
req               314 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 			 NPA_AURA_COUNT(req->aura_sz), hwctx_size);
req               318 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
req               325 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size);
req               329 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
req               348 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	cfg |= (req->aura_sz << 16) | BIT_ULL(34);
req               375 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
req               379 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	u16 pcifunc = req->hdr.pcifunc;
req              1399 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 				   struct npc_mcam_alloc_entry_req *req,
req              1404 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (req->priority == NPC_MCAM_HIGHER_PRIO)
req              1418 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	*start = req->ref_entry + 1;
req              1421 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (req->ref_entry >= mcam->hprio_end)
req              1426 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (fcnt > req->count)
req              1444 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	*end = req->ref_entry;
req              1446 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (req->ref_entry <= mcam->lprio_start)
req              1451 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (fcnt < req->count)
req              1458 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 				  struct npc_mcam_alloc_entry_req *req,
req              1494 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (req->priority) {
req              1495 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 		npc_get_mcam_search_range_priority(mcam, req,
req              1511 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (fcnt > req->count) {
req              1514 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	} else if ((fcnt + (hp_fcnt / 2) + (lp_fcnt / 2)) > req->count) {
req              1542 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (req->contig) {
req              1547 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 						req->count, &max_contig);
req              1559 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 		for (entry = 0; entry < req->count; entry++) {
req              1577 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (!req->priority && (rsp->count < req->count) &&
req              1588 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (req->priority && rsp->count < req->count) {
req              1589 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 		if (req->priority == NPC_MCAM_LOWER_PRIO &&
req              1590 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 		    (start != (req->ref_entry + 1))) {
req              1591 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 			start = req->ref_entry + 1;
req              1595 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 		} else if ((req->priority == NPC_MCAM_HIGHER_PRIO) &&
req              1596 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 			   ((end - start) != req->ref_entry)) {
req              1598 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 			end = req->ref_entry;
req              1608 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (!req->contig && rsp->count) {
req              1620 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 		index = req->contig ?
req              1635 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 					  struct npc_mcam_alloc_entry_req *req,
req              1639 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	u16 pcifunc = req->hdr.pcifunc;
req              1650 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (req->priority && req->ref_entry >= mcam->bmap_entries)
req              1656 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) ||
req              1657 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	    ((req->ref_entry == (mcam->bmap_entries - 1)) &&
req              1658 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	     req->priority == NPC_MCAM_LOWER_PRIO))
req              1664 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES)
req              1671 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp);
req              1675 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 					 struct npc_mcam_free_entry_req *req,
req              1679 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	u16 pcifunc = req->hdr.pcifunc;
req              1693 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (req->all)
req              1696 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
req              1700 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	mcam->entry2pfvf_map[req->entry] = 0;
req              1701 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	npc_mcam_clear_bit(mcam, req->entry);
req              1702 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
req              1705 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	cntr = mcam->entry2cntr_map[req->entry];
req              1708 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 					      req->entry, cntr);
req              1721 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 					  struct npc_mcam_write_entry_req *req,
req              1725 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	u16 pcifunc = req->hdr.pcifunc;
req              1733 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
req              1737 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (req->set_cntr &&
req              1738 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	    npc_mcam_verify_counter(mcam, pcifunc, req->cntr)) {
req              1743 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX) {
req              1748 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, req->intf,
req              1749 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 			      &req->entry_data, req->enable_entry);
req              1751 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (req->set_cntr)
req              1753 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 					    req->entry, req->cntr);
req              1762 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 					struct npc_mcam_ena_dis_entry_req *req,
req              1766 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	u16 pcifunc = req->hdr.pcifunc;
req              1774 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
req              1779 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, true);
req              1785 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 					struct npc_mcam_ena_dis_entry_req *req,
req              1789 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	u16 pcifunc = req->hdr.pcifunc;
req              1797 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
req              1802 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
req              1808 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 					  struct npc_mcam_shift_entry_req *req,
req              1812 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	u16 pcifunc = req->hdr.pcifunc;
req              1821 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (req->shift_count > NPC_MCAM_MAX_SHIFTS)
req              1825 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	for (index = 0; index < req->shift_count; index++) {
req              1826 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 		old_entry = req->curr_entry[index];
req              1827 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 		new_entry = req->new_entry[index];
req              1867 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (index != req->shift_count) {
req              1877 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 			struct npc_mcam_alloc_counter_req *req,
req              1881 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	u16 pcifunc = req->hdr.pcifunc;
req              1896 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS)
req              1909 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (req->contig) {
req              1915 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 						req->count, &max_contig);
req              1926 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 		for (cntr = 0; cntr < req->count; cntr++) {
req              1941 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 		struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
req              1952 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
req              1959 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	mcam->cntr2pfvf_map[req->cntr] = NPC_MCAM_INVALID_MAP;
req              1960 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	rvu_free_rsrc(&mcam->counters, req->cntr);
req              1964 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 		if (!mcam->cntr_refcnt[req->cntr])
req              1970 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 		if (mcam->entry2cntr_map[index] != req->cntr)
req              1975 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 					      index, req->cntr);
req              1983 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 		struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp)
req              1994 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	rc = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
req              1999 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (!req->all) {
req              2000 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 		rc = npc_mcam_verify_entry(mcam, req->hdr.pcifunc, req->entry);
req              2004 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 					      req->entry, req->cntr);
req              2010 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 		if (!mcam->cntr_refcnt[req->cntr])
req              2016 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 		if (mcam->entry2cntr_map[index] != req->cntr)
req              2021 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 					      index, req->cntr);
req              2029 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 		struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
req              2039 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
req              2044 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr), 0x00);
req              2050 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 			struct npc_mcam_oper_counter_req *req,
req              2061 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
req              2066 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr));
req              2073 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 			  struct npc_mcam_alloc_and_write_entry_req *req,
req              2089 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX)
req              2093 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	entry_req.hdr.pcifunc = req->hdr.pcifunc;
req              2095 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	entry_req.priority = req->priority;
req              2096 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	entry_req.ref_entry = req->ref_entry;
req              2109 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (!req->alloc_cntr)
req              2113 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	cntr_req.hdr.pcifunc = req->hdr.pcifunc;
req              2131 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	npc_config_mcam_entry(rvu, mcam, blkaddr, entry, req->intf,
req              2132 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 			      &req->entry_data, req->enable_entry);
req              2134 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c 	if (req->alloc_cntr)
req              2158 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
req              3346 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	struct res_eq *req;
req              3365 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	err = get_res(dev, slave, res_id, RES_EQ, &req);
req              3369 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	if (req->com.from_state != RES_EQ_HW) {
req                50 drivers/net/ethernet/mellanox/mlx5/core/devlink.c mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
req                58 drivers/net/ethernet/mellanox/mlx5/core/devlink.c 	err = devlink_info_driver_name_put(req, DRIVER_NAME);
req                62 drivers/net/ethernet/mellanox/mlx5/core/devlink.c 	err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id);
req                73 drivers/net/ethernet/mellanox/mlx5/core/devlink.c 	err = devlink_info_version_running_put(req, "fw.version", version_str);
req                84 drivers/net/ethernet/mellanox/mlx5/core/devlink.c 	err = devlink_info_version_stored_put(req, "fw.version", version_str);
req                84 drivers/net/ethernet/mellanox/mlx5/core/en.h #define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
req                85 drivers/net/ethernet/mellanox/mlx5/core/en.h 	max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
req               431 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
req               432 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	struct mlx5_core_dev *dev = req->dev;
req               435 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	if (req->npages < 0)
req               436 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
req               437 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 				    req->ec_function);
req               438 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	else if (req->npages > 0)
req               439 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 		err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);
req               443 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 			       req->npages < 0 ? "reclaim" : "give", err);
req               445 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	kfree(req);
req               455 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	struct mlx5_pages_req *req;
req               472 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	req = kzalloc(sizeof(*req), GFP_ATOMIC);
req               473 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	if (!req) {
req               478 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	req->dev = dev;
req               479 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	req->func_id = func_id;
req               480 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	req->npages = npages;
req               481 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	req->ec_function = ec_function;
req               482 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	INIT_WORK(&req->work, pages_work_handler);
req               483 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	queue_work(dev->priv.pg_wq, &req->work);
req              1035 drivers/net/ethernet/mellanox/mlx5/core/vport.c 				       struct mlx5_hca_vport_context *req)
req              1066 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
req              1067 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
req              1068 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
req              1069 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
req              1070 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
req              1071 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
req              1072 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
req              1073 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
req              1074 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
req              1075 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
req              1076 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
req              1077 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
req              1078 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
req              1079 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	MLX5_SET(hca_vport_context, ctx, lid, req->lid);
req              1080 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
req              1081 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
req              1082 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
req              1083 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
req              1084 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
req              1085 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
req              1086 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
req               947 drivers/net/ethernet/mellanox/mlxsw/core.c mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
req               957 drivers/net/ethernet/mellanox/mlxsw/core.c 	err = devlink_info_driver_name_put(req,
req               970 drivers/net/ethernet/mellanox/mlxsw/core.c 	err = devlink_info_version_fixed_put(req, "hw.revision", buf);
req               974 drivers/net/ethernet/mellanox/mlxsw/core.c 	err = devlink_info_version_fixed_put(req, "fw.psid", fw_info_psid);
req               979 drivers/net/ethernet/mellanox/mlxsw/core.c 	err = devlink_info_version_running_put(req, "fw.version", buf);
req              1052 drivers/net/ethernet/micrel/ks8851.c static int ks8851_net_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
req              1059 drivers/net/ethernet/micrel/ks8851.c 	return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
req               964 drivers/net/ethernet/micrel/ks8851_mll.c static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
req               971 drivers/net/ethernet/micrel/ks8851_mll.c 	return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
req               419 drivers/net/ethernet/mscc/ocelot_ace.c 		val = ((arp->req == OCELOT_VCAP_BIT_0 ? 1 : 0) |
req               421 drivers/net/ethernet/mscc/ocelot_ace.c 		msk = ((arp->req == OCELOT_VCAP_BIT_ANY ? 0 : 1) |
req               121 drivers/net/ethernet/mscc/ocelot_ace.h 	enum ocelot_vcap_bit req;	/* Opcode request/reply */
req               146 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	int req ____cacheline_aligned;	/* transmit slots submitted     */
req              1049 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		ss->tx.req = 0;
req              1438 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		if (tx->req == tx->done) {
req              1448 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	    tx->req - tx->done < (tx->mask >> 1) &&
req              1846 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		data[i++] = (unsigned int)ss->tx.req;
req              2111 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	while (tx->done != tx->req) {
req              2526 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	starting_slot = tx->req;
req              2551 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	idx = tx->req & tx->mask;
req              2581 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	tx->req += cnt;
req              2593 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	idx = tx->req & tx->mask;
req              2629 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	struct mcp_kreq_ether_send *req;
req              2648 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	req = tx->req_list;
req              2649 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	avail = tx->mask - 1 - (tx->req - tx->done);
req              2733 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	idx = tx->req & tx->mask;
req              2781 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 				(req - rdma_count)->rdma_count = rdma_count + 1;
req              2806 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			req->addr_high = high_swapped;
req              2807 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			req->addr_low = htonl(low);
req              2808 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			req->pseudo_hdr_offset = htons(pseudo_hdr_offset);
req              2809 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			req->pad = 0;	/* complete solid 16-byte block; does this matter? */
req              2810 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			req->rdma_count = 1;
req              2811 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			req->length = htons(seglen);
req              2812 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			req->cksum_offset = cksum_offset;
req              2813 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			req->flags = flags | ((cum_len & 1) * odd_flag);
req              2819 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			req++;
req              2842 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		idx = (count + tx->req) & tx->mask;
req              2847 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	(req - rdma_count)->rdma_count = rdma_count;
req              2850 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			req--;
req              2851 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			req->flags |= MXGEFW_FLAGS_TSO_LAST;
req              2852 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		} while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
req              2854 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	idx = ((count - 1) + tx->req) & tx->mask;
req              3375 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	if (ss->tx.req != ss->tx.done &&
req              3386 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 				    slice, ss->tx.queue_active, ss->tx.req,
req              3400 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	ss->watchdog_tx_req = ss->tx.req;
req                85 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	struct cmsg_req_map_alloc_tbl *req;
req                90 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
req                94 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	req = (void *)skb->data;
req                95 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	req->key_size = cpu_to_be32(map->key_size);
req                96 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	req->value_size = cpu_to_be32(map->value_size);
req                97 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	req->max_entries = cpu_to_be32(map->max_entries);
req                98 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	req->map_type = cpu_to_be32(map->map_type);
req                99 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	req->map_flags = 0;
req               123 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	struct cmsg_req_map_free_tbl *req;
req               127 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
req               133 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	req = (void *)skb->data;
req               134 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	req->tid = cpu_to_be32(nfp_map->tid);
req               152 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c nfp_bpf_ctrl_req_key(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
req               155 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	return &req->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
req               159 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c nfp_bpf_ctrl_req_val(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
req               162 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	return &req->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
req               300 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	struct cmsg_req_map_op *req;
req               321 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	req = (void *)skb->data;
req               322 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	req->tid = cpu_to_be32(nfp_map->tid);
req               323 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	req->count = cpu_to_be32(n_entries);
req               324 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	req->flags = cpu_to_be32(flags);
req               328 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 		memcpy(nfp_bpf_ctrl_req_key(bpf, req, 0), key, map->key_size);
req               330 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 		memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
req                35 drivers/net/ethernet/netronome/nfp/ccm.h #define __NFP_CCM_REPLY(req)		(BIT(NFP_CCM_TYPE_REPLY_BIT) | (req))
req               136 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	struct nfp_crypto_req_del *req;
req               139 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
req               143 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	req = (void *)skb->data;
req               144 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	req->ep_id = 0;
req               145 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	memcpy(req->handle, fw_handle, sizeof(req->handle));
req               174 drivers/net/ethernet/netronome/nfp/crypto/tls.c nfp_net_tls_set_ipv4(struct nfp_net *nn, struct nfp_crypto_req_add_v4 *req,
req               179 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	req->front.key_len += sizeof(__be32) * 2;
req               182 drivers/net/ethernet/netronome/nfp/crypto/tls.c 		nfp_net_tls_assign_conn_id(nn, &req->front);
req               184 drivers/net/ethernet/netronome/nfp/crypto/tls.c 		req->src_ip = inet->inet_daddr;
req               185 drivers/net/ethernet/netronome/nfp/crypto/tls.c 		req->dst_ip = inet->inet_saddr;
req               188 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	return &req->back;
req               192 drivers/net/ethernet/netronome/nfp/crypto/tls.c nfp_net_tls_set_ipv6(struct nfp_net *nn, struct nfp_crypto_req_add_v6 *req,
req               198 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	req->front.key_len += sizeof(struct in6_addr) * 2;
req               201 drivers/net/ethernet/netronome/nfp/crypto/tls.c 		nfp_net_tls_assign_conn_id(nn, &req->front);
req               203 drivers/net/ethernet/netronome/nfp/crypto/tls.c 		memcpy(req->src_ip, &sk->sk_v6_daddr, sizeof(req->src_ip));
req               204 drivers/net/ethernet/netronome/nfp/crypto/tls.c 		memcpy(req->dst_ip, &np->saddr, sizeof(req->dst_ip));
req               208 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	return &req->back;
req               276 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	void *req;
req               325 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	req = (void *)skb->data;
req               327 drivers/net/ethernet/netronome/nfp/crypto/tls.c 		back = nfp_net_tls_set_ipv6(nn, req, sk, direction);
req               329 drivers/net/ethernet/netronome/nfp/crypto/tls.c 		back = nfp_net_tls_set_ipv4(nn, req, sk, direction);
req               426 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	struct nfp_crypto_req_update *req;
req               432 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), flags);
req               437 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	req = (void *)skb->data;
req               438 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	req->ep_id = 0;
req               439 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	req->opcode = nfp_tls_1_2_dir_to_opcode(direction);
req               440 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	memset(req->resv, 0, sizeof(req->resv));
req               441 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	memcpy(req->handle, ntls->fw_handle, sizeof(ntls->fw_handle));
req               442 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	req->tcp_seq = cpu_to_be32(seq);
req               443 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));
req               467 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	struct nfp_crypto_req_reset *req;
req               470 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
req               474 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	req = (void *)skb->data;
req               475 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	req->ep_id = 0;
req               187 drivers/net/ethernet/netronome/nfp/nfp_devlink.c nfp_devlink_versions_get_hwinfo(struct nfp_pf *pf, struct devlink_info_req *req)
req               202 drivers/net/ethernet/netronome/nfp/nfp_devlink.c 		err = devlink_info_version_fixed_put(req, info->key, val);
req               224 drivers/net/ethernet/netronome/nfp/nfp_devlink.c nfp_devlink_versions_get_nsp(struct devlink_info_req *req, bool flash,
req               245 drivers/net/ethernet/netronome/nfp/nfp_devlink.c 			err = devlink_info_version_stored_put(req, info->key,
req               248 drivers/net/ethernet/netronome/nfp/nfp_devlink.c 			err = devlink_info_version_running_put(req, info->key,
req               258 drivers/net/ethernet/netronome/nfp/nfp_devlink.c nfp_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
req               267 drivers/net/ethernet/netronome/nfp/nfp_devlink.c 	err = devlink_info_driver_name_put(req, "nfp");
req               287 drivers/net/ethernet/netronome/nfp/nfp_devlink.c 		err = devlink_info_serial_number_put(req, buf);
req               310 drivers/net/ethernet/netronome/nfp/nfp_devlink.c 		err = nfp_devlink_versions_get_nsp(req, false,
req               315 drivers/net/ethernet/netronome/nfp/nfp_devlink.c 		err = nfp_devlink_versions_get_nsp(req, true,
req               325 drivers/net/ethernet/netronome/nfp/nfp_devlink.c 	return nfp_devlink_versions_get_hwinfo(pf, req);
req              1144 drivers/net/ethernet/nxp/lpc_eth.c static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
req              1154 drivers/net/ethernet/nxp/lpc_eth.c 	return phy_mii_ioctl(phydev, req, cmd);
req                12 drivers/net/ethernet/pensando/ionic/ionic_devlink.c static int ionic_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
req                20 drivers/net/ethernet/pensando/ionic/ionic_devlink.c 	err = devlink_info_driver_name_put(req, IONIC_DRV_NAME);
req                24 drivers/net/ethernet/pensando/ionic/ionic_devlink.c 	err = devlink_info_version_running_put(req,
req                31 drivers/net/ethernet/pensando/ionic/ionic_devlink.c 	err = devlink_info_version_fixed_put(req,
req                38 drivers/net/ethernet/pensando/ionic/ionic_devlink.c 	err = devlink_info_version_fixed_put(req,
req                44 drivers/net/ethernet/pensando/ionic/ionic_devlink.c 	err = devlink_info_serial_number_put(req, idev->dev_info.serial_num);
req               681 drivers/net/ethernet/qlogic/netxen/netxen_nic.h 	struct _cdrp_cmd req;
req                47 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	NXWR32(adapter, NX_ARG1_CRB_OFFSET, cmd->req.arg1);
req                49 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	NXWR32(adapter, NX_ARG2_CRB_OFFSET, cmd->req.arg2);
req                51 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	NXWR32(adapter, NX_ARG3_CRB_OFFSET, cmd->req.arg3);
req                53 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd->req.cmd));
req                88 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.cmd = NX_CDRP_CMD_TEMP_SIZE;
req               124 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.cmd = NX_CDRP_CMD_GET_TEMP_HDR;
req               125 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg1 = LSD(md_template_addr);
req               126 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg2 = MSD(md_template_addr);
req               127 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg3 |= size;
req               223 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.cmd = NX_CDRP_CMD_SET_MTU;
req               224 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg1 = recv_ctx->context_id;
req               225 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg2 = mtu;
req               226 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg3 = 0;
req               244 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.cmd = NX_CDRP_CMD_CONFIG_GBE_PORT;
req               245 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg1 = speed;
req               246 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg2 = duplex;
req               247 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg3 = autoneg;
req               347 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg1 = (u32)(phys_addr >> 32);
req               348 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg2 = (u32)(phys_addr & 0xffffffff);
req               349 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg3 = rq_size;
req               350 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.cmd = NX_CDRP_CMD_CREATE_RX_CTX;
req               403 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg1 = recv_ctx->context_id;
req               404 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg2 = NX_DESTROY_CTX_RESET;
req               405 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg3 = 0;
req               406 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.cmd = NX_CDRP_CMD_DESTROY_RX_CTX;
req               472 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg1 = (u32)(phys_addr >> 32);
req               473 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
req               474 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg3 = rq_size;
req               475 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.cmd = NX_CDRP_CMD_CREATE_TX_CTX;
req               508 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg1 = adapter->tx_context_id;
req               509 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg2 = NX_DESTROY_CTX_RESET;
req               510 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg3 = 0;
req               511 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.cmd = NX_CDRP_CMD_DESTROY_TX_CTX;
req               526 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg1 = reg;
req               527 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg2 = 0;
req               528 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg3 = 0;
req               529 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.cmd = NX_CDRP_CMD_READ_PHY;
req               549 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg1 = reg;
req               550 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg2 = val;
req               551 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.arg3 = 0;
req               552 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	cmd.req.cmd = NX_CDRP_CMD_WRITE_PHY;
req               604 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	nx_nic_req_t req;
req               608 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	memset(&req, 0, sizeof(nx_nic_req_t));
req               609 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23);
req               612 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.req_hdr = cpu_to_le64(word);
req               614 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	mac_req = (nx_mac_req_t *)&req.words[0];
req               618 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
req               698 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	nx_nic_req_t req;
req               701 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	memset(&req, 0, sizeof(nx_nic_req_t));
req               703 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
req               707 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.req_hdr = cpu_to_le64(word);
req               709 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.words[0] = cpu_to_le64(mode);
req               712 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 				(struct cmd_desc_type0 *)&req, 1);
req               743 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	nx_nic_req_t req;
req               747 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	memset(&req, 0, sizeof(nx_nic_req_t));
req               750 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
req               753 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.req_hdr = cpu_to_le64(word[0]);
req               757 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 		req.words[i] = cpu_to_le64(word[i]);
req               759 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
req               770 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	nx_nic_req_t req;
req               777 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	memset(&req, 0, sizeof(nx_nic_req_t));
req               779 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
req               782 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.req_hdr = cpu_to_le64(word);
req               784 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.words[0] = cpu_to_le64(enable);
req               786 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
req               797 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	nx_nic_req_t req;
req               804 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	memset(&req, 0, sizeof(nx_nic_req_t));
req               806 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
req               810 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.req_hdr = cpu_to_le64(word);
req               812 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.words[0] = cpu_to_le64(enable);
req               814 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
req               830 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	nx_nic_req_t req;
req               841 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	memset(&req, 0, sizeof(nx_nic_req_t));
req               842 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
req               845 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.req_hdr = cpu_to_le64(word);
req               861 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.words[0] = cpu_to_le64(word);
req               863 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 		req.words[i+1] = cpu_to_le64(key[i]);
req               866 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
req               877 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	nx_nic_req_t req;
req               881 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	memset(&req, 0, sizeof(nx_nic_req_t));
req               882 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
req               885 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.req_hdr = cpu_to_le64(word);
req               887 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.words[0] = cpu_to_le64(cmd);
req               888 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	memcpy(&req.words[1], &ip, sizeof(u32));
req               890 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
req               901 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	nx_nic_req_t req;
req               905 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	memset(&req, 0, sizeof(nx_nic_req_t));
req               906 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
req               909 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.req_hdr = cpu_to_le64(word);
req               910 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.words[0] = cpu_to_le64(enable | (enable << 8));
req               912 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
req               923 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	nx_nic_req_t req;
req               930 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	memset(&req, 0, sizeof(nx_nic_req_t));
req               931 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
req               937 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	req.req_hdr = cpu_to_le64(word);
req               939 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
req              1560 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
req              1583 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
req              1584 drivers/net/ethernet/qlogic/qed/qed_sriov.c 		if (req->vfdev_info.capabilities &
req              1586 drivers/net/ethernet/qlogic/qed/qed_sriov.c 			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
req              1597 drivers/net/ethernet/qlogic/qed/qed_sriov.c 				req->vfdev_info.eth_fp_hsi_major,
req              1598 drivers/net/ethernet/qlogic/qed/qed_sriov.c 				req->vfdev_info.eth_fp_hsi_minor,
req              1607 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
req              1615 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	memcpy(&vf->acquire, req, sizeof(vf->acquire));
req              1617 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	vf->opaque_fid = req->vfdev_info.opaque_fid;
req              1619 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	vf->vf_bulletin = req->bulletin_addr;
req              1620 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
req              1621 drivers/net/ethernet/qlogic/qed/qed_sriov.c 			    vf->bulletin.size : req->bulletin_size;
req              1636 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
req              1655 drivers/net/ethernet/qlogic/qed/qed_sriov.c 					 req->vfdev_info.eth_fp_hsi_minor);
req              1666 drivers/net/ethernet/qlogic/qed/qed_sriov.c 						  &req->resc_request, resc);
req              2037 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	struct vfpf_start_rxq_tlv *req;
req              2058 drivers/net/ethernet/qlogic/qed/qed_sriov.c 		req = &mbx->req_virt->start_rxq;
req              2062 drivers/net/ethernet/qlogic/qed/qed_sriov.c 				sizeof(struct eth_rx_prod_data) * req->rx_qid;
req              2113 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	struct vfpf_start_rxq_tlv *req;
req              2119 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	req = &mbx->req_virt->start_rxq;
req              2121 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
req              2123 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
req              2130 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	p_queue = &vf->vf_queues[req->rx_qid];
req              2143 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	sb_dummy.igu_sb_id = req->hw_sb;
req              2145 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	params.sb_idx = req->sb_index;
req              2149 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	vf_params.vf_qid = (u8)req->rx_qid;
req              2163 drivers/net/ethernet/qlogic/qed/qed_sriov.c 		       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
req              2167 drivers/net/ethernet/qlogic/qed/qed_sriov.c 				      req->bd_max_bytes,
req              2168 drivers/net/ethernet/qlogic/qed/qed_sriov.c 				      req->rxq_addr,
req              2169 drivers/net/ethernet/qlogic/qed/qed_sriov.c 				      req->cqe_pbl_addr, req->cqe_pbl_size);
req              2445 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	struct vfpf_start_txq_tlv *req;
req              2455 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	req = &mbx->req_virt->start_txq;
req              2457 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
req              2459 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
req              2466 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	p_queue = &vf->vf_queues[req->tx_qid];
req              2479 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	sb_dummy.igu_sb_id = req->hw_sb;
req              2481 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	params.sb_idx = req->sb_index;
req              2485 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	vf_params.vf_qid = (u8)req->tx_qid;
req              2496 drivers/net/ethernet/qlogic/qed/qed_sriov.c 				      req->pbl_addr, req->pbl_size, pq);
req              2588 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	struct vfpf_stop_rxqs_tlv *req;
req              2596 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	req = &mbx->req_virt->stop_rxqs;
req              2597 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	if (req->num_rxqs != 1) {
req              2610 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
req              2611 drivers/net/ethernet/qlogic/qed/qed_sriov.c 				  qid_usage_idx, req->cqe_completion);
req              2626 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	struct vfpf_stop_txqs_tlv *req;
req              2634 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	req = &mbx->req_virt->stop_txqs;
req              2635 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	if (req->num_txqs != 1) {
req              2648 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx);
req              2664 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	struct vfpf_update_rxq_tlv *req;
req              2672 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	req = &mbx->req_virt->update_rxq;
req              2673 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
req              2674 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
req              2684 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	     VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) {
req              2694 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
req              2701 drivers/net/ethernet/qlogic/qed/qed_sriov.c 				   vf->relative_vf_id, req->rx_qid,
req              2702 drivers/net/ethernet/qlogic/qed/qed_sriov.c 				   req->num_rxqs);
req              2708 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	for (i = 0; i < req->num_rxqs; i++) {
req              2709 drivers/net/ethernet/qlogic/qed/qed_sriov.c 		u16 qid = req->rx_qid + i;
req              2715 drivers/net/ethernet/qlogic/qed/qed_sriov.c 					 req->num_rxqs,
req              3280 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	struct vfpf_ucast_filter_tlv *req;
req              3287 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	req = &mbx->req_virt->ucast_filter;
req              3288 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	params.opcode = (enum qed_filter_opcode)req->opcode;
req              3289 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	params.type = (enum qed_filter_ucast_type)req->type;
req              3295 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	memcpy(params.mac, req->mac, ETH_ALEN);
req              3296 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	params.vlan = req->vlan;
req              3430 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	struct vfpf_read_coal_req_tlv *req;
req              3439 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	req = &mbx->req_virt->read_coal_req;
req              3441 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	qid = req->qid;
req              3442 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	b_is_rx = req->is_rx ? true : false;
req              3498 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	struct vfpf_update_coalesce *req;
req              3505 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	req = &mbx->req_virt->update_coalesce;
req              3507 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	rx_coal = req->rx_coal;
req              3508 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	tx_coal = req->tx_coal;
req              3509 drivers/net/ethernet/qlogic/qed/qed_sriov.c 	qid = req->qid;
req               176 drivers/net/ethernet/qlogic/qed/qed_vf.c 	struct vfpf_first_tlv *req;
req               181 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
req               266 drivers/net/ethernet/qlogic/qed/qed_vf.c 	struct vfpf_acquire_tlv *req;
req               270 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
req               271 drivers/net/ethernet/qlogic/qed/qed_vf.c 	p_resc = &req->resc_request;
req               274 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
req               283 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
req               284 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
req               285 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
req               286 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
req               287 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
req               288 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
req               289 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;
req               292 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
req               296 drivers/net/ethernet/qlogic/qed/qed_vf.c 		req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR |
req               302 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->bulletin_addr = p_iov->bulletin.phys;
req               303 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->bulletin_size = p_iov->bulletin.size;
req               343 drivers/net/ethernet/qlogic/qed/qed_vf.c 				req->vfdev_info.capabilities |=
req               367 drivers/net/ethernet/qlogic/qed/qed_vf.c 				if (req->vfdev_info.capabilities &
req               376 drivers/net/ethernet/qlogic/qed/qed_vf.c 					req->vfdev_info.capabilities |=
req               398 drivers/net/ethernet/qlogic/qed/qed_vf.c 	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
req               740 drivers/net/ethernet/qlogic/qed/qed_vf.c 	struct vfpf_start_rxq_tlv *req;
req               745 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));
req               747 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->rx_qid = rx_qid;
req               748 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->cqe_pbl_addr = cqe_pbl_addr;
req               749 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->cqe_pbl_size = cqe_pbl_size;
req               750 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->rxq_addr = bd_chain_phys_addr;
req               751 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->hw_sb = p_cid->sb_igu_id;
req               752 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->sb_index = p_cid->sb_idx;
req               753 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->bd_max_bytes = bd_max_bytes;
req               754 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->stat_id = -1;
req               812 drivers/net/ethernet/qlogic/qed/qed_vf.c 	struct vfpf_stop_rxqs_tlv *req;
req               817 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
req               819 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->rx_qid = p_cid->rel.queue_id;
req               820 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->num_rxqs = 1;
req               821 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->cqe_completion = cqe_completion;
req               853 drivers/net/ethernet/qlogic/qed/qed_vf.c 	struct vfpf_start_txq_tlv *req;
req               858 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
req               860 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->tx_qid = qid;
req               863 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->pbl_addr = pbl_addr;
req               864 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->pbl_size = pbl_size;
req               865 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->hw_sb = p_cid->sb_igu_id;
req               866 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->sb_index = p_cid->sb_idx;
req               909 drivers/net/ethernet/qlogic/qed/qed_vf.c 	struct vfpf_stop_txqs_tlv *req;
req               914 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
req               916 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->tx_qid = p_cid->rel.queue_id;
req               917 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->num_txqs = 1;
req               949 drivers/net/ethernet/qlogic/qed/qed_vf.c 	struct vfpf_vport_start_tlv *req;
req               954 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));
req               956 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->mtu = mtu;
req               957 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->vport_id = vport_id;
req               958 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->inner_vlan_removal = inner_vlan_removal;
req               959 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->tpa_mode = tpa_mode;
req               960 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->max_buffers_per_cqe = max_buffers_per_cqe;
req               961 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->only_untagged = only_untagged;
req               968 drivers/net/ethernet/qlogic/qed/qed_vf.c 			req->sb_addr[i] = p_sb->sb_phys;
req              1080 drivers/net/ethernet/qlogic/qed/qed_vf.c 	struct vfpf_vport_update_tlv *req;
req              1094 drivers/net/ethernet/qlogic/qed/qed_vf.c 	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));
req              1242 drivers/net/ethernet/qlogic/qed/qed_vf.c 	struct vfpf_first_tlv *req;
req              1246 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
req              1295 drivers/net/ethernet/qlogic/qed/qed_vf.c 	struct vfpf_ucast_filter_tlv *req;
req              1300 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
req              1301 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->opcode = (u8) p_ucast->opcode;
req              1302 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->type = (u8) p_ucast->type;
req              1303 drivers/net/ethernet/qlogic/qed/qed_vf.c 	memcpy(req->mac, p_ucast->mac, ETH_ALEN);
req              1304 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->vlan = p_ucast->vlan;
req              1360 drivers/net/ethernet/qlogic/qed/qed_vf.c 	struct vfpf_read_coal_req_tlv *req;
req              1364 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ, sizeof(*req));
req              1365 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->qid = p_cid->rel.queue_id;
req              1366 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->is_rx = p_cid->b_is_rx ? 1 : 0;
req              1420 drivers/net/ethernet/qlogic/qed/qed_vf.c 	struct vfpf_update_coalesce *req;
req              1425 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE, sizeof(*req));
req              1427 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->rx_coal = rx_coal;
req              1428 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->tx_coal = tx_coal;
req              1429 drivers/net/ethernet/qlogic/qed/qed_vf.c 	req->qid = p_cid->rel.queue_id;
req              1434 drivers/net/ethernet/qlogic/qed/qed_vf.c 		   rx_coal, tx_coal, req->qid);
req                42 drivers/net/ethernet/qlogic/qede/qede_ptp.h int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
req              1528 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 	struct _cdrp_cmd	req;
req               809 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		 "Host MBX regs(%d)\n", cmd->req.num);
req               810 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	for (i = 0; i < cmd->req.num; i++) {
req               813 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		pr_info("%08x ", cmd->req.arg[i]);
req               830 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	int opcode = LSW(cmd->req.arg[0]);
req               861 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	opcode = LSW(cmd->req.arg[0]);
req               911 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 			mbx->req.num = mbx_tbl[i].in_args;
req               913 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 			mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
req               915 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 			if (!mbx->req.arg)
req               920 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 				kfree(mbx->req.arg);
req               921 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 				mbx->req.arg = NULL;
req               925 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 			mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
req               948 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		cmd.req.arg[i] = adapter->ahw->mbox_aen[i];
req              1084 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
req              1107 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		buf = &cmd.req.arg[index];
req              1151 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		cmd.req.arg[0] |= (0x3 << 29);
req              1156 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[1] = recv_ctx->context_id | temp;
req              1200 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		cmd.req.arg[0] |= (0x3 << 29);
req              1202 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[1] = cap;
req              1203 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[5] = 1 | (num_rds << 5) | (num_sds << 8) |
req              1208 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 							 &cmd.req.arg[6]);
req              1228 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		buf = &cmd.req.arg[index];
req              1248 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	buf = &cmd.req.arg[index];
req              1302 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		cmd.req.arg[0] |= (0x3 << 29);
req              1307 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[1] = tx_ring->ctx_id | temp;
req              1360 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		cmd.req.arg[0] |= (0x3 << 29);
req              1365 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT;
req              1366 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[5] = QLCNIC_SINGLE_RING | temp;
req              1368 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	buf = &cmd.req.arg[6];
req              1543 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		cmd.req.arg[1] = mbx_in;
req              1544 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		cmd.req.arg[2] = mbx_in;
req              1545 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		cmd.req.arg[3] = mbx_in;
req              1547 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 			cmd.req.arg[4] = QLC_83XX_ENABLE_BEACON;
req              1564 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		cmd.req.arg[1] = adapter->ahw->mbox_reg[0];
req              1565 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		cmd.req.arg[2] = adapter->ahw->mbox_reg[1];
req              1566 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		cmd.req.arg[3] = adapter->ahw->mbox_reg[2];
req              1568 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 			cmd.req.arg[4] = adapter->ahw->mbox_reg[3];
req              1641 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[1] = QLC_REGISTER_LB_IDC | QLC_INIT_FW_RESOURCES;
req              1644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		cmd.req.arg[1] |= QLC_REGISTER_DCB_AEN;
req              1664 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[1] = adapter->ahw->port_config;
req              1701 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[1] = (enable ? 1 : 0) | BIT_8 | temp;
req              1747 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd->req.arg[1] = mode | temp;
req              1989 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		cmd.req.arg[1] = 1 | temp;
req              1991 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		cmd.req.arg[1] = 2 | temp;
req              2002 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	memcpy(&cmd.req.arg[2], &temp_ip, sizeof(u32));
req              2030 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[1] = arg1;
req              2065 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[1] = (adapter->recv_ctx->context_id);
req              2066 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[2] = word;
req              2067 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	memcpy(&cmd.req.arg[4], key, sizeof(key));
req              2115 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd->req.arg[1] = op | (1 << 8);
req              2117 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd->req.arg[1] |= temp;
req              2125 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	buf = &cmd->req.arg[2];
req              2152 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		memcpy(&cmd->req.arg[2], mac, sizeof(u32));
req              2153 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		memcpy(&cmd->req.arg[3], &mac[4], sizeof(u16));
req              2156 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd->req.arg[1] = type;
req              2202 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16;
req              2204 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[2] = coal->rx_packets | temp << 16;
req              2205 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[3] = coal->flag;
req              2229 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_TX | temp << 16;
req              2231 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[2] = coal->tx_packets | temp << 16;
req              2232 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[3] = coal->flag;
req              2389 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[1] = (nic->pci_func << 16);
req              2390 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[2] = 0x1 << 16;
req              2391 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[3] = nic->phys_port | (nic->switch_mode << 16);
req              2392 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[4] = nic->capabilities;
req              2393 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[5] = (nic->max_mac_filters & 0xFF) | ((nic->max_mtu) << 16);
req              2394 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[6] = (nic->max_tx_ques) | ((nic->max_rx_ques) << 16);
req              2395 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[7] = (nic->min_tx_bw) | ((nic->max_tx_bw) << 16);
req              2397 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		cmd.req.arg[i] = 0;
req              2427 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		cmd.req.arg[1] = op | BIT_31 | temp;
req              2429 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		cmd.req.arg[1] = ahw->pci_func << 16;
req              2561 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[1] = max_ints;
req              2564 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		cmd.req.arg[1] |= (adapter->ahw->pci_func << 8) | BIT_16;
req              2571 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		cmd.req.arg[index++] = val;
req              3533 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[1] = BIT_1 | (adapter->tx_ring->ctx_id << 16);
req              3542 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[1] = BIT_2 | (adapter->portnum << 16);
req              3552 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[1] = adapter->recv_ctx->context_id << 16;
req              3576 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[1] = (QLCNIC_83XX_ADD_PORT0 | QLCNIC_83XX_ADD_PORT1);
req              3577 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[2] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
req              3578 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[3] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
req              3661 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[1] = 1;
req              3662 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[2] = intrpt_id;
req              3663 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[3] = BIT_0;
req              3963 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		mbx_cmd = cmd->req.arg[0];
req              3965 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		for (i = 1; i < cmd->req.num; i++)
req              3966 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 			writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
req              1044 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c 	cmd.req.arg[1] = QLC_83XX_MULTI_TENANCY_INFO;
req              1045 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c 	cmd.req.arg[2] = QLC_83XX_ENCAP_TYPE_VXLAN |
req              1074 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c 	cmd.req.arg[1] = state ? QLCNIC_ENABLE_INGRESS_ENCAP_PARSING :
req              2394 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c 		cmd.req.arg[1] = BIT_31;
req                63 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 			mbx->req.num = mbx_tbl[i].in_args;
req                65 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 			mbx->req.arg = kcalloc(mbx->req.num,
req                67 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 			if (!mbx->req.arg)
req                72 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 				kfree(mbx->req.arg);
req                73 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 				mbx->req.arg = NULL;
req                76 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 			mbx->req.arg[0] = type;
req                86 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	kfree(cmd->req.arg);
req                87 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd->req.arg = NULL;
req               130 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	for (i = 1; i < cmd->req.num; i++)
req               131 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 		QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]);
req               133 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 		QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0]));
req               195 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[1] = arg1;
req               196 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[2] = arg2;
req               197 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[3] = arg3;
req               222 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[1] = recv_ctx->context_id;
req               223 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[2] = mtu;
req               342 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[1] = MSD(phys_addr);
req               343 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[2] = LSD(phys_addr);
req               344 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[3] = rq_size;
req               403 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[1] = recv_ctx->context_id;
req               488 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[1] = MSD(phys_addr);
req               489 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[2] = LSD(phys_addr);
req               490 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[3] = rq_size;
req               534 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[1] = tx_ring->ctx_id;
req               551 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[1] = config;
req               785 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 		cmd.req.arg[1] = val;
req               827 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[1] = function | BIT_8;
req               869 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[1] = MSD(nic_dma_t);
req               870 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[2] = LSD(nic_dma_t);
req               871 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[3] = (func_id << 16 | nic_size);
req               934 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[1] = MSD(nic_dma_t);
req               935 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[2] = LSD(nic_dma_t);
req               936 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size);
req               977 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[1] = MSD(pci_info_dma_t);
req               978 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[2] = LSD(pci_info_dma_t);
req               979 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[3] = pci_size;
req              1046 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[1] = arg1;
req              1094 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[1] = arg1;
req              1095 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[2] = MSD(stats_dma_t);
req              1096 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[3] = LSD(stats_dma_t);
req              1146 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[1] = stats_size << 16;
req              1147 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[2] = MSD(stats_dma_t);
req              1148 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[3] = LSD(stats_dma_t);
req              1271 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[1] = arg1;
req              1296 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[1] = *arg1;
req              1385 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[1] = arg1;
req              1386 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[2] = arg2;
req               459 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 	cmd.req.arg[1] = size | (type << 16);
req               460 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 	cmd.req.arg[2] = MSD(phys_addr);
req               461 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 	cmd.req.arg[3] = LSD(phys_addr);
req               584 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 	cmd.req.arg[0] |= QLC_DCB_FW_VER << 29;
req               995 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	cmd.req.arg[1] = ahw->pci_func;
req               440 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	struct qlcnic_nic_req req;
req               445 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	memset(&req, 0, sizeof(struct qlcnic_nic_req));
req               446 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
req               449 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.req_hdr = cpu_to_le64(word);
req               451 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	mac_req = (struct qlcnic_mac_req *)&req.words[0];
req               455 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
req               458 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
req               603 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	struct qlcnic_nic_req req;
req               606 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	memset(&req, 0, sizeof(struct qlcnic_nic_req));
req               608 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
req               612 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.req_hdr = cpu_to_le64(word);
req               614 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.words[0] = cpu_to_le64(mode);
req               617 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 				(struct cmd_desc_type0 *)&req, 1);
req               707 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	struct qlcnic_nic_req req;
req               710 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	memset(&req, 0, sizeof(struct qlcnic_nic_req));
req               712 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
req               713 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
req               716 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.words[0] = cpu_to_le64(flag);
req               718 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
req               775 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	struct qlcnic_nic_req req;
req               778 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	memset(&req, 0, sizeof(struct qlcnic_nic_req));
req               780 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
req               782 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE |
req               785 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32);
req               786 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets |
req               788 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out |
req               791 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
req               824 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	struct qlcnic_nic_req req;
req               831 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	memset(&req, 0, sizeof(struct qlcnic_nic_req));
req               833 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
req               836 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.req_hdr = cpu_to_le64(word);
req               846 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.words[0] = cpu_to_le64(word);
req               848 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
req               858 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	struct qlcnic_nic_req req;
req               865 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	memset(&req, 0, sizeof(struct qlcnic_nic_req));
req               867 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
req               871 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.req_hdr = cpu_to_le64(word);
req               873 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.words[0] = cpu_to_le64(enable);
req               875 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
req               893 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	struct qlcnic_nic_req req;
req               903 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	memset(&req, 0, sizeof(struct qlcnic_nic_req));
req               904 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
req               907 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.req_hdr = cpu_to_le64(word);
req               929 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.words[0] = cpu_to_le64(word);
req               931 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 		req.words[i+1] = cpu_to_le64(key[i]);
req               933 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
req               943 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	struct qlcnic_nic_req req;
req               948 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	memset(&req, 0, sizeof(struct qlcnic_nic_req));
req               949 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
req               952 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.req_hdr = cpu_to_le64(word);
req               954 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.words[0] = cpu_to_le64(cmd);
req               955 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	ipa = (struct qlcnic_ipaddr *)&req.words[1];
req               958 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
req               967 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	struct qlcnic_nic_req req;
req               970 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	memset(&req, 0, sizeof(struct qlcnic_nic_req));
req               971 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
req               974 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.req_hdr = cpu_to_le64(word);
req               975 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.words[0] = cpu_to_le64(enable | (enable << 8));
req               976 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
req               986 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	struct qlcnic_nic_req req;
req               993 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	memset(&req, 0, sizeof(struct qlcnic_nic_req));
req               994 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
req              1000 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.req_hdr = cpu_to_le64(word);
req              1002 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
req              1544 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	struct qlcnic_nic_req   req;
req              1548 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	memset(&req, 0, sizeof(struct qlcnic_nic_req));
req              1549 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
req              1552 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.req_hdr = cpu_to_le64(word);
req              1554 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.words[0] = cpu_to_le64(((u64)rate << 32) | adapter->portnum);
req              1555 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	req.words[1] = cpu_to_le64(state);
req              1557 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
req               274 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct qlcnic_nic_req *req;
req               283 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	req = (struct qlcnic_nic_req *)hwdesc;
req               284 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	memset(req, 0, sizeof(struct qlcnic_nic_req));
req               285 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
req               288 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	req->req_hdr = cpu_to_le64(word);
req               290 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
req               294 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
req              1188 drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c 	cmd.req.arg[1] = LSD(tmp_addr_t);
req              1189 drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c 	cmd.req.arg[2] = MSD(tmp_addr_t);
req              1190 drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c 	cmd.req.arg[3] = temp_size;
req               251 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 		cmd.req.arg = (u32 *)trans->req_pay;
req               369 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 	cmd.req.arg[1] = vport_id << 16 | 0x1;
req               727 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 			mbx->req.num = mbx_tbl[i].in_args;
req               729 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 			mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
req               731 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 			if (!mbx->req.arg)
req               736 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 				kfree(mbx->req.arg);
req               737 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 				mbx->req.arg = NULL;
req               740 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 			mbx->req.arg[0] = (type | (mbx->req.num << 16) |
req               761 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 		trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
req               763 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 		num_regs = cmd->req.num;
req               767 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 		cmd_op = cmd->req.arg[0] & 0xff;
req               784 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 		cmd->req.arg = (u32 *)trans->req_pay;
req               786 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 		cmd_op = cmd->req.arg[0] & 0xff;
req               792 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 		cmd->req.num = trans->req_pay_size / 4;
req              1054 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 	u8 req;
req              1077 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 	req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
req              1079 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 	if (req)
req              1336 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 		cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
req              1394 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 		      QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
req              1401 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 			(cmd->req.arg[0] & 0xffff), func);
req              1416 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 	opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]);
req              2071 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c 	cmd.req.arg[1] = (enable & 1) | vid << 16;
req                43 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd.req.arg[1] = (vport_id << 16) | 0x1;
req                44 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd.req.arg[2] = npar_info->bit_offsets;
req                45 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd.req.arg[2] |= npar_info->min_tx_bw << 16;
req                46 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd.req.arg[3] = npar_info->max_tx_bw | (npar_info->max_tx_ques << 16);
req                47 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd.req.arg[4] = npar_info->max_tx_mac_filters;
req                48 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd.req.arg[4] |= npar_info->max_rx_mcast_mac_filters << 16;
req                49 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd.req.arg[5] = npar_info->max_rx_ucast_mac_filters |
req                51 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd.req.arg[6] = npar_info->max_rx_lro_flow |
req                53 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd.req.arg[7] = npar_info->max_rx_buf_rings |
req                55 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd.req.arg[8] = npar_info->max_tx_vlan_keys;
req                56 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd.req.arg[8] |= npar_info->max_local_ipv6_addrs << 16;
req                57 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd.req.arg[9] = npar_info->max_remote_ipv6_addrs;
req               184 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd.req.arg[1] = 0x2;
req               300 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 		cmd.req.arg[3] = func << 8;
req               307 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 		cmd.req.arg[3] = ((vpid & 0xffff) << 8) | 1;
req               340 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd.req.arg[1] = 0x4;
req               343 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 		cmd.req.arg[1] |= BIT_16;
req               345 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 			cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0;
req               369 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd.req.arg[1] = QLC_FLOOD_MODE | QLC_VF_FLOOD_BIT;
req               390 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd.req.arg[0] |= (3 << 29);
req               391 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd.req.arg[1] = ((func & 0xf) << 2) | BIT_6 | BIT_1;
req               393 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 		cmd.req.arg[1] |= BIT_0;
req               699 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd.req.arg[1] = 0x3 | func << 16;
req               702 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 		cmd.req.arg[2] |= BIT_1 | BIT_3 | BIT_8;
req               703 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 		cmd.req.arg[4] = mac[5] | mac[4] << 8 | mac[3] << 16 |
req               705 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 		cmd.req.arg[5] = mac[1] | mac[0] << 8;
req               709 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 		cmd.req.arg[2] |= BIT_6;
req               710 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 		cmd.req.arg[3] |= vp->pvid << 8;
req               820 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd->req.arg[1] = op | (1 << 8) | (3 << 6);
req               821 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd->req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
req               831 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	buf = &cmd->req.arg[2];
req               848 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	if ((cmd->req.arg[0] >> 29) != 0x3)
req               897 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd->req.arg[6] = vf->vp->handle;
req               917 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	type = cmd->req.arg[1];
req               937 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	if ((cmd->req.arg[0] >> 29) != 0x3)
req               957 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd->req.arg[5] |= vf->vp->handle << 16;
req               972 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	if ((cmd->req.arg[0] >> 29) != 0x3)
req               975 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	if ((cmd->req.arg[1] & 0xffff) != vf->rx_ctx_id)
req               995 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd->req.arg[1] |= vf->vp->handle << 16;
req              1007 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	if ((cmd->req.arg[0] >> 29) != 0x3)
req              1010 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	if ((cmd->req.arg[1] & 0xffff) != vf->tx_ctx_id)
req              1029 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd->req.arg[1] |= vf->vp->handle << 16;
req              1041 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
req              1071 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd->req.arg[1] |= vf->vp->handle << 16;
req              1072 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd->req.arg[1] |= BIT_31;
req              1081 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	if (((cmd->req.arg[1] >> 8) & 0xff) != vf->pci_func)
req              1084 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	if (!(cmd->req.arg[1] & BIT_16))
req              1087 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	if ((cmd->req.arg[1] & 0xff) != 0x1)
req              1113 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	if (cmd->req.arg[1] != vf->rx_ctx_id)
req              1116 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	if (cmd->req.arg[2] > adapter->ahw->max_mtu)
req              1141 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	if (cmd->req.arg[1] & BIT_31) {
req              1142 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 		if (((cmd->req.arg[1] >> 16) & 0x7fff) != vf->pci_func)
req              1145 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 		cmd->req.arg[1] |= vf->vp->handle << 16;
req              1171 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	if (cmd->req.arg[1] != vf->rx_ctx_id)
req              1202 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	type = cmd->req.arg[1] & QLC_INTR_COAL_TYPE_MASK;
req              1203 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	ctx_id = cmd->req.arg[1] >> 16;
req              1204 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	pkts = cmd->req.arg[2] & 0xffff;
req              1205 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	time = cmd->req.arg[2] >> 16;
req              1260 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	if (!(cmd->req.arg[1] & BIT_8))
req              1263 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd->req.arg[1] |= (vf->vp->handle << 16);
req              1264 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd->req.arg[1] |= BIT_31;
req              1267 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 		op = cmd->req.arg[1] & 0x7;
req              1268 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 		cmd->req.arg[1] &= ~0x7;
req              1271 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 		cmd->req.arg[3] |= vp->pvid << 16;
req              1272 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 		cmd->req.arg[1] |= new_op;
req              1298 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
req              1328 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd->req.arg[1] |= vf->vp->handle << 16;
req              1329 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	cmd->req.arg[1] |= BIT_31;
req              1377 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	vlan = cmd->req.arg[1] >> 16;
req              1403 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	vlan = cmd->req.arg[1] >> 16;
req              1444 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 	op = cmd->req.arg[1] & 0xf;
req              1608 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 		cmd.req.arg[1] = vf->rx_ctx_id | (vpid & 0xffff) << 16;
req              1634 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c 		cmd.req.arg[1] |= vf->tx_ctx_id | (vpid & 0xffff) << 16;
req              1714 drivers/net/ethernet/renesas/ravb_main.c static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
req              1729 drivers/net/ethernet/renesas/ravb_main.c 	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
req              1734 drivers/net/ethernet/renesas/ravb_main.c static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
req              1741 drivers/net/ethernet/renesas/ravb_main.c 	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
req              1774 drivers/net/ethernet/renesas/ravb_main.c 	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
req              1779 drivers/net/ethernet/renesas/ravb_main.c static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
req              1791 drivers/net/ethernet/renesas/ravb_main.c 		return ravb_hwtstamp_get(ndev, req);
req              1793 drivers/net/ethernet/renesas/ravb_main.c 		return ravb_hwtstamp_set(ndev, req);
req              1796 drivers/net/ethernet/renesas/ravb_main.c 	return phy_mii_ioctl(phydev, req, cmd);
req               178 drivers/net/ethernet/renesas/ravb_ptp.c 			  struct ptp_extts_request *req, int on)
req               186 drivers/net/ethernet/renesas/ravb_ptp.c 	if (req->flags & ~(PTP_ENABLE_FEATURE |
req               192 drivers/net/ethernet/renesas/ravb_ptp.c 	if (req->index)
req               195 drivers/net/ethernet/renesas/ravb_ptp.c 	if (priv->ptp.extts[req->index] == on)
req               197 drivers/net/ethernet/renesas/ravb_ptp.c 	priv->ptp.extts[req->index] = on;
req               212 drivers/net/ethernet/renesas/ravb_ptp.c 			   struct ptp_perout_request *req, int on)
req               222 drivers/net/ethernet/renesas/ravb_ptp.c 	if (req->flags)
req               225 drivers/net/ethernet/renesas/ravb_ptp.c 	if (req->index)
req               232 drivers/net/ethernet/renesas/ravb_ptp.c 		start_ns = req->start.sec * NSEC_PER_SEC + req->start.nsec;
req               233 drivers/net/ethernet/renesas/ravb_ptp.c 		period_ns = req->period.sec * NSEC_PER_SEC + req->period.nsec;
req               249 drivers/net/ethernet/renesas/ravb_ptp.c 		perout = &priv->ptp.perout[req->index];
req               263 drivers/net/ethernet/renesas/ravb_ptp.c 		perout = &priv->ptp.perout[req->index];
req               278 drivers/net/ethernet/renesas/ravb_ptp.c 			   struct ptp_clock_request *req, int on)
req               280 drivers/net/ethernet/renesas/ravb_ptp.c 	switch (req->type) {
req               282 drivers/net/ethernet/renesas/ravb_ptp.c 		return ravb_ptp_extts(ptp, &req->extts, on);
req               284 drivers/net/ethernet/renesas/ravb_ptp.c 		return ravb_ptp_perout(ptp, &req->perout, on);
req               830 drivers/net/ethernet/sfc/rx.c 	struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
req               832 drivers/net/ethernet/sfc/rx.c 	struct efx_nic *efx = netdev_priv(req->net_dev);
req               833 drivers/net/ethernet/sfc/rx.c 	struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
req               834 drivers/net/ethernet/sfc/rx.c 	int slot_idx = req - efx->rps_slot;
req               839 drivers/net/ethernet/sfc/rx.c 	rc = efx->type->filter_insert(efx, &req->spec, true);
req               844 drivers/net/ethernet/sfc/rx.c 		rule = efx_rps_hash_find(efx, &req->spec);
req               865 drivers/net/ethernet/sfc/rx.c 		channel->rps_flow_id[rc] = req->flow_id;
req               869 drivers/net/ethernet/sfc/rx.c 		if (req->spec.ether_type == htons(ETH_P_IP))
req               872 drivers/net/ethernet/sfc/rx.c 				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
req               873 drivers/net/ethernet/sfc/rx.c 				   req->spec.rem_host, ntohs(req->spec.rem_port),
req               874 drivers/net/ethernet/sfc/rx.c 				   req->spec.loc_host, ntohs(req->spec.loc_port),
req               875 drivers/net/ethernet/sfc/rx.c 				   req->rxq_index, req->flow_id, rc, arfs_id);
req               879 drivers/net/ethernet/sfc/rx.c 				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
req               880 drivers/net/ethernet/sfc/rx.c 				   req->spec.rem_host, ntohs(req->spec.rem_port),
req               881 drivers/net/ethernet/sfc/rx.c 				   req->spec.loc_host, ntohs(req->spec.loc_port),
req               882 drivers/net/ethernet/sfc/rx.c 				   req->rxq_index, req->flow_id, rc, arfs_id);
req               887 drivers/net/ethernet/sfc/rx.c 	dev_put(req->net_dev);
req               894 drivers/net/ethernet/sfc/rx.c 	struct efx_async_filter_insertion *req;
req               927 drivers/net/ethernet/sfc/rx.c 	req = efx->rps_slot + slot_idx;
req               928 drivers/net/ethernet/sfc/rx.c 	efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
req               931 drivers/net/ethernet/sfc/rx.c 	req->spec.match_flags =
req               935 drivers/net/ethernet/sfc/rx.c 	req->spec.ether_type = fk.basic.n_proto;
req               936 drivers/net/ethernet/sfc/rx.c 	req->spec.ip_proto = fk.basic.ip_proto;
req               939 drivers/net/ethernet/sfc/rx.c 		req->spec.rem_host[0] = fk.addrs.v4addrs.src;
req               940 drivers/net/ethernet/sfc/rx.c 		req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
req               942 drivers/net/ethernet/sfc/rx.c 		memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
req               944 drivers/net/ethernet/sfc/rx.c 		memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
req               948 drivers/net/ethernet/sfc/rx.c 	req->spec.rem_port = fk.ports.src;
req               949 drivers/net/ethernet/sfc/rx.c 	req->spec.loc_port = fk.ports.dst;
req               954 drivers/net/ethernet/sfc/rx.c 		rule = efx_rps_hash_add(efx, &req->spec, &new);
req               979 drivers/net/ethernet/sfc/rx.c 	dev_hold(req->net_dev = net_dev);
req               980 drivers/net/ethernet/sfc/rx.c 	INIT_WORK(&req->work, efx_filter_rfs_work);
req               981 drivers/net/ethernet/sfc/rx.c 	req->rxq_index = rxq_index;
req               982 drivers/net/ethernet/sfc/rx.c 	req->flow_id = flow_id;
req               983 drivers/net/ethernet/sfc/rx.c 	schedule_work(&req->work);
req                89 drivers/net/ethernet/sfc/siena_sriov.c 	struct work_struct req;
req               240 drivers/net/ethernet/sfc/siena_sriov.c 				  struct efx_memcpy_req *req,
req               261 drivers/net/ethernet/sfc/siena_sriov.c 			       req->to_rid);
req               263 drivers/net/ethernet/sfc/siena_sriov.c 			       req->to_addr);
req               264 drivers/net/ethernet/sfc/siena_sriov.c 		if (req->from_buf == NULL) {
req               265 drivers/net/ethernet/sfc/siena_sriov.c 			from_rid = req->from_rid;
req               266 drivers/net/ethernet/sfc/siena_sriov.c 			from_addr = req->from_addr;
req               268 drivers/net/ethernet/sfc/siena_sriov.c 			if (WARN_ON(used + req->length >
req               276 drivers/net/ethernet/sfc/siena_sriov.c 			memcpy(_MCDI_PTR(inbuf, used), req->from_buf,
req               277 drivers/net/ethernet/sfc/siena_sriov.c 			       req->length);
req               278 drivers/net/ethernet/sfc/siena_sriov.c 			used += req->length;
req               285 drivers/net/ethernet/sfc/siena_sriov.c 			       req->length);
req               287 drivers/net/ethernet/sfc/siena_sriov.c 		++req;
req               531 drivers/net/ethernet/sfc/siena_sriov.c 	struct vfdi_req *req = vf->buf.addr;
req               532 drivers/net/ethernet/sfc/siena_sriov.c 	unsigned vf_evq = req->u.init_evq.index;
req               533 drivers/net/ethernet/sfc/siena_sriov.c 	unsigned buf_count = req->u.init_evq.buf_count;
req               547 drivers/net/ethernet/sfc/siena_sriov.c 	efx_siena_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count);
req               561 drivers/net/ethernet/sfc/siena_sriov.c 		memcpy(vf->evq0_addrs, req->u.init_evq.addr,
req               572 drivers/net/ethernet/sfc/siena_sriov.c 	struct vfdi_req *req = vf->buf.addr;
req               573 drivers/net/ethernet/sfc/siena_sriov.c 	unsigned vf_rxq = req->u.init_rxq.index;
req               574 drivers/net/ethernet/sfc/siena_sriov.c 	unsigned vf_evq = req->u.init_rxq.evq;
req               575 drivers/net/ethernet/sfc/siena_sriov.c 	unsigned buf_count = req->u.init_rxq.buf_count;
req               590 drivers/net/ethernet/sfc/siena_sriov.c 	if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask))
req               592 drivers/net/ethernet/sfc/siena_sriov.c 	efx_siena_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count);
req               594 drivers/net/ethernet/sfc/siena_sriov.c 	label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL);
req               601 drivers/net/ethernet/sfc/siena_sriov.c 			     !!(req->u.init_rxq.flags &
req               613 drivers/net/ethernet/sfc/siena_sriov.c 	struct vfdi_req *req = vf->buf.addr;
req               614 drivers/net/ethernet/sfc/siena_sriov.c 	unsigned vf_txq = req->u.init_txq.index;
req               615 drivers/net/ethernet/sfc/siena_sriov.c 	unsigned vf_evq = req->u.init_txq.evq;
req               616 drivers/net/ethernet/sfc/siena_sriov.c 	unsigned buf_count = req->u.init_txq.buf_count;
req               633 drivers/net/ethernet/sfc/siena_sriov.c 	if (__test_and_set_bit(req->u.init_txq.index, vf->txq_mask))
req               636 drivers/net/ethernet/sfc/siena_sriov.c 	efx_siena_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count);
req               640 drivers/net/ethernet/sfc/siena_sriov.c 	label = req->u.init_txq.label & EFX_FIELD_MASK(FRF_AZ_TX_DESCQ_LABEL);
req               763 drivers/net/ethernet/sfc/siena_sriov.c 	struct vfdi_req *req = vf->buf.addr;
req               764 drivers/net/ethernet/sfc/siena_sriov.c 	unsigned vf_rxq = req->u.mac_filter.rxq;
req               772 drivers/net/ethernet/sfc/siena_sriov.c 				  req->u.mac_filter.flags);
req               777 drivers/net/ethernet/sfc/siena_sriov.c 	if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_RSS)
req               779 drivers/net/ethernet/sfc/siena_sriov.c 	if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_SCATTER)
req               807 drivers/net/ethernet/sfc/siena_sriov.c 	struct vfdi_req *req = vf->buf.addr;
req               808 drivers/net/ethernet/sfc/siena_sriov.c 	u64 page_count = req->u.set_status_page.peer_page_count;
req               812 drivers/net/ethernet/sfc/siena_sriov.c 		/ sizeof(req->u.set_status_page.peer_page_addr[0]);
req               814 drivers/net/ethernet/sfc/siena_sriov.c 	if (!req->u.set_status_page.dma_addr || page_count > max_page_count) {
req               824 drivers/net/ethernet/sfc/siena_sriov.c 	vf->status_addr = req->u.set_status_page.dma_addr;
req               835 drivers/net/ethernet/sfc/siena_sriov.c 			       req->u.set_status_page.peer_page_addr,
req               872 drivers/net/ethernet/sfc/siena_sriov.c 	struct siena_vf *vf = container_of(work, struct siena_vf, req);
req               874 drivers/net/ethernet/sfc/siena_sriov.c 	struct vfdi_req *req = vf->buf.addr;
req               896 drivers/net/ethernet/sfc/siena_sriov.c 	if (req->op < VFDI_OP_LIMIT && vfdi_ops[req->op] != NULL) {
req               897 drivers/net/ethernet/sfc/siena_sriov.c 		rc = vfdi_ops[req->op](vf);
req               901 drivers/net/ethernet/sfc/siena_sriov.c 				  req->op, vf->pci_name);
req               906 drivers/net/ethernet/sfc/siena_sriov.c 			  "%llx\n", req->op, vf->pci_name,
req               916 drivers/net/ethernet/sfc/siena_sriov.c 	req->rc = rc;
req               917 drivers/net/ethernet/sfc/siena_sriov.c 	req->op = VFDI_OP_RESPONSE;
req               920 drivers/net/ethernet/sfc/siena_sriov.c 	copy[0].from_buf = &req->rc;
req               923 drivers/net/ethernet/sfc/siena_sriov.c 	copy[0].length = sizeof(req->rc);
req               924 drivers/net/ethernet/sfc/siena_sriov.c 	copy[1].from_buf = &req->op;
req               927 drivers/net/ethernet/sfc/siena_sriov.c 	copy[1].length = sizeof(req->op);
req              1009 drivers/net/ethernet/sfc/siena_sriov.c 	struct siena_vf *vf = container_of(work, struct siena_vf, req);
req              1211 drivers/net/ethernet/sfc/siena_sriov.c 		INIT_WORK(&vf->req, efx_siena_sriov_vfdi);
req              1386 drivers/net/ethernet/sfc/siena_sriov.c 		cancel_work_sync(&vf->req);
req              1444 drivers/net/ethernet/sfc/siena_sriov.c 		queue_work(vfdi_workqueue, &vf->req);
req              2017 drivers/net/ethernet/ti/cpsw.c static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
req              2028 drivers/net/ethernet/ti/cpsw.c 		return cpsw_hwtstamp_set(dev, req);
req              2030 drivers/net/ethernet/ti/cpsw.c 		return cpsw_hwtstamp_get(dev, req);
req              2035 drivers/net/ethernet/ti/cpsw.c 	return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
req               208 drivers/net/ethernet/ti/netcp.h 	int	(*ioctl)(void *intf_priv, struct ifreq *req, int cmd);
req              1786 drivers/net/ethernet/ti/netcp_core.c 			   struct ifreq *req, int cmd)
req              1801 drivers/net/ethernet/ti/netcp_core.c 		err = module->ioctl(intf_modpriv->module_priv, req, cmd);
req              2761 drivers/net/ethernet/ti/netcp_ethss.c static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req)
req              2766 drivers/net/ethernet/ti/netcp_ethss.c static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req)
req              2827 drivers/net/ethernet/ti/netcp_ethss.c static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
req              2835 drivers/net/ethernet/ti/netcp_ethss.c 			return gbe_hwtstamp_get(gbe_intf, req);
req              2837 drivers/net/ethernet/ti/netcp_ethss.c 			return gbe_hwtstamp_set(gbe_intf, req);
req              2842 drivers/net/ethernet/ti/netcp_ethss.c 		return phy_mii_ioctl(phy, req, cmd);
req               370 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c 	struct iw_scan_req *req;
req               376 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c 		req = (struct iw_scan_req*)extra;
req               377 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c 		essid = req->essid;
req               378 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c 		essid_len = req->essid_len;
req               961 drivers/net/ethernet/xscale/ixp4xx_eth.c static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
req               968 drivers/net/ethernet/xscale/ixp4xx_eth.c 			return hwtstamp_set(dev, req);
req               970 drivers/net/ethernet/xscale/ixp4xx_eth.c 			return hwtstamp_get(dev, req);
req               973 drivers/net/ethernet/xscale/ixp4xx_eth.c 	return phy_mii_ioctl(dev->phydev, req, cmd);
req               553 drivers/net/fddi/skfp/ess.c 	struct smt_sba_alc_req *req ;
req               584 drivers/net/fddi/skfp/ess.c 	req = smtod(mb,struct smt_sba_alc_req *) ;
req               585 drivers/net/fddi/skfp/ess.c 	req->smt.smt_tid = smc->ess.alloc_trans_id = smt_get_tid(smc) ;
req               586 drivers/net/fddi/skfp/ess.c 	req->smt.smt_dest = smt_sba_da ;
req               589 drivers/net/fddi/skfp/ess.c 	req->s_type.para.p_type = SMT_P0015 ;
req               590 drivers/net/fddi/skfp/ess.c 	req->s_type.para.p_len = sizeof(struct smt_p_0015) - PARA_LEN ;
req               591 drivers/net/fddi/skfp/ess.c 	req->s_type.res_type = SYNC_BW ;
req               594 drivers/net/fddi/skfp/ess.c 	req->cmd.para.p_type = SMT_P0016 ;
req               595 drivers/net/fddi/skfp/ess.c 	req->cmd.para.p_len = sizeof(struct smt_p_0016) - PARA_LEN ;
req               596 drivers/net/fddi/skfp/ess.c 	req->cmd.sba_cmd = REQUEST_ALLOCATION ;
req               604 drivers/net/fddi/skfp/ess.c 	req->path.para.p_type = SMT_P320B ;
req               605 drivers/net/fddi/skfp/ess.c 	req->path.para.p_len = sizeof(struct smt_p_320b) - PARA_LEN ;
req               606 drivers/net/fddi/skfp/ess.c 	req->path.mib_index = SBAPATHINDEX ;
req               607 drivers/net/fddi/skfp/ess.c 	req->path.path_pad = 0;
req               608 drivers/net/fddi/skfp/ess.c 	req->path.path_index = PRIMARY_RING ;
req               611 drivers/net/fddi/skfp/ess.c 	req->pl_req.para.p_type = SMT_P0017 ;
req               612 drivers/net/fddi/skfp/ess.c 	req->pl_req.para.p_len = sizeof(struct smt_p_0017) - PARA_LEN ;
req               613 drivers/net/fddi/skfp/ess.c 	req->pl_req.sba_pl_req = smc->mib.fddiESSPayload -
req               617 drivers/net/fddi/skfp/ess.c 	req->ov_req.para.p_type = SMT_P0018 ;
req               618 drivers/net/fddi/skfp/ess.c 	req->ov_req.para.p_len = sizeof(struct smt_p_0018) - PARA_LEN ;
req               619 drivers/net/fddi/skfp/ess.c 	req->ov_req.sba_ov_req = smc->mib.fddiESSOverhead -
req               623 drivers/net/fddi/skfp/ess.c 	req->payload.para.p_type = SMT_P320F ;
req               624 drivers/net/fddi/skfp/ess.c 	req->payload.para.p_len = sizeof(struct smt_p_320f) - PARA_LEN ;
req               625 drivers/net/fddi/skfp/ess.c 	req->payload.mib_index = SBAPATHINDEX ;
req               626 drivers/net/fddi/skfp/ess.c 	req->payload.mib_payload = smc->mib.a[PATH0].fddiPATHSbaPayload ;
req               629 drivers/net/fddi/skfp/ess.c 	req->overhead.para.p_type = SMT_P3210 ;
req               630 drivers/net/fddi/skfp/ess.c 	req->overhead.para.p_len = sizeof(struct smt_p_3210) - PARA_LEN ;
req               631 drivers/net/fddi/skfp/ess.c 	req->overhead.mib_index = SBAPATHINDEX ;
req               632 drivers/net/fddi/skfp/ess.c 	req->overhead.mib_overhead = smc->mib.a[PATH0].fddiPATHSbaOverhead ;
req               635 drivers/net/fddi/skfp/ess.c 	req->a_addr.para.p_type = SMT_P0019 ;
req               636 drivers/net/fddi/skfp/ess.c 	req->a_addr.para.p_len = sizeof(struct smt_p_0019) - PARA_LEN ;
req               637 drivers/net/fddi/skfp/ess.c 	req->a_addr.sba_pad = 0;
req               638 drivers/net/fddi/skfp/ess.c 	req->a_addr.alloc_addr = null_addr ;
req               641 drivers/net/fddi/skfp/ess.c 	req->cat.para.p_type = SMT_P001A ;
req               642 drivers/net/fddi/skfp/ess.c 	req->cat.para.p_len = sizeof(struct smt_p_001a) - PARA_LEN ;
req               643 drivers/net/fddi/skfp/ess.c 	req->cat.category = smc->mib.fddiESSCategory ;
req               646 drivers/net/fddi/skfp/ess.c 	req->tneg.para.p_type = SMT_P001B ;
req               647 drivers/net/fddi/skfp/ess.c 	req->tneg.para.p_len = sizeof(struct smt_p_001b) - PARA_LEN ;
req               648 drivers/net/fddi/skfp/ess.c 	req->tneg.max_t_neg = smc->mib.fddiESSMaxTNeg ;
req               651 drivers/net/fddi/skfp/ess.c 	req->segm.para.p_type = SMT_P001C ;
req               652 drivers/net/fddi/skfp/ess.c 	req->segm.para.p_len = sizeof(struct smt_p_001c) - PARA_LEN ;
req               653 drivers/net/fddi/skfp/ess.c 	req->segm.min_seg_siz = smc->mib.fddiESSMinSegmentSize ;
req               655 drivers/net/fddi/skfp/ess.c 	dump_smt(smc,(struct smt_header *)req,"RAF") ;
req                39 drivers/net/fddi/skfp/pmf.c static SMbuf *smt_build_pmf_response(struct s_smc *smc, struct smt_header *req,
req               307 drivers/net/fddi/skfp/pmf.c static SMbuf *smt_build_pmf_response(struct s_smc *smc, struct smt_header *req,
req               330 drivers/net/fddi/skfp/pmf.c 	smt->smt_dest = req->smt_source ;	/* DA == source of request */
req               331 drivers/net/fddi/skfp/pmf.c 	smt->smt_class = req->smt_class ;	/* same class (GET/SET) */
req               334 drivers/net/fddi/skfp/pmf.c 	smt->smt_tid = req->smt_tid ;		/* same TID */
req               351 drivers/net/fddi/skfp/pmf.c 		if (!local && smt_authorize(smc,req))
req               353 drivers/net/fddi/skfp/pmf.c 		else if (smt_check_set_count(smc,req))
req               368 drivers/net/fddi/skfp/pmf.c 	len = req->smt_len ;
req               369 drivers/net/fddi/skfp/pmf.c 	pa = (struct smt_para *) (req + 1) ;
req               488 drivers/net/fddi/skfp/pmf.c 		smc->mib.fddiSMTLastSetStationId = req->smt_sid ;
req               300 drivers/net/hamradio/bpqether.c 	struct bpq_req req;
req               307 drivers/net/hamradio/bpqether.c 			if (copy_from_user(&req, ifr->ifr_data, sizeof(struct bpq_req)))
req               309 drivers/net/hamradio/bpqether.c 			switch (req.cmd) {
req               117 drivers/net/hyperv/rndis_filter.c 			    struct rndis_request *req)
req               122 drivers/net/hyperv/rndis_filter.c 	list_del(&req->list_ent);
req               125 drivers/net/hyperv/rndis_filter.c 	kfree(req);
req               204 drivers/net/hyperv/rndis_filter.c 				  struct rndis_request *req)
req               212 drivers/net/hyperv/rndis_filter.c 	packet = &req->pkt;
req               214 drivers/net/hyperv/rndis_filter.c 	packet->total_data_buflen = req->request_msg.msg_len;
req               217 drivers/net/hyperv/rndis_filter.c 	pb[0].pfn = virt_to_phys(&req->request_msg) >>
req               219 drivers/net/hyperv/rndis_filter.c 	pb[0].len = req->request_msg.msg_len;
req               221 drivers/net/hyperv/rndis_filter.c 		(unsigned long)&req->request_msg & (PAGE_SIZE - 1);
req               228 drivers/net/hyperv/rndis_filter.c 		pb[1].pfn = virt_to_phys((void *)&req->request_msg
req               231 drivers/net/hyperv/rndis_filter.c 		pb[1].len = req->request_msg.msg_len -
req               235 drivers/net/hyperv/rndis_filter.c 	trace_rndis_send(dev->ndev, 0, &req->request_msg);
req               297 drivers/net/macsec.c 	struct aead_request *req;
req               602 drivers/net/macsec.c 	aead_request_free(macsec_skb_cb(skb)->req);
req               622 drivers/net/macsec.c 	struct aead_request *req;
req               639 drivers/net/macsec.c 	req = tmp;
req               641 drivers/net/macsec.c 	aead_request_set_tfm(req, tfm);
req               643 drivers/net/macsec.c 	return req;
req               656 drivers/net/macsec.c 	struct aead_request *req;
req               733 drivers/net/macsec.c 	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
req               734 drivers/net/macsec.c 	if (!req) {
req               745 drivers/net/macsec.c 		aead_request_free(req);
req               754 drivers/net/macsec.c 		aead_request_set_crypt(req, sg, sg, len, iv);
req               755 drivers/net/macsec.c 		aead_request_set_ad(req, macsec_hdr_len(sci_present));
req               757 drivers/net/macsec.c 		aead_request_set_crypt(req, sg, sg, 0, iv);
req               758 drivers/net/macsec.c 		aead_request_set_ad(req, skb->len - secy->icv_len);
req               761 drivers/net/macsec.c 	macsec_skb_cb(skb)->req = req;
req               763 drivers/net/macsec.c 	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
req               766 drivers/net/macsec.c 	ret = crypto_aead_encrypt(req);
req               772 drivers/net/macsec.c 		aead_request_free(req);
req               778 drivers/net/macsec.c 	aead_request_free(req);
req               894 drivers/net/macsec.c 	aead_request_free(macsec_skb_cb(skb)->req);
req               933 drivers/net/macsec.c 	struct aead_request *req;
req               947 drivers/net/macsec.c 	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
req               948 drivers/net/macsec.c 	if (!req) {
req               959 drivers/net/macsec.c 		aead_request_free(req);
req               970 drivers/net/macsec.c 		aead_request_set_crypt(req, sg, sg, len, iv);
req               971 drivers/net/macsec.c 		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
req               974 drivers/net/macsec.c 			aead_request_free(req);
req               979 drivers/net/macsec.c 		aead_request_set_crypt(req, sg, sg, icv_len, iv);
req               980 drivers/net/macsec.c 		aead_request_set_ad(req, skb->len - icv_len);
req               983 drivers/net/macsec.c 	macsec_skb_cb(skb)->req = req;
req               985 drivers/net/macsec.c 	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);
req               988 drivers/net/macsec.c 	ret = crypto_aead_decrypt(req);
req              1004 drivers/net/macsec.c 	aead_request_free(req);
req                42 drivers/net/usb/cdc-phonet.c static void tx_complete(struct urb *req);
req                43 drivers/net/usb/cdc-phonet.c static void rx_complete(struct urb *req);
req                51 drivers/net/usb/cdc-phonet.c 	struct urb *req = NULL;
req                58 drivers/net/usb/cdc-phonet.c 	req = usb_alloc_urb(0, GFP_ATOMIC);
req                59 drivers/net/usb/cdc-phonet.c 	if (!req)
req                61 drivers/net/usb/cdc-phonet.c 	usb_fill_bulk_urb(req, pnd->usb, pnd->tx_pipe, skb->data, skb->len,
req                63 drivers/net/usb/cdc-phonet.c 	req->transfer_flags = URB_ZERO_PACKET;
req                64 drivers/net/usb/cdc-phonet.c 	err = usb_submit_urb(req, GFP_ATOMIC);
req                66 drivers/net/usb/cdc-phonet.c 		usb_free_urb(req);
req                83 drivers/net/usb/cdc-phonet.c static void tx_complete(struct urb *req)
req                85 drivers/net/usb/cdc-phonet.c 	struct sk_buff *skb = req->context;
req                88 drivers/net/usb/cdc-phonet.c 	int status = req->status;
req               113 drivers/net/usb/cdc-phonet.c 	usb_free_urb(req);
req               116 drivers/net/usb/cdc-phonet.c static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
req               126 drivers/net/usb/cdc-phonet.c 	usb_fill_bulk_urb(req, pnd->usb, pnd->rx_pipe, page_address(page),
req               128 drivers/net/usb/cdc-phonet.c 	req->transfer_flags = 0;
req               129 drivers/net/usb/cdc-phonet.c 	err = usb_submit_urb(req, gfp_flags);
req               137 drivers/net/usb/cdc-phonet.c static void rx_complete(struct urb *req)
req               139 drivers/net/usb/cdc-phonet.c 	struct net_device *dev = req->context;
req               141 drivers/net/usb/cdc-phonet.c 	struct page *page = virt_to_page(req->transfer_buffer);
req               144 drivers/net/usb/cdc-phonet.c 	int status = req->status;
req               156 drivers/net/usb/cdc-phonet.c 						page, 1, req->actual_length,
req               162 drivers/net/usb/cdc-phonet.c 					page, 0, req->actual_length,
req               166 drivers/net/usb/cdc-phonet.c 		if (req->actual_length < PAGE_SIZE)
req               186 drivers/net/usb/cdc-phonet.c 		req = NULL;
req               203 drivers/net/usb/cdc-phonet.c 	if (req)
req               204 drivers/net/usb/cdc-phonet.c 		rx_submit(pnd, req, GFP_ATOMIC);
req               221 drivers/net/usb/cdc-phonet.c 		struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
req               223 drivers/net/usb/cdc-phonet.c 		if (!req || rx_submit(pnd, req, GFP_KERNEL)) {
req               224 drivers/net/usb/cdc-phonet.c 			usb_free_urb(req);
req               228 drivers/net/usb/cdc-phonet.c 		pnd->urbs[i] = req;
req               244 drivers/net/usb/cdc-phonet.c 		struct urb *req = pnd->urbs[i];
req               246 drivers/net/usb/cdc-phonet.c 		if (!req)
req               248 drivers/net/usb/cdc-phonet.c 		usb_kill_urb(req);
req               249 drivers/net/usb/cdc-phonet.c 		usb_free_urb(req);
req               258 drivers/net/usb/cdc-phonet.c 	struct if_phonet_req *req = (struct if_phonet_req *)ifr;
req               262 drivers/net/usb/cdc-phonet.c 		req->ifr_phonet_autoconf.device = PN_DEV_PC;
req              1961 drivers/net/usb/hso.c 	struct usb_ctrlrequest *req;
req              1978 drivers/net/usb/hso.c 	req = (struct usb_ctrlrequest *)(urb->setup_packet);
req              1983 drivers/net/usb/hso.c 	if (req->bRequestType ==
req               331 drivers/net/usb/lan78xx.c 	struct usb_ctrlrequest req;
req                96 drivers/net/usb/net1080.c nc_vendor_read(struct usbnet *dev, u8 req, u8 regnum, u16 *retval_ptr)
req                98 drivers/net/usb/net1080.c 	int status = usbnet_read_cmd(dev, req,
req               118 drivers/net/usb/net1080.c nc_vendor_write(struct usbnet *dev, u8 req, u8 regnum, u16 value)
req               120 drivers/net/usb/net1080.c 	usbnet_write_cmd(dev, req,
req               115 drivers/net/usb/pegasus.c 	struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
req               120 drivers/net/usb/pegasus.c 	kfree(req);
req               188 drivers/net/usb/pegasus.c 	struct usb_ctrlrequest *req;
req               190 drivers/net/usb/pegasus.c 	req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
req               191 drivers/net/usb/pegasus.c 	if (req == NULL)
req               196 drivers/net/usb/pegasus.c 		kfree(req);
req               199 drivers/net/usb/pegasus.c 	req->bRequestType = PEGASUS_REQT_WRITE;
req               200 drivers/net/usb/pegasus.c 	req->bRequest = PEGASUS_REQ_SET_REGS;
req               201 drivers/net/usb/pegasus.c 	req->wValue = cpu_to_le16(0);
req               202 drivers/net/usb/pegasus.c 	req->wIndex = cpu_to_le16(EthCtrl0);
req               203 drivers/net/usb/pegasus.c 	req->wLength = cpu_to_le16(3);
req               206 drivers/net/usb/pegasus.c 			     usb_sndctrlpipe(pegasus->usb, 0), (void *)req,
req               207 drivers/net/usb/pegasus.c 			     pegasus->eth_regs, 3, async_ctrl_callback, req);
req                58 drivers/net/usb/plusb.c pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index)
req                60 drivers/net/usb/plusb.c 	return usbnet_read_cmd(dev, req,
req               189 drivers/net/usb/rtl8150.c 	struct async_req *req = (struct async_req *)urb->context;
req               194 drivers/net/usb/rtl8150.c 	kfree(req);
req               202 drivers/net/usb/rtl8150.c 	struct async_req *req;
req               204 drivers/net/usb/rtl8150.c 	req = kmalloc(sizeof(struct async_req), GFP_ATOMIC);
req               205 drivers/net/usb/rtl8150.c 	if (req == NULL)
req               209 drivers/net/usb/rtl8150.c 		kfree(req);
req               212 drivers/net/usb/rtl8150.c 	req->rx_creg = cpu_to_le16(reg);
req               213 drivers/net/usb/rtl8150.c 	req->dr.bRequestType = RTL8150_REQT_WRITE;
req               214 drivers/net/usb/rtl8150.c 	req->dr.bRequest = RTL8150_REQ_SET_REGS;
req               215 drivers/net/usb/rtl8150.c 	req->dr.wIndex = 0;
req               216 drivers/net/usb/rtl8150.c 	req->dr.wValue = cpu_to_le16(indx);
req               217 drivers/net/usb/rtl8150.c 	req->dr.wLength = cpu_to_le16(size);
req               219 drivers/net/usb/rtl8150.c 	                     usb_sndctrlpipe(dev->udev, 0), (void *)&req->dr,
req               220 drivers/net/usb/rtl8150.c 			     &req->rx_creg, size, async_set_reg_cb, req);
req                65 drivers/net/usb/smsc75xx.c 	struct usb_ctrlrequest req;
req              2100 drivers/net/usb/usbnet.c 	struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
req              2107 drivers/net/usb/usbnet.c 	kfree(req);
req              2118 drivers/net/usb/usbnet.c 	struct usb_ctrlrequest *req = NULL;
req              2140 drivers/net/usb/usbnet.c 	req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
req              2141 drivers/net/usb/usbnet.c 	if (!req)
req              2144 drivers/net/usb/usbnet.c 	req->bRequestType = reqtype;
req              2145 drivers/net/usb/usbnet.c 	req->bRequest = cmd;
req              2146 drivers/net/usb/usbnet.c 	req->wValue = cpu_to_le16(value);
req              2147 drivers/net/usb/usbnet.c 	req->wIndex = cpu_to_le16(index);
req              2148 drivers/net/usb/usbnet.c 	req->wLength = cpu_to_le16(size);
req              2152 drivers/net/usb/usbnet.c 			     (void *)req, buf, size,
req              2153 drivers/net/usb/usbnet.c 			     usbnet_async_cmd_cb, req);
req              2167 drivers/net/usb/usbnet.c 	kfree(req);
req               587 drivers/net/wireless/ath/ath10k/htt_tx.c 	struct htt_stats_req *req;
req               603 drivers/net/wireless/ath/ath10k/htt_tx.c 	req = &cmd->stats_req;
req               605 drivers/net/wireless/ath/ath10k/htt_tx.c 	memset(req, 0, sizeof(*req));
req               610 drivers/net/wireless/ath/ath10k/htt_tx.c 	memcpy(req->upload_types, &mask, 3);
req               611 drivers/net/wireless/ath/ath10k/htt_tx.c 	memcpy(req->reset_types, &reset_mask, 3);
req               612 drivers/net/wireless/ath/ath10k/htt_tx.c 	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
req               613 drivers/net/wireless/ath/ath10k/htt_tx.c 	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
req               614 drivers/net/wireless/ath/ath10k/htt_tx.c 	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);
req              5953 drivers/net/wireless/ath/ath10k/mac.c 	struct cfg80211_scan_request *req = &hw_req->req;
req              5992 drivers/net/wireless/ath/ath10k/mac.c 	if (req->ie_len) {
req              5993 drivers/net/wireless/ath/ath10k/mac.c 		arg.ie_len = req->ie_len;
req              5994 drivers/net/wireless/ath/ath10k/mac.c 		memcpy(arg.ie, req->ie, arg.ie_len);
req              5997 drivers/net/wireless/ath/ath10k/mac.c 	if (req->n_ssids) {
req              5998 drivers/net/wireless/ath/ath10k/mac.c 		arg.n_ssids = req->n_ssids;
req              6000 drivers/net/wireless/ath/ath10k/mac.c 			arg.ssids[i].len  = req->ssids[i].ssid_len;
req              6001 drivers/net/wireless/ath/ath10k/mac.c 			arg.ssids[i].ssid = req->ssids[i].ssid;
req              6007 drivers/net/wireless/ath/ath10k/mac.c 	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
req              6009 drivers/net/wireless/ath/ath10k/mac.c 		ether_addr_copy(arg.mac_addr.addr, req->mac_addr);
req              6010 drivers/net/wireless/ath/ath10k/mac.c 		ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask);
req              6013 drivers/net/wireless/ath/ath10k/mac.c 	if (req->n_channels) {
req              6014 drivers/net/wireless/ath/ath10k/mac.c 		arg.n_channels = req->n_channels;
req              6016 drivers/net/wireless/ath/ath10k/mac.c 			arg.channels[i] = req->channels[i]->center_freq;
req              6020 drivers/net/wireless/ath/ath10k/mac.c 	if (req->duration) {
req              6021 drivers/net/wireless/ath/ath10k/mac.c 		arg.dwell_time_active = req->duration;
req              6022 drivers/net/wireless/ath/ath10k/mac.c 		arg.dwell_time_passive = req->duration;
req              6023 drivers/net/wireless/ath/ath10k/mac.c 		arg.burst_duration_ms = req->duration;
req              6026 drivers/net/wireless/ath/ath10k/mac.c 				(arg.n_channels - 1) + (req->duration +
req              2099 drivers/net/wireless/ath/ath10k/pci.c 				    void *req, u32 req_len,
req              2121 drivers/net/wireless/ath/ath10k/pci.c 	treq = kmemdup(req, req_len, GFP_KERNEL);
req               221 drivers/net/wireless/ath/ath10k/pci.h int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, void *req, u32 req_len,
req               112 drivers/net/wireless/ath/ath10k/qmi.c 	struct wlfw_msa_info_req_msg_v01 req = {};
req               118 drivers/net/wireless/ath/ath10k/qmi.c 	req.msa_addr = qmi->msa_pa;
req               119 drivers/net/wireless/ath/ath10k/qmi.c 	req.size = qmi->msa_mem_size;
req               129 drivers/net/wireless/ath/ath10k/qmi.c 			       wlfw_msa_info_req_msg_v01_ei, &req);
req               175 drivers/net/wireless/ath/ath10k/qmi.c 	struct wlfw_msa_ready_req_msg_v01 req = {};
req               188 drivers/net/wireless/ath/ath10k/qmi.c 			       wlfw_msa_ready_req_msg_v01_ei, &req);
req               214 drivers/net/wireless/ath/ath10k/qmi.c 	struct wlfw_bdf_download_req_msg_v01 *req;
req               221 drivers/net/wireless/ath/ath10k/qmi.c 	req = kzalloc(sizeof(*req), GFP_KERNEL);
req               222 drivers/net/wireless/ath/ath10k/qmi.c 	if (!req)
req               229 drivers/net/wireless/ath/ath10k/qmi.c 		req->valid = 1;
req               230 drivers/net/wireless/ath/ath10k/qmi.c 		req->file_id_valid = 1;
req               231 drivers/net/wireless/ath/ath10k/qmi.c 		req->file_id = 0;
req               232 drivers/net/wireless/ath/ath10k/qmi.c 		req->total_size_valid = 1;
req               233 drivers/net/wireless/ath/ath10k/qmi.c 		req->total_size = ar->normal_mode_fw.board_len;
req               234 drivers/net/wireless/ath/ath10k/qmi.c 		req->seg_id_valid = 1;
req               235 drivers/net/wireless/ath/ath10k/qmi.c 		req->data_valid = 1;
req               236 drivers/net/wireless/ath/ath10k/qmi.c 		req->end_valid = 1;
req               239 drivers/net/wireless/ath/ath10k/qmi.c 			req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
req               241 drivers/net/wireless/ath/ath10k/qmi.c 			req->data_len = remaining;
req               242 drivers/net/wireless/ath/ath10k/qmi.c 			req->end = 1;
req               245 drivers/net/wireless/ath/ath10k/qmi.c 		memcpy(req->data, temp, req->data_len);
req               256 drivers/net/wireless/ath/ath10k/qmi.c 				       wlfw_bdf_download_req_msg_v01_ei, req);
req               274 drivers/net/wireless/ath/ath10k/qmi.c 		remaining -= req->data_len;
req               275 drivers/net/wireless/ath/ath10k/qmi.c 		temp += req->data_len;
req               276 drivers/net/wireless/ath/ath10k/qmi.c 		req->seg_id++;
req               281 drivers/net/wireless/ath/ath10k/qmi.c 	kfree(req);
req               285 drivers/net/wireless/ath/ath10k/qmi.c 	kfree(req);
req               292 drivers/net/wireless/ath/ath10k/qmi.c 	struct wlfw_cal_report_req_msg_v01 req = {};
req               306 drivers/net/wireless/ath/ath10k/qmi.c 			req.meta_data[j] = qmi->cal_data[i].cal_id;
req               310 drivers/net/wireless/ath/ath10k/qmi.c 	req.meta_data_len = j;
req               315 drivers/net/wireless/ath/ath10k/qmi.c 			       wlfw_cal_report_req_msg_v01_ei, &req);
req               345 drivers/net/wireless/ath/ath10k/qmi.c 	struct wlfw_wlan_mode_req_msg_v01 req = {};
req               355 drivers/net/wireless/ath/ath10k/qmi.c 	req.mode = mode;
req               356 drivers/net/wireless/ath/ath10k/qmi.c 	req.hw_debug_valid = 1;
req               357 drivers/net/wireless/ath/ath10k/qmi.c 	req.hw_debug = 0;
req               362 drivers/net/wireless/ath/ath10k/qmi.c 			       wlfw_wlan_mode_req_msg_v01_ei, &req);
req               394 drivers/net/wireless/ath/ath10k/qmi.c 	struct wlfw_wlan_cfg_req_msg_v01 *req;
req               399 drivers/net/wireless/ath/ath10k/qmi.c 	req = kzalloc(sizeof(*req), GFP_KERNEL);
req               400 drivers/net/wireless/ath/ath10k/qmi.c 	if (!req)
req               409 drivers/net/wireless/ath/ath10k/qmi.c 	req->host_version_valid = 0;
req               411 drivers/net/wireless/ath/ath10k/qmi.c 	req->tgt_cfg_valid = 1;
req               413 drivers/net/wireless/ath/ath10k/qmi.c 		req->tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
req               415 drivers/net/wireless/ath/ath10k/qmi.c 		req->tgt_cfg_len = config->num_ce_tgt_cfg;
req               416 drivers/net/wireless/ath/ath10k/qmi.c 	for (i = 0; i < req->tgt_cfg_len; i++) {
req               417 drivers/net/wireless/ath/ath10k/qmi.c 		req->tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
req               418 drivers/net/wireless/ath/ath10k/qmi.c 		req->tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
req               419 drivers/net/wireless/ath/ath10k/qmi.c 		req->tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
req               420 drivers/net/wireless/ath/ath10k/qmi.c 		req->tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
req               421 drivers/net/wireless/ath/ath10k/qmi.c 		req->tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
req               424 drivers/net/wireless/ath/ath10k/qmi.c 	req->svc_cfg_valid = 1;
req               426 drivers/net/wireless/ath/ath10k/qmi.c 		req->svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
req               428 drivers/net/wireless/ath/ath10k/qmi.c 		req->svc_cfg_len = config->num_ce_svc_pipe_cfg;
req               429 drivers/net/wireless/ath/ath10k/qmi.c 	for (i = 0; i < req->svc_cfg_len; i++) {
req               430 drivers/net/wireless/ath/ath10k/qmi.c 		req->svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
req               431 drivers/net/wireless/ath/ath10k/qmi.c 		req->svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
req               432 drivers/net/wireless/ath/ath10k/qmi.c 		req->svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
req               435 drivers/net/wireless/ath/ath10k/qmi.c 	req->shadow_reg_valid = 1;
req               438 drivers/net/wireless/ath/ath10k/qmi.c 		req->shadow_reg_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01;
req               440 drivers/net/wireless/ath/ath10k/qmi.c 		req->shadow_reg_len = config->num_shadow_reg_cfg;
req               442 drivers/net/wireless/ath/ath10k/qmi.c 	memcpy(req->shadow_reg, config->shadow_reg_cfg,
req               443 drivers/net/wireless/ath/ath10k/qmi.c 	       sizeof(struct wlfw_shadow_reg_cfg_s_v01) * req->shadow_reg_len);
req               448 drivers/net/wireless/ath/ath10k/qmi.c 			       wlfw_wlan_cfg_req_msg_v01_ei, req);
req               466 drivers/net/wireless/ath/ath10k/qmi.c 	kfree(req);
req               470 drivers/net/wireless/ath/ath10k/qmi.c 	kfree(req);
req               507 drivers/net/wireless/ath/ath10k/qmi.c 	struct wlfw_cap_req_msg_v01 req = {};
req               524 drivers/net/wireless/ath/ath10k/qmi.c 			       wlfw_cap_req_msg_v01_ei, &req);
req               583 drivers/net/wireless/ath/ath10k/qmi.c 	struct wlfw_host_cap_req_msg_v01 req = {};
req               590 drivers/net/wireless/ath/ath10k/qmi.c 	req.daemon_support_valid = 1;
req               591 drivers/net/wireless/ath/ath10k/qmi.c 	req.daemon_support = 0;
req               606 drivers/net/wireless/ath/ath10k/qmi.c 			       req_ei, &req);
req               635 drivers/net/wireless/ath/ath10k/qmi.c 	struct wlfw_ini_req_msg_v01 req = {};
req               639 drivers/net/wireless/ath/ath10k/qmi.c 	req.enablefwlog_valid = 1;
req               640 drivers/net/wireless/ath/ath10k/qmi.c 	req.enablefwlog = fw_log_mode;
req               650 drivers/net/wireless/ath/ath10k/qmi.c 			       wlfw_ini_req_msg_v01_ei, &req);
req               679 drivers/net/wireless/ath/ath10k/qmi.c 	struct wlfw_ind_register_req_msg_v01 req = {};
req               684 drivers/net/wireless/ath/ath10k/qmi.c 	req.client_id_valid = 1;
req               685 drivers/net/wireless/ath/ath10k/qmi.c 	req.client_id = ATH10K_QMI_CLIENT_ID;
req               686 drivers/net/wireless/ath/ath10k/qmi.c 	req.fw_ready_enable_valid = 1;
req               687 drivers/net/wireless/ath/ath10k/qmi.c 	req.fw_ready_enable = 1;
req               688 drivers/net/wireless/ath/ath10k/qmi.c 	req.msa_ready_enable_valid = 1;
req               689 drivers/net/wireless/ath/ath10k/qmi.c 	req.msa_ready_enable = 1;
req               699 drivers/net/wireless/ath/ath10k/qmi.c 			       wlfw_ind_register_req_msg_v01_ei, &req);
req              1145 drivers/net/wireless/ath/ath10k/sdio.c 					void *req, u32 req_len,
req              1152 drivers/net/wireless/ath/ath10k/sdio.c 	if (req) {
req              1159 drivers/net/wireless/ath/ath10k/sdio.c 		memcpy(ar_sdio->bmi_buf, req, req_len);
req              1274 drivers/net/wireless/ath/ath10k/sdio.c 				      struct ath10k_sdio_bus_request *req)
req              1280 drivers/net/wireless/ath/ath10k/sdio.c 	skb = req->skb;
req              1281 drivers/net/wireless/ath/ath10k/sdio.c 	ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len);
req              1284 drivers/net/wireless/ath/ath10k/sdio.c 			    req->address, ret);
req              1286 drivers/net/wireless/ath/ath10k/sdio.c 	if (req->htc_msg) {
req              1287 drivers/net/wireless/ath/ath10k/sdio.c 		ep = &ar->htc.endpoint[req->eid];
req              1289 drivers/net/wireless/ath/ath10k/sdio.c 	} else if (req->comp) {
req              1290 drivers/net/wireless/ath/ath10k/sdio.c 		complete(req->comp);
req              1293 drivers/net/wireless/ath/ath10k/sdio.c 	ath10k_sdio_free_bus_req(ar, req);
req              1301 drivers/net/wireless/ath/ath10k/sdio.c 	struct ath10k_sdio_bus_request *req, *tmp_req;
req              1305 drivers/net/wireless/ath/ath10k/sdio.c 	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
req              1306 drivers/net/wireless/ath/ath10k/sdio.c 		list_del(&req->list);
req              1308 drivers/net/wireless/ath/ath10k/sdio.c 		__ath10k_sdio_write_async(ar, req);
req              1783 drivers/net/wireless/ath/ath10k/sdio.c 	struct ath10k_sdio_bus_request *req, *tmp_req;
req              1793 drivers/net/wireless/ath/ath10k/sdio.c 	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
req              1796 drivers/net/wireless/ath/ath10k/sdio.c 		list_del(&req->list);
req              1798 drivers/net/wireless/ath/ath10k/sdio.c 		if (req->htc_msg) {
req              1799 drivers/net/wireless/ath/ath10k/sdio.c 			ep = &ar->htc.endpoint[req->eid];
req              1800 drivers/net/wireless/ath/ath10k/sdio.c 			ath10k_htc_notify_tx_completion(ep, req->skb);
req              1801 drivers/net/wireless/ath/ath10k/sdio.c 		} else if (req->skb) {
req              1802 drivers/net/wireless/ath/ath10k/sdio.c 			kfree_skb(req->skb);
req              1804 drivers/net/wireless/ath/ath10k/sdio.c 		ath10k_sdio_free_bus_req(ar, req);
req               475 drivers/net/wireless/ath/ath10k/usb.c 				      u8 req, u16 value, u16 index, void *data,
req               491 drivers/net/wireless/ath/ath10k/usb.c 			      req,
req               509 drivers/net/wireless/ath/ath10k/usb.c 				     u8 req, u16 value, u16 index, void *data,
req               525 drivers/net/wireless/ath/ath10k/usb.c 			      req,
req               633 drivers/net/wireless/ath/ath10k/usb.c 				       void *req, u32 req_len,
req               638 drivers/net/wireless/ath/ath10k/usb.c 	if (req) {
req               641 drivers/net/wireless/ath/ath10k/usb.c 						 0, 0, req, req_len);
req                31 drivers/net/wireless/ath/ath6kl/hif.c static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
req                37 drivers/net/wireless/ath/ath6kl/hif.c 	buf = req->virt_dma_buf;
req                39 drivers/net/wireless/ath/ath6kl/hif.c 	for (i = 0; i < req->scat_entries; i++) {
req                41 drivers/net/wireless/ath/ath6kl/hif.c 			memcpy(req->scat_list[i].buf, buf,
req                42 drivers/net/wireless/ath/ath6kl/hif.c 			       req->scat_list[i].len);
req                44 drivers/net/wireless/ath/ath6kl/hif.c 			memcpy(buf, req->scat_list[i].buf,
req                45 drivers/net/wireless/ath/ath6kl/hif.c 			       req->scat_list[i].len);
req                47 drivers/net/wireless/ath/ath6kl/hif.c 		buf += req->scat_list[i].len;
req               236 drivers/net/wireless/ath/ath6kl/hif.c 		scat_req->req = HIF_RD_SYNC_BLOCK_FIX;
req               239 drivers/net/wireless/ath/ath6kl/hif.c 		scat_req->req = HIF_WR_ASYNC_BLOCK_INC;
req               183 drivers/net/wireless/ath/ath6kl/hif.h 	u32 req;
req                39 drivers/net/wireless/ath/ath6kl/htc-ops.h 					  struct htc_service_connect_req *req,
req                42 drivers/net/wireless/ath/ath6kl/htc-ops.h 	return target->dev->ar->htc_ops->conn_service(target, req, resp);
req               553 drivers/net/wireless/ath/ath6kl/htc.h 			    struct htc_service_connect_req *req,
req               234 drivers/net/wireless/ath/ath6kl/sdio.c 		   (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
req               238 drivers/net/wireless/ath/ath6kl/sdio.c 	data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
req               261 drivers/net/wireless/ath/ath6kl/sdio.c 			       struct bus_request *req)
req               270 drivers/net/wireless/ath/ath6kl/sdio.c 	scat_req = req->scat_req;
req               274 drivers/net/wireless/ath/ath6kl/sdio.c 		if (scat_req->req & HIF_BLOCK_BASIS)
req               277 drivers/net/wireless/ath/ath6kl/sdio.c 		status = ath6kl_sdio_io(ar_sdio->func, scat_req->req,
req               289 drivers/net/wireless/ath/ath6kl/sdio.c 	opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
req               292 drivers/net/wireless/ath/ath6kl/sdio.c 	rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;
req               295 drivers/net/wireless/ath/ath6kl/sdio.c 	if (scat_req->req & HIF_WRITE) {
req               319 drivers/net/wireless/ath/ath6kl/sdio.c 			       scat_req->req,
req               338 drivers/net/wireless/ath/ath6kl/sdio.c 	if (scat_req->req & HIF_ASYNCHRONOUS)
req               445 drivers/net/wireless/ath/ath6kl/sdio.c 				      struct bus_request *req)
req               447 drivers/net/wireless/ath/ath6kl/sdio.c 	if (req->scat_req) {
req               448 drivers/net/wireless/ath/ath6kl/sdio.c 		ath6kl_sdio_scat_rw(ar_sdio, req);
req               453 drivers/net/wireless/ath/ath6kl/sdio.c 		status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
req               454 drivers/net/wireless/ath/ath6kl/sdio.c 						     req->buffer, req->length,
req               455 drivers/net/wireless/ath/ath6kl/sdio.c 						     req->request);
req               456 drivers/net/wireless/ath/ath6kl/sdio.c 		context = req->packet;
req               457 drivers/net/wireless/ath/ath6kl/sdio.c 		ath6kl_sdio_free_bus_req(ar_sdio, req);
req               465 drivers/net/wireless/ath/ath6kl/sdio.c 	struct bus_request *req, *tmp_req;
req               470 drivers/net/wireless/ath/ath6kl/sdio.c 	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
req               471 drivers/net/wireless/ath/ath6kl/sdio.c 		list_del(&req->list);
req               473 drivers/net/wireless/ath/ath6kl/sdio.c 		__ath6kl_sdio_write_async(ar_sdio, req);
req               676 drivers/net/wireless/ath/ath6kl/sdio.c 	u32 request = scat_req->req;
req              1226 drivers/net/wireless/ath/ath6kl/sdio.c 	struct bus_request *req, *tmp_req;
req              1235 drivers/net/wireless/ath/ath6kl/sdio.c 	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
req              1236 drivers/net/wireless/ath/ath6kl/sdio.c 		list_del(&req->list);
req              1238 drivers/net/wireless/ath/ath6kl/sdio.c 		if (req->scat_req) {
req              1240 drivers/net/wireless/ath/ath6kl/sdio.c 			req->scat_req->status = -ECANCELED;
req              1241 drivers/net/wireless/ath/ath6kl/sdio.c 			req->scat_req->complete(ar_sdio->ar->htc_target,
req              1242 drivers/net/wireless/ath/ath6kl/sdio.c 						req->scat_req);
req              1244 drivers/net/wireless/ath/ath6kl/sdio.c 			context = req->packet;
req              1245 drivers/net/wireless/ath/ath6kl/sdio.c 			ath6kl_sdio_free_bus_req(ar_sdio, req);
req               860 drivers/net/wireless/ath/ath6kl/usb.c 				   u8 req, u16 value, u16 index, void *data,
req               875 drivers/net/wireless/ath/ath6kl/usb.c 			      req,
req               892 drivers/net/wireless/ath/ath6kl/usb.c 				  u8 req, u16 value, u16 index, void *data,
req               907 drivers/net/wireless/ath/ath6kl/usb.c 				 req,
req               793 drivers/net/wireless/ath/ath9k/channel.c 	struct cfg80211_scan_request *req = sc->offchannel.scan_req;
req               795 drivers/net/wireless/ath/ath9k/channel.c 	if (!req->n_ssids || (chan->flags & IEEE80211_CHAN_NO_IR))
req               883 drivers/net/wireless/ath/ath9k/channel.c 	struct cfg80211_scan_request *req = sc->offchannel.scan_req;
req               886 drivers/net/wireless/ath/ath9k/channel.c 	if (sc->offchannel.scan_idx >= req->n_channels) {
req               891 drivers/net/wireless/ath/ath9k/channel.c 			req->n_channels);
req               903 drivers/net/wireless/ath/ath9k/channel.c 	chan = req->channels[sc->offchannel.scan_idx++];
req               992 drivers/net/wireless/ath/ath9k/channel.c 	struct cfg80211_scan_request *req = sc->offchannel.scan_req;
req              1000 drivers/net/wireless/ath/ath9k/channel.c 			ssid->ssid, ssid->ssid_len, req->ie_len);
req              1005 drivers/net/wireless/ath/ath9k/channel.c 	if (req->no_cck)
req              1008 drivers/net/wireless/ath/ath9k/channel.c 	if (req->ie_len)
req              1009 drivers/net/wireless/ath/ath9k/channel.c 		skb_put_data(skb, req->ie, req->ie_len);
req              1029 drivers/net/wireless/ath/ath9k/channel.c 	struct cfg80211_scan_request *req = sc->offchannel.scan_req;
req              1033 drivers/net/wireless/ath/ath9k/channel.c 	    req->n_ssids) {
req              1034 drivers/net/wireless/ath/ath9k/channel.c 		for (i = 0; i < req->n_ssids; i++)
req              1035 drivers/net/wireless/ath/ath9k/channel.c 			ath_scan_send_probe(sc, &req->ssids[i]);
req               128 drivers/net/wireless/ath/ath9k/htc_drv_init.c 	struct htc_service_connreq req;
req               130 drivers/net/wireless/ath/ath9k/htc_drv_init.c 	memset(&req, 0, sizeof(struct htc_service_connreq));
req               132 drivers/net/wireless/ath/ath9k/htc_drv_init.c 	req.service_id = service_id;
req               133 drivers/net/wireless/ath/ath9k/htc_drv_init.c 	req.ep_callbacks.priv = priv;
req               134 drivers/net/wireless/ath/ath9k/htc_drv_init.c 	req.ep_callbacks.rx = ath9k_htc_rxep;
req               135 drivers/net/wireless/ath/ath9k/htc_drv_init.c 	req.ep_callbacks.tx = tx;
req               137 drivers/net/wireless/ath/ath9k/htc_drv_init.c 	return htc_connect_service(priv->htc, &req, ep_id);
req              2316 drivers/net/wireless/ath/ath9k/main.c 	struct cfg80211_scan_request *req = &hw_req->req;
req              2331 drivers/net/wireless/ath/ath9k/main.c 	sc->offchannel.scan_req = req;
req               481 drivers/net/wireless/ath/carl9170/carl9170.h 	bool req;
req              1479 drivers/net/wireless/ath/carl9170/main.c 		sta_info->stats[tid].req = false;
req               410 drivers/net/wireless/ath/carl9170/tx.c 		sta_info->stats[tid].req = false;
req               420 drivers/net/wireless/ath/carl9170/tx.c 		sta_info->stats[tid].req = true;
req               426 drivers/net/wireless/ath/carl9170/tx.c 		if (sta_info->stats[tid].req)
req               620 drivers/net/wireless/ath/wcn36xx/main.c 	struct cfg80211_scan_request *req = wcn->scan_req;
req               626 drivers/net/wireless/ath/wcn36xx/main.c 	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac80211 scan %d channels worker\n", req->n_channels);
req               628 drivers/net/wireless/ath/wcn36xx/main.c 	for (i = 0; i < req->n_channels; i++)
req               629 drivers/net/wireless/ath/wcn36xx/main.c 		channels[i] = req->channels[i]->hw_value;
req               631 drivers/net/wireless/ath/wcn36xx/main.c 	wcn36xx_smd_update_scan_params(wcn, channels, req->n_channels);
req               634 drivers/net/wireless/ath/wcn36xx/main.c 	for (i = 0; i < req->n_channels; i++) {
req               642 drivers/net/wireless/ath/wcn36xx/main.c 		wcn->scan_freq = req->channels[i]->center_freq;
req               643 drivers/net/wireless/ath/wcn36xx/main.c 		wcn->scan_band = req->channels[i]->band;
req               645 drivers/net/wireless/ath/wcn36xx/main.c 		wcn36xx_smd_start_scan(wcn, req->channels[i]->hw_value);
req               647 drivers/net/wireless/ath/wcn36xx/main.c 		wcn36xx_smd_end_scan(wcn, req->channels[i]->hw_value);
req               673 drivers/net/wireless/ath/wcn36xx/main.c 	wcn->scan_req = &hw_req->req;
req               683 drivers/net/wireless/ath/wcn36xx/main.c 	return wcn36xx_smd_start_hw_scan(wcn, vif, &hw_req->req);
req               641 drivers/net/wireless/ath/wcn36xx/smd.c 			      struct cfg80211_scan_request *req)
req               647 drivers/net/wireless/ath/wcn36xx/smd.c 	if (req->ie_len > WCN36XX_MAX_SCAN_IE_LEN)
req               667 drivers/net/wireless/ath/wcn36xx/smd.c 	msg_body->num_ssid = min_t(u8, req->n_ssids, ARRAY_SIZE(msg_body->ssids));
req               669 drivers/net/wireless/ath/wcn36xx/smd.c 		msg_body->ssids[i].length = min_t(u8, req->ssids[i].ssid_len,
req               671 drivers/net/wireless/ath/wcn36xx/smd.c 		memcpy(msg_body->ssids[i].ssid, req->ssids[i].ssid,
req               675 drivers/net/wireless/ath/wcn36xx/smd.c 	msg_body->num_channel = min_t(u8, req->n_channels,
req               678 drivers/net/wireless/ath/wcn36xx/smd.c 		msg_body->channels[i] = req->channels[i]->hw_value;
req               682 drivers/net/wireless/ath/wcn36xx/smd.c 	if (req->ie_len > 0) {
req               683 drivers/net/wireless/ath/wcn36xx/smd.c 		msg_body->ie_len = req->ie_len;
req               684 drivers/net/wireless/ath/wcn36xx/smd.c 		msg_body->header.len += req->ie_len;
req               685 drivers/net/wireless/ath/wcn36xx/smd.c 		memcpy(msg_body->ie, req->ie, req->ie_len);
req                69 drivers/net/wireless/ath/wcn36xx/smd.h 			      struct cfg80211_scan_request *req);
req              2307 drivers/net/wireless/ath/wil6210/cfg80211.c 				    struct wil_probe_client_req *req)
req              2310 drivers/net/wireless/ath/wil6210/cfg80211.c 	struct wil_sta_info *sta = &wil->sta[req->cid];
req              2316 drivers/net/wireless/ath/wil6210/cfg80211.c 	cfg80211_probe_status(ndev, sta->addr, req->cookie, alive,
req              2341 drivers/net/wireless/ath/wil6210/cfg80211.c 	struct wil_probe_client_req *req;
req              2345 drivers/net/wireless/ath/wil6210/cfg80211.c 		req = list_entry(lh, struct wil_probe_client_req, list);
req              2347 drivers/net/wireless/ath/wil6210/cfg80211.c 		wil_probe_client_handle(wil, vif, req);
req              2348 drivers/net/wireless/ath/wil6210/cfg80211.c 		kfree(req);
req              2354 drivers/net/wireless/ath/wil6210/cfg80211.c 	struct wil_probe_client_req *req, *t;
req              2361 drivers/net/wireless/ath/wil6210/cfg80211.c 	list_for_each_entry_safe(req, t, &vif->probe_client_pending, list) {
req              2362 drivers/net/wireless/ath/wil6210/cfg80211.c 		list_del(&req->list);
req              2363 drivers/net/wireless/ath/wil6210/cfg80211.c 		kfree(req);
req              2375 drivers/net/wireless/ath/wil6210/cfg80211.c 	struct wil_probe_client_req *req;
req              2384 drivers/net/wireless/ath/wil6210/cfg80211.c 	req = kzalloc(sizeof(*req), GFP_KERNEL);
req              2385 drivers/net/wireless/ath/wil6210/cfg80211.c 	if (!req)
req              2388 drivers/net/wireless/ath/wil6210/cfg80211.c 	req->cid = cid;
req              2389 drivers/net/wireless/ath/wil6210/cfg80211.c 	req->cookie = cid;
req              2392 drivers/net/wireless/ath/wil6210/cfg80211.c 	list_add_tail(&req->list, &vif->probe_client_pending);
req              2395 drivers/net/wireless/ath/wil6210/cfg80211.c 	*cookie = req->cookie;
req              1958 drivers/net/wireless/atmel/at76c50x-usb.c 	struct cfg80211_scan_request *req = &hw_req->req;
req              1976 drivers/net/wireless/atmel/at76c50x-usb.c 	if (req->n_ssids) {
req              1978 drivers/net/wireless/atmel/at76c50x-usb.c 		ssid = req->ssids[0].ssid;
req              1979 drivers/net/wireless/atmel/at76c50x-usb.c 		len = req->ssids[0].ssid_len;
req              1248 drivers/net/wireless/broadcom/b43/main.c 	u32 req = B43_BCMA_CLKCTLST_80211_PLL_REQ |
req              1278 drivers/net/wireless/broadcom/b43/main.c 	bcma_core_pll_ctl(dev->dev->bdev, req, status, true);
req              3229 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	struct cfg80211_scan_request *req;
req              3232 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	req_size = sizeof(*req) +
req              3233 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		   n_netinfo * sizeof(req->channels[0]) +
req              3234 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		   n_netinfo * sizeof(*req->ssids);
req              3236 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	req = kzalloc(req_size, GFP_KERNEL);
req              3237 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	if (req) {
req              3238 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		req->wiphy = wiphy;
req              3239 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		req->ssids = (void *)(&req->channels[0]) +
req              3240 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			     n_netinfo * sizeof(req->channels[0]);
req              3242 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	return req;
req              3245 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req,
req              3261 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	chan = ieee80211_get_channel(req->wiphy, freq);
req              3265 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	for (i = 0; i < req->n_channels; i++) {
req              3266 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		if (req->channels[i] == chan)
req              3269 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	if (i == req->n_channels)
req              3270 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		req->channels[req->n_channels++] = chan;
req              3272 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	for (i = 0; i < req->n_ssids; i++) {
req              3273 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		if (req->ssids[i].ssid_len == ssid_len &&
req              3274 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		    !memcmp(req->ssids[i].ssid, ssid, ssid_len))
req              3277 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	if (i == req->n_ssids) {
req              3278 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		memcpy(req->ssids[req->n_ssids].ssid, ssid, ssid_len);
req              3279 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		req->ssids[req->n_ssids++].ssid_len = ssid_len;
req              3428 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 				struct cfg80211_sched_scan_request *req)
req              3435 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		  req->n_match_sets, req->n_ssids);
req              3443 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	if (req->n_match_sets <= 0) {
req              3445 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			  req->n_match_sets);
req              3449 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	return brcmf_pno_start_sched_scan(ifp, req);
req              6952 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 					struct regulatory_request *req)
req              6962 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	if (req->alpha2[0] == '0' && req->alpha2[1] == '0')
req              6967 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') {
req              6969 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 				 req->alpha2[0], req->alpha2[1]);
req              6973 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	brcmf_dbg(TRACE, "Enter: initiator=%d, alpha=%c%c\n", req->initiator,
req              6974 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		  req->alpha2[0], req->alpha2[1]);
req              6982 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	err = brcmf_translate_country_code(ifp->drvr, req->alpha2, &ccreq);
req               431 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c 	struct brcmf_fw_request *req;
req               433 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c 	void (*done)(struct device *dev, int err, struct brcmf_fw_request *req);
req               507 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c static void brcmf_fw_free_request(struct brcmf_fw_request *req)
req               512 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c 	for (i = 0, item = &req->items[0]; i < req->n_items; i++, item++) {
req               518 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c 	kfree(req);
req               534 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c 	cur = &fwctx->req->items[fwctx->curpos];
req               550 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c 					     fwctx->req->domain_nr,
req               551 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c 					     fwctx->req->bus_nr);
req               574 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c 	struct brcmf_fw_item *cur = &fwctx->req->items[fwctx->curpos];
req               602 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c 	struct brcmf_fw_item *cur = &fwctx->req->items[fwctx->curpos];
req               606 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c 	if (cur->type == BRCMF_FW_TYPE_NVRAM && fwctx->req->board_type) {
req               613 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c 		strlcat(alt_path, fwctx->req->board_type, BRCMF_FW_NAME_LEN);
req               631 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c 	while (ret == 0 && ++fwctx->curpos < fwctx->req->n_items) {
req               637 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c 		brcmf_fw_free_request(fwctx->req);
req               638 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c 		fwctx->req = NULL;
req               640 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c 	fwctx->done(fwctx->dev, ret, fwctx->req);
req               644 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c static bool brcmf_fw_request_is_valid(struct brcmf_fw_request *req)
req               649 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c 	if (!req->n_items)
req               652 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c 	for (i = 0, item = &req->items[0]; i < req->n_items; i++, item++) {
req               659 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
req               661 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c 					 struct brcmf_fw_request *req))
req               663 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c 	struct brcmf_fw_item *first = &req->items[0];
req               671 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c 	if (!brcmf_fw_request_is_valid(req))
req               679 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c 	fwctx->req = req;
req                82 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
req                84 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h 					 struct brcmf_fw_request *req));
req                41 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 				   struct cfg80211_sched_scan_request *req)
req                47 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	brcmf_dbg(SCAN, "reqid=%llu\n", req->reqid);
req                49 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	pi->reqs[pi->n_reqs++] = req;
req               235 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 				 struct cfg80211_sched_scan_request *req)
req               239 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	if (!ssid || !req->ssids || !req->n_ssids)
req               242 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	for (i = 0; i < req->n_ssids; i++) {
req               243 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 		if (ssid->ssid_len == req->ssids[i].ssid_len) {
req               244 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 			if (!strncmp(ssid->ssid, req->ssids[i].ssid,
req               464 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 			       struct cfg80211_sched_scan_request *req)
req               469 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	brcmf_dbg(TRACE, "reqid=%llu\n", req->reqid);
req               472 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	ret = brcmf_pno_store_request(pi, req);
req               478 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 		brcmf_pno_remove_request(pi, req->reqid);
req               564 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	struct cfg80211_sched_scan_request *req;
req               571 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 		req = pi->reqs[i];
req               573 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 		if (!req->n_match_sets)
req               575 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 		for (j = 0; j < req->n_match_sets; j++) {
req               576 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 			ms = &req->match_sets[j];
req                23 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h 			       struct cfg80211_sched_scan_request *req);
req               172 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 				struct brcmf_usbreq  *req);
req               376 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	struct brcmf_usbreq  *req;
req               382 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	req = list_entry(q->next, struct brcmf_usbreq, list);
req               387 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	return req;
req               392 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 			  struct list_head *q, struct brcmf_usbreq *req,
req               397 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	list_add_tail(&req->list, q);
req               407 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	struct brcmf_usbreq *req, *reqs;
req               413 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	req = reqs;
req               416 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		req->urb = usb_alloc_urb(0, GFP_ATOMIC);
req               417 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		if (!req->urb)
req               420 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		INIT_LIST_HEAD(&req->list);
req               421 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		list_add_tail(&req->list, q);
req               422 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		req++;
req               428 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		req = list_entry(q->next, struct brcmf_usbreq, list);
req               429 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		if (req)
req               430 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 			usb_free_urb(req->urb);
req               440 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	struct brcmf_usbreq *req, *next;
req               442 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	list_for_each_entry_safe(req, next, q, list) {
req               443 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		if (!req->urb) {
req               447 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		usb_free_urb(req->urb);
req               448 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		list_del_init(&req->list);
req               453 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 				struct brcmf_usbreq *req)
req               458 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	list_del_init(&req->list);
req               465 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
req               466 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	struct brcmf_usbdev_info *devinfo = req->devinfo;
req               470 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		  req->skb);
req               471 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	brcmf_usb_del_fromq(devinfo, req);
req               473 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	brcmf_proto_bcdc_txcomplete(devinfo->dev, req->skb, urb->status == 0);
req               474 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	req->skb = NULL;
req               475 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount);
req               487 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	struct brcmf_usbreq  *req = (struct brcmf_usbreq *)urb->context;
req               488 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	struct brcmf_usbdev_info *devinfo = req->devinfo;
req               492 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	brcmf_usb_del_fromq(devinfo, req);
req               493 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	skb = req->skb;
req               494 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	req->skb = NULL;
req               499 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
req               506 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		brcmf_usb_rx_refill(devinfo, req);
req               509 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
req               516 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 				struct brcmf_usbreq  *req)
req               521 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	if (!req || !devinfo)
req               526 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
req               529 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	req->skb = skb;
req               531 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->rx_pipe,
req               533 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 			  req);
req               534 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	req->devinfo = devinfo;
req               535 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	brcmf_usb_enq(devinfo, &devinfo->rx_postq, req, NULL);
req               537 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	ret = usb_submit_urb(req->urb, GFP_ATOMIC);
req               539 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		brcmf_usb_del_fromq(devinfo, req);
req               540 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		brcmu_pkt_buf_free_skb(req->skb);
req               541 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		req->skb = NULL;
req               542 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
req               549 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	struct brcmf_usbreq *req;
req               555 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	while ((req = brcmf_usb_deq(devinfo, &devinfo->rx_freeq, NULL)) != NULL)
req               556 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		brcmf_usb_rx_refill(devinfo, req);
req               587 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	struct brcmf_usbreq  *req;
req               597 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq,
req               599 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	if (!req) {
req               605 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	req->skb = skb;
req               606 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	req->devinfo = devinfo;
req               607 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->tx_pipe,
req               608 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 			  skb->data, skb->len, brcmf_usb_tx_complete, req);
req               609 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	req->urb->transfer_flags |= URB_ZERO_PACKET;
req               610 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	brcmf_usb_enq(devinfo, &devinfo->tx_postq, req, NULL);
req               611 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	ret = usb_submit_urb(req->urb, GFP_ATOMIC);
req               614 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		brcmf_usb_del_fromq(devinfo, req);
req               615 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		req->skb = NULL;
req               616 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req,
req              1639 drivers/net/wireless/cisco/airo.c 	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
req              1649 drivers/net/wireless/cisco/airo.c 	skcipher_request_set_sync_tfm(req, tfm);
req              1650 drivers/net/wireless/cisco/airo.c 	skcipher_request_set_callback(req, 0, NULL, NULL);
req              1651 drivers/net/wireless/cisco/airo.c 	skcipher_request_set_crypt(req, &sg, &sg, sizeof(context->coeff), iv);
req              1653 drivers/net/wireless/cisco/airo.c 	ret = crypto_skcipher_encrypt(req);
req              9443 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct iw_scan_req *req = (struct iw_scan_req *)extra;
req              9452 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			int len = min((int)req->essid_len,
req              9454 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			memcpy(priv->direct_scan_ssid, req->essid, len);
req              9457 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		} else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
req               820 drivers/net/wireless/intel/ipw2x00/libipw.h 				     struct libipw_probe_request * req,
req               829 drivers/net/wireless/intel/ipw2x00/libipw.h 				       struct libipw_reassoc_request * req);
req              1561 drivers/net/wireless/intel/iwlegacy/common.c 	struct cfg80211_scan_request *req = &hw_req->req;
req              1565 drivers/net/wireless/intel/iwlegacy/common.c 	if (req->n_channels == 0) {
req              1580 drivers/net/wireless/intel/iwlegacy/common.c 	il->scan_request = req;
req              1582 drivers/net/wireless/intel/iwlegacy/common.c 	il->scan_band = req->channels[0]->band;
req              1489 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c 	struct cfg80211_scan_request *req = &hw_req->req;
req              1494 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c 	if (req->n_channels == 0)
req              1506 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c 		priv->scan_request = req;
req              1510 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c 		priv->scan_request = req;
req              1517 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c 					req->channels[0]->band);
req                83 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	mvm->ftm_initiator.req = NULL;
req               105 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	if (!mvm->ftm_initiator.req)
req               108 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	for (i = 0; i < mvm->ftm_initiator.req->n_peers; i++) {
req               109 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		memcpy(result.addr, mvm->ftm_initiator.req->peers[i].addr,
req               114 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 				     mvm->ftm_initiator.req,
req               119 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 			       mvm->ftm_initiator.req, GFP_KERNEL);
req               139 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 			       struct cfg80211_pmsr_request *req)
req               143 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	cmd->request_id = req->cookie;
req               144 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	cmd->num_of_ap = req->n_peers;
req               147 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	if (!req->timeout || req->timeout > 255 * 100)
req               150 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		cmd->req_timeout = DIV_ROUND_UP(req->timeout, 100);
req               157 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
req               159 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
req               169 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 			    struct cfg80211_pmsr_request *req)
req               176 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	cmd->request_id = req->cookie;
req               177 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	cmd->num_of_ap = req->n_peers;
req               183 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	if (req->timeout)
req               184 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		cmd->req_timeout_ms = cpu_to_le32(req->timeout);
req               188 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
req               190 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
req               196 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		for (i = 0; i < req->n_peers; i++) {
req               197 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 			if (req->peers[i].report_ap_tsf) {
req               324 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		      struct cfg80211_pmsr_request *req)
req               340 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	if (mvm->ftm_initiator.req)
req               344 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		iwl_mvm_ftm_cmd(mvm, vif, &cmd, req);
req               349 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);
req               356 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
req               376 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		mvm->ftm_initiator.req = req;
req               383 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
req               386 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		.request_id = req->cookie,
req               391 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	if (req != mvm->ftm_initiator.req)
req               402 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c static int iwl_mvm_ftm_find_peer(struct cfg80211_pmsr_request *req,
req               407 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	for (i = 0; i < req->n_peers; i++) {
req               408 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
req               462 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	if (request_id != (u8)mvm->ftm_initiator.req->cookie) {
req               464 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 			request_id, (u8)mvm->ftm_initiator.req->cookie);
req               468 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	if (num_of_aps > mvm->ftm_initiator.req->n_peers) {
req               508 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	if (!mvm->ftm_initiator.req) {
req               530 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		       mvm->ftm_initiator.req->cookie, num_of_aps);
req               558 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		peer_idx = iwl_mvm_ftm_find_peer(mvm->ftm_initiator.req,
req               611 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 				     mvm->ftm_initiator.req,
req               624 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 				       mvm->ftm_initiator.req,
req              2697 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	if (hw_req->req.n_channels == 0 ||
req              2698 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	    hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
req              2702 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
req              3268 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 					struct cfg80211_sched_scan_request *req,
req              3282 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
req              1119 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h 		struct cfg80211_pmsr_request *req;
req              1718 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h 			   struct cfg80211_scan_request *req,
req              1733 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h 			     struct cfg80211_sched_scan_request *req,
req              2011 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req);
req               343 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 			      struct cfg80211_scan_request *req,
req               348 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	if (!req->duration)
req               357 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	if (req->duration_mandatory && req->duration > duration) {
req               360 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 			       req->duration,
req               365 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	return min_t(u32, (u32)req->duration, duration);
req               593 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 				   struct cfg80211_sched_scan_request *req)
req               608 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
req               632 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	profile_cfg->num_profiles = req->n_match_sets;
req               636 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
req               639 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	for (i = 0; i < req->n_match_sets; i++) {
req               661 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 				  struct cfg80211_sched_scan_request *req)
req               663 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
req               666 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 			       req->n_match_sets);
req              1733 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 			   struct cfg80211_scan_request *req,
req              1760 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
req              1763 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	params.n_ssids = req->n_ssids;
req              1764 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	params.flags = req->flags;
req              1765 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	params.n_channels = req->n_channels;
req              1767 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	params.ssids = req->ssids;
req              1768 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	params.channels = req->channels;
req              1769 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	params.mac_addr = req->mac_addr;
req              1770 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	params.mac_addr_mask = req->mac_addr_mask;
req              1771 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	params.no_cck = req->no_cck;
req              1781 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	ret = iwl_mvm_get_measurement_dwell(mvm, req, &params);
req              1826 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 			     struct cfg80211_sched_scan_request *req,
req              1853 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
req              1856 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	params.n_ssids = req->n_ssids;
req              1857 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	params.flags = req->flags;
req              1858 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	params.n_channels = req->n_channels;
req              1859 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	params.ssids = req->ssids;
req              1860 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	params.channels = req->channels;
req              1861 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	params.mac_addr = req->mac_addr;
req              1862 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	params.mac_addr_mask = req->mac_addr_mask;
req              1864 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	params.pass_all =  iwl_mvm_scan_pass_all(mvm, req);
req              1865 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	params.n_match_sets = req->n_match_sets;
req              1866 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	params.match_sets = req->match_sets;
req              1867 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	if (!req->n_scan_plans)
req              1870 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	params.n_scan_plans = req->n_scan_plans;
req              1871 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	params.scan_plans = req->scan_plans;
req              1880 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	if (req->delay > U16_MAX) {
req              1885 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 		params.delay = req->delay;
req              1888 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
req               170 drivers/net/wireless/intersil/hostap/hostap_info.c 	struct hfa384x_join_request req;
req               221 drivers/net/wireless/intersil/hostap/hostap_info.c 	memcpy(req.bssid, selected->bssid, ETH_ALEN);
req               222 drivers/net/wireless/intersil/hostap/hostap_info.c 	req.channel = selected->chid;
req               227 drivers/net/wireless/intersil/hostap/hostap_info.c 	       dev->name, req.bssid, le16_to_cpu(req.channel));
req               228 drivers/net/wireless/intersil/hostap/hostap_info.c 	if (local->func->set_rid(dev, HFA384X_RID_JOINREQUEST, &req,
req               229 drivers/net/wireless/intersil/hostap/hostap_info.c 				 sizeof(req))) {
req               643 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 	struct hfa384x_join_request req;
req               651 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 	memcpy(req.bssid, local->preferred_ap, ETH_ALEN);
req               652 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 	req.channel = 0;
req               660 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 			req.channel = entry->chid;
req               666 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 	if (local->func->set_rid(dev, HFA384X_RID_JOINREQUEST, &req,
req               667 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 				 sizeof(req))) {
req              1730 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 	struct iw_scan_req *req = (struct iw_scan_req *) extra;
req              1736 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 		req = NULL;
req              1749 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 	if (req && data->flags & IW_SCAN_THIS_ESSID) {
req              1750 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 		ssid = req->essid;
req              1751 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 		ssid_len = req->essid_len;
req              1271 drivers/net/wireless/intersil/orinoco/hw.c 			__le16 req[3];
req              1273 drivers/net/wireless/intersil/orinoco/hw.c 			req[0] = cpu_to_le16(0x3fff);	/* All channels */
req              1274 drivers/net/wireless/intersil/orinoco/hw.c 			req[1] = cpu_to_le16(0x0001);	/* rate 1 Mbps */
req              1275 drivers/net/wireless/intersil/orinoco/hw.c 			req[2] = 0;			/* Any ESSID */
req              1277 drivers/net/wireless/intersil/orinoco/hw.c 						  HERMES_RID_CNFHOSTSCAN, &req);
req              1151 drivers/net/wireless/intersil/orinoco/main.c 	} __packed req;
req              1202 drivers/net/wireless/intersil/orinoco/main.c 	memcpy(req.bssid, priv->desired_bssid, ETH_ALEN);
req              1203 drivers/net/wireless/intersil/orinoco/main.c 	req.channel = atom->channel;	/* both are little-endian */
req              1205 drivers/net/wireless/intersil/orinoco/main.c 				  &req);
req               725 drivers/net/wireless/intersil/orinoco/orinoco_usb.c static int ezusb_fill_req(struct ezusb_packet *req, u16 length, u16 rid,
req               728 drivers/net/wireless/intersil/orinoco/orinoco_usb.c 	int total_size = sizeof(*req) + length;
req               732 drivers/net/wireless/intersil/orinoco/orinoco_usb.c 	req->magic = cpu_to_le16(EZUSB_MAGIC);
req               733 drivers/net/wireless/intersil/orinoco/orinoco_usb.c 	req->req_reply_count = reply_count;
req               734 drivers/net/wireless/intersil/orinoco/orinoco_usb.c 	req->ans_reply_count = 0;
req               735 drivers/net/wireless/intersil/orinoco/orinoco_usb.c 	req->frame_type = cpu_to_le16(frame_type);
req               736 drivers/net/wireless/intersil/orinoco/orinoco_usb.c 	req->size = cpu_to_le16(length + 4);
req               737 drivers/net/wireless/intersil/orinoco/orinoco_usb.c 	req->crc = cpu_to_le16(build_crc(req));
req               738 drivers/net/wireless/intersil/orinoco/orinoco_usb.c 	req->hermes_len = cpu_to_le16(HERMES_BYTES_TO_RECLEN(length));
req               739 drivers/net/wireless/intersil/orinoco/orinoco_usb.c 	req->hermes_rid = cpu_to_le16(rid);
req               741 drivers/net/wireless/intersil/orinoco/orinoco_usb.c 		memcpy(req->data, data, length);
req              2012 drivers/net/wireless/mac80211_hwsim.c 	struct cfg80211_scan_request *req = hwsim->hw_scan_request;
req              2016 drivers/net/wireless/mac80211_hwsim.c 	if (hwsim->scan_chan_idx >= req->n_channels) {
req              2031 drivers/net/wireless/mac80211_hwsim.c 		  req->channels[hwsim->scan_chan_idx]->center_freq);
req              2033 drivers/net/wireless/mac80211_hwsim.c 	hwsim->tmp_chan = req->channels[hwsim->scan_chan_idx];
req              2036 drivers/net/wireless/mac80211_hwsim.c 	    !req->n_ssids) {
req              2041 drivers/net/wireless/mac80211_hwsim.c 		for (i = 0; i < req->n_ssids; i++) {
req              2047 drivers/net/wireless/mac80211_hwsim.c 						       req->ssids[i].ssid,
req              2048 drivers/net/wireless/mac80211_hwsim.c 						       req->ssids[i].ssid_len,
req              2049 drivers/net/wireless/mac80211_hwsim.c 						       req->ie_len);
req              2054 drivers/net/wireless/mac80211_hwsim.c 			memcpy(mgmt->da, req->bssid, ETH_ALEN);
req              2055 drivers/net/wireless/mac80211_hwsim.c 			memcpy(mgmt->bssid, req->bssid, ETH_ALEN);
req              2057 drivers/net/wireless/mac80211_hwsim.c 			if (req->ie_len)
req              2058 drivers/net/wireless/mac80211_hwsim.c 				skb_put_data(probe, req->ie, req->ie_len);
req              2081 drivers/net/wireless/mac80211_hwsim.c 	struct cfg80211_scan_request *req = &hw_req->req;
req              2088 drivers/net/wireless/mac80211_hwsim.c 	hwsim->hw_scan_request = req;
req              2091 drivers/net/wireless/mac80211_hwsim.c 	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
req              2093 drivers/net/wireless/mac80211_hwsim.c 				     hw_req->req.mac_addr,
req              2094 drivers/net/wireless/mac80211_hwsim.c 				     hw_req->req.mac_addr_mask);
req               815 drivers/net/wireless/mediatek/mt76/mt76.h int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
req               818 drivers/net/wireless/mediatek/mt76/mt76.h void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
req               112 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	} req = {
req               119 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 				   &req, sizeof(req), true);
req               149 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	} req = {
req               155 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 				   &req, sizeof(req), true);
req               362 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	u8 *req, *eep = (u8 *)dev->mt76.eeprom.data;
req               368 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	req = kmalloc(len, GFP_KERNEL);
req               369 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	if (!req)
req               372 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	memcpy(req, &req_hdr, sizeof(req_hdr));
req               373 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	data = (struct req_data *)(req + sizeof(req_hdr));
req               381 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 				  req, len, true);
req               382 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	kfree(req);
req               399 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	} req = {
req               421 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	memcpy(req.rate_power_delta, eep + MT_EE_TX_POWER_CCK,
req               422 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	       sizeof(req.rate_power_delta));
req               424 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	memcpy(req.temp_comp_power, eep + MT_EE_STEP_NUM_NEG_6_7,
req               425 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	       sizeof(req.temp_comp_power));
req               428 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 				   &req, sizeof(req), true);
req               445 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	} req = {
req               456 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 		req.bw = MT_BW_40;
req               458 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 			req.center_chan += 2;
req               460 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 			req.center_chan -= 2;
req               470 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	for (i = 0; i < ARRAY_SIZE(req.txpower); i++)
req               471 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 		req.txpower[i] = tx_power;
req               474 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 				  &req, sizeof(req), true);
req               244 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	} req = {
req               251 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				   &req, sizeof(req), true);
req               282 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	} req = {
req               288 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				   &req, sizeof(req), true);
req               301 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	} req = {
req               306 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				   &req, sizeof(req), true);
req               314 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	} req = {
req               319 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				   &req, sizeof(req), true);
req               587 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	u8 *req, *eep = (u8 *)dev->mt76.eeprom.data;
req               589 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	req = kzalloc(len, GFP_KERNEL);
req               590 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	if (!req)
req               593 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	memcpy(req, &req_hdr, sizeof(req_hdr));
req               594 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	memcpy(req + sizeof(req_hdr), eep + MT_EE_NIC_CONF_0,
req               598 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				  req, len, true);
req               599 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	kfree(req);
req               610 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	} __packed req = {
req               616 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				   &req, sizeof(req), true);
req               627 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	} __packed req = {
req               635 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				   &req, sizeof(req), true);
req               656 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	} __packed req = {
req               667 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.cw_min = fls(params->cw_min);
req               669 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.cw_max = cpu_to_le16(fls(params->cw_max));
req               672 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				   &req, sizeof(req), true);
req               695 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	} __packed req = {
req               702 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				   &req, sizeof(req), true);
req               904 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	} req = {
req               923 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	eth_broadcast_addr(req.g_wtbl.peer_addr);
req               926 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				   &req, sizeof(req), true);
req               935 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		struct wtbl_req_hdr req = {
req               941 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 					   &req, sizeof(req), true);
req               956 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	} req = {
req               977 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	memcpy(req.g_wtbl.peer_addr, sta->addr, ETH_ALEN);
req               980 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				   &req, sizeof(req), true);
req               987 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	struct wtbl_req_hdr req = {
req               993 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				   &req, sizeof(req), true);
req               998 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	struct wtbl_req_hdr req = {
req              1003 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				   &req, sizeof(req), true);
req              1013 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	} req = {
req              1027 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	eth_broadcast_addr(req.basic.peer_addr);
req              1030 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.basic.conn_state = CONN_STATE_PORT_SECURE;
req              1031 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER |
req              1034 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.basic.conn_state = CONN_STATE_DISCONNECT;
req              1035 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER);
req              1039 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				   &req, sizeof(req), true);
req              1051 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	} req = {
req              1066 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	memcpy(req.basic.peer_addr, sta->addr, ETH_ALEN);
req              1071 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
req              1074 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
req              1082 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.basic.conn_state = CONN_STATE_PORT_SECURE;
req              1083 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER |
req              1086 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.basic.conn_state = CONN_STATE_DISCONNECT;
req              1087 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER);
req              1091 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				   &req, sizeof(req), true);
req              1115 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	} __packed req = {
req              1133 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	mt7615_mac_write_txwi(dev, (__le32 *)(req.pkt), skb, wcid, NULL,
req              1135 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
req              1136 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
req              1137 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
req              1142 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.csa_ie_pos = cpu_to_le16(csa_offs);
req              1143 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.csa_cnt = skb->data[offs.csa_counter_offs[0]];
req              1148 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				   &req, sizeof(req), true);
req              1156 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	u8 *req, *data, *eep = (u8 *)dev->mt76.eeprom.data;
req              1171 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	req = kzalloc(len, GFP_KERNEL);
req              1172 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	if (!req)
req              1175 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	memcpy(req, &req_hdr, sizeof(req_hdr));
req              1176 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	data = req + sizeof(req_hdr);
req              1210 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				  req, len, true);
req              1212 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	kfree(req);
req              1227 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	} req = {
req              1235 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				   &req, sizeof(req), true);
req              1248 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	} req = {
req              1254 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	if (dev->radar_pattern.n_pulses > ARRAY_SIZE(req.pattern))
req              1259 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.pattern[i].width = dev->radar_pattern.width;
req              1260 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.pattern[i].power = dev->radar_pattern.power;
req              1261 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.pattern[i].start_time = start_time +
req              1266 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				   &req, sizeof(req), false);
req              1291 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	} req = {
req              1302 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.switch_reason = CH_SWITCH_DFS;
req              1304 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.switch_reason = CH_SWITCH_NORMAL;
req              1308 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.bw = CMD_CBW_40MHZ;
req              1311 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.bw = CMD_CBW_80MHZ;
req              1314 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.bw = CMD_CBW_8080MHZ;
req              1317 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.bw = CMD_CBW_160MHZ;
req              1320 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.bw = CMD_CBW_5MHZ;
req              1323 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.bw = CMD_CBW_10MHZ;
req              1328 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.bw = CMD_CBW_20MHZ;
req              1331 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	memset(req.txpower_sku, 0x3f, 49);
req              1334 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				  &req, sizeof(req), true);
req              1339 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				   &req, sizeof(req), true);
req              1599 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	} req = {
req              1603 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_GET_TEMP, &req,
req              1604 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 				   sizeof(req), true);
req                19 drivers/net/wireless/mediatek/mt76/usb.c static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
req                34 drivers/net/wireless/mediatek/mt76/usb.c 		ret = usb_control_msg(udev, pipe, req, req_type, val,
req                44 drivers/net/wireless/mediatek/mt76/usb.c 		req, offset, ret);
req                48 drivers/net/wireless/mediatek/mt76/usb.c int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
req                55 drivers/net/wireless/mediatek/mt76/usb.c 	ret = __mt76u_vendor_request(dev, req, req_type,
req                71 drivers/net/wireless/mediatek/mt76/usb.c 	u8 req;
req                75 drivers/net/wireless/mediatek/mt76/usb.c 		req = MT_VEND_READ_EEPROM;
req                78 drivers/net/wireless/mediatek/mt76/usb.c 		req = MT_VEND_READ_CFG;
req                81 drivers/net/wireless/mediatek/mt76/usb.c 		req = MT_VEND_MULTI_READ;
req                86 drivers/net/wireless/mediatek/mt76/usb.c 	ret = __mt76u_vendor_request(dev, req,
req               112 drivers/net/wireless/mediatek/mt76/usb.c 	u8 req;
req               116 drivers/net/wireless/mediatek/mt76/usb.c 		req = MT_VEND_WRITE_CFG;
req               119 drivers/net/wireless/mediatek/mt76/usb.c 		req = MT_VEND_MULTI_WRITE;
req               125 drivers/net/wireless/mediatek/mt76/usb.c 	__mt76u_vendor_request(dev, req,
req               169 drivers/net/wireless/mediatek/mt76/usb.c void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
req               173 drivers/net/wireless/mediatek/mt76/usb.c 	__mt76u_vendor_request(dev, req,
req               176 drivers/net/wireless/mediatek/mt76/usb.c 	__mt76u_vendor_request(dev, req,
req                99 drivers/net/wireless/mediatek/mt7601u/trace.h 	TP_PROTO(struct mt7601u_dev *dev, unsigned pipe, u8 req, u8 req_type,
req               101 drivers/net/wireless/mediatek/mt7601u/trace.h 	TP_ARGS(dev, pipe, req, req_type, val, offset, buf, buflen, ret),
req               104 drivers/net/wireless/mediatek/mt7601u/trace.h 		__field(unsigned, pipe) __field(u8, req) __field(u8, req_type)
req               111 drivers/net/wireless/mediatek/mt7601u/trace.h 		__entry->req = req;
req               121 drivers/net/wireless/mediatek/mt7601u/trace.h 		  DEV_PR_ARG, __entry->ret, __entry->pipe, __entry->req,
req                87 drivers/net/wireless/mediatek/mt7601u/usb.c int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
req                98 drivers/net/wireless/mediatek/mt7601u/usb.c 		ret = usb_control_msg(usb_dev, pipe, req, req_type,
req               101 drivers/net/wireless/mediatek/mt7601u/usb.c 		trace_mt_vend_req(dev, pipe, req, req_type, val, offset,
req               113 drivers/net/wireless/mediatek/mt7601u/usb.c 		req, offset, ret);
req               156 drivers/net/wireless/mediatek/mt7601u/usb.c static int __mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
req               159 drivers/net/wireless/mediatek/mt7601u/usb.c 	int ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
req               162 drivers/net/wireless/mediatek/mt7601u/usb.c 		ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
req               168 drivers/net/wireless/mediatek/mt7601u/usb.c int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
req               174 drivers/net/wireless/mediatek/mt7601u/usb.c 	ret = __mt7601u_vendor_single_wr(dev, req, offset, val);
req                64 drivers/net/wireless/mediatek/mt7601u/usb.h int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
req                68 drivers/net/wireless/mediatek/mt7601u/usb.h int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
req              1002 drivers/net/wireless/quantenna/qtnfmac/cfg80211.c 				       struct regulatory_request *req)
req              1008 drivers/net/wireless/quantenna/qtnfmac/cfg80211.c 	pr_debug("MAC%u: initiator=%d alpha=%c%c\n", mac->macid, req->initiator,
req              1009 drivers/net/wireless/quantenna/qtnfmac/cfg80211.c 		 req->alpha2[0], req->alpha2[1]);
req              1011 drivers/net/wireless/quantenna/qtnfmac/cfg80211.c 	ret = qtnf_cmd_reg_notify(mac, req, qtnf_mac_slave_radar_get(wiphy));
req              1014 drivers/net/wireless/quantenna/qtnfmac/cfg80211.c 		       mac->macid, req->alpha2[0], req->alpha2[1], ret);
req              2371 drivers/net/wireless/quantenna/qtnfmac/commands.c int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req,
req              2389 drivers/net/wireless/quantenna/qtnfmac/commands.c 	cmd->alpha2[0] = req->alpha2[0];
req              2390 drivers/net/wireless/quantenna/qtnfmac/commands.c 	cmd->alpha2[1] = req->alpha2[1];
req              2392 drivers/net/wireless/quantenna/qtnfmac/commands.c 	switch (req->initiator) {
req              2407 drivers/net/wireless/quantenna/qtnfmac/commands.c 	switch (req->user_reg_hint_type) {
req              2419 drivers/net/wireless/quantenna/qtnfmac/commands.c 	switch (req->dfs_region) {
req                60 drivers/net/wireless/quantenna/qtnfmac/commands.h int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req,
req               460 drivers/net/wireless/realtek/rtw88/coex.c 					     struct rtw_coex_info_req *req)
req               467 drivers/net/wireless/realtek/rtw88/coex.c 	rtw_fw_query_bt_mp_info(rtwdev, req);
req               488 drivers/net/wireless/realtek/rtw88/coex.c 	struct rtw_coex_info_req req = {0};
req               493 drivers/net/wireless/realtek/rtw88/coex.c 	req.op_code = BT_MP_INFO_OP_SCAN_TYPE;
req               494 drivers/net/wireless/realtek/rtw88/coex.c 	skb = rtw_coex_info_request(rtwdev, &req);
req               510 drivers/net/wireless/realtek/rtw88/coex.c 	struct rtw_coex_info_req req = {0};
req               514 drivers/net/wireless/realtek/rtw88/coex.c 	req.op_code = BT_MP_INFO_OP_LNA_CONSTRAINT;
req               515 drivers/net/wireless/realtek/rtw88/coex.c 	req.para1 = lna_constrain_level;
req               516 drivers/net/wireless/realtek/rtw88/coex.c 	skb = rtw_coex_info_request(rtwdev, &req);
req               244 drivers/net/wireless/realtek/rtw88/fw.c 			     struct rtw_coex_info_req *req)
req               250 drivers/net/wireless/realtek/rtw88/fw.c 	SET_BT_MP_INFO_SEQ(h2c_pkt, req->seq);
req               251 drivers/net/wireless/realtek/rtw88/fw.c 	SET_BT_MP_INFO_OP_CODE(h2c_pkt, req->op_code);
req               252 drivers/net/wireless/realtek/rtw88/fw.c 	SET_BT_MP_INFO_PARA1(h2c_pkt, req->para1);
req               253 drivers/net/wireless/realtek/rtw88/fw.c 	SET_BT_MP_INFO_PARA2(h2c_pkt, req->para2);
req               254 drivers/net/wireless/realtek/rtw88/fw.c 	SET_BT_MP_INFO_PARA3(h2c_pkt, req->para3);
req               276 drivers/net/wireless/realtek/rtw88/fw.h 			     struct rtw_coex_info_req *req);
req               237 drivers/net/wireless/rsi/rsi_91x_mac80211.c 	struct cfg80211_scan_request *scan_req = &hw_req->req;
req                56 drivers/net/wireless/st/cw1200/scan.c 	struct cfg80211_scan_request *req = &hw_req->req;
req                69 drivers/net/wireless/st/cw1200/scan.c 	if (req->n_ssids == 1 && !req->ssids[0].ssid_len)
req                70 drivers/net/wireless/st/cw1200/scan.c 		req->n_ssids = 0;
req                73 drivers/net/wireless/st/cw1200/scan.c 		  req->n_ssids);
req                75 drivers/net/wireless/st/cw1200/scan.c 	if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS)
req                83 drivers/net/wireless/st/cw1200/scan.c 		req->ie_len);
req                90 drivers/net/wireless/st/cw1200/scan.c 	if (req->ie_len)
req                91 drivers/net/wireless/st/cw1200/scan.c 		skb_put_data(frame.skb, req->ie, req->ie_len);
req               107 drivers/net/wireless/st/cw1200/scan.c 	BUG_ON(priv->scan.req);
req               108 drivers/net/wireless/st/cw1200/scan.c 	priv->scan.req = req;
req               111 drivers/net/wireless/st/cw1200/scan.c 	priv->scan.begin = &req->channels[0];
req               113 drivers/net/wireless/st/cw1200/scan.c 	priv->scan.end = &req->channels[req->n_channels];
req               116 drivers/net/wireless/st/cw1200/scan.c 	for (i = 0; i < req->n_ssids; ++i) {
req               118 drivers/net/wireless/st/cw1200/scan.c 		memcpy(&dst->ssid[0], req->ssids[i].ssid, sizeof(dst->ssid));
req               119 drivers/net/wireless/st/cw1200/scan.c 		dst->length = req->ssids[i].ssid_len;
req               168 drivers/net/wireless/st/cw1200/scan.c 	if (!priv->scan.req || (priv->scan.curr == priv->scan.end)) {
req               183 drivers/net/wireless/st/cw1200/scan.c 		else if (priv->scan.req)
req               190 drivers/net/wireless/st/cw1200/scan.c 		priv->scan.req = NULL;
req               213 drivers/net/wireless/st/cw1200/scan.c 		if (priv->scan.req->no_cck)
req                25 drivers/net/wireless/st/cw1200/scan.h 	struct cfg80211_scan_request *req;
req               103 drivers/net/wireless/st/cw1200/sta.c 		priv->scan.req = NULL;
req               976 drivers/net/wireless/ti/wl1251/main.c 	struct cfg80211_scan_request *req = &hw_req->req;
req               985 drivers/net/wireless/ti/wl1251/main.c 	if (req->n_ssids) {
req               986 drivers/net/wireless/ti/wl1251/main.c 		ssid = req->ssids[0].ssid;
req               987 drivers/net/wireless/ti/wl1251/main.c 		ssid_len = req->ssids[0].ssid_len;
req              1013 drivers/net/wireless/ti/wl1251/main.c 				     req->ie_len);
req              1018 drivers/net/wireless/ti/wl1251/main.c 	if (req->ie_len)
req              1019 drivers/net/wireless/ti/wl1251/main.c 		skb_put_data(skb, req->ie, req->ie_len);
req              1033 drivers/net/wireless/ti/wl1251/main.c 	ret = wl1251_cmd_scan(wl, ssid, ssid_len, req->channels,
req              1034 drivers/net/wireless/ti/wl1251/main.c 			      req->n_channels, WL1251_SCAN_NUM_PROBES);
req                14 drivers/net/wireless/ti/wl12xx/scan.c 				    struct cfg80211_scan_request *req,
req                23 drivers/net/wireless/ti/wl12xx/scan.c 	     i < req->n_channels && j < WL1271_SCAN_MAX_CHANNELS;
req                25 drivers/net/wireless/ti/wl12xx/scan.c 		flags = req->channels[i]->flags;
req                29 drivers/net/wireless/ti/wl12xx/scan.c 		    (req->channels[i]->band == band) &&
req                38 drivers/net/wireless/ti/wl12xx/scan.c 				     req->channels[i]->band,
req                39 drivers/net/wireless/ti/wl12xx/scan.c 				     req->channels[i]->center_freq);
req                41 drivers/net/wireless/ti/wl12xx/scan.c 				     req->channels[i]->hw_value,
req                42 drivers/net/wireless/ti/wl12xx/scan.c 				     req->channels[i]->flags);
req                45 drivers/net/wireless/ti/wl12xx/scan.c 				     req->channels[i]->max_antenna_gain,
req                46 drivers/net/wireless/ti/wl12xx/scan.c 				     req->channels[i]->max_power);
req                48 drivers/net/wireless/ti/wl12xx/scan.c 				     req->channels[i]->beacon_found);
req                62 drivers/net/wireless/ti/wl12xx/scan.c 			channels[j].tx_power_att = req->channels[i]->max_power;
req                63 drivers/net/wireless/ti/wl12xx/scan.c 			channels[j].channel = req->channels[i]->hw_value;
req                91 drivers/net/wireless/ti/wl12xx/scan.c 	if (!passive && wl->scan.req->n_ssids == 0)
req               120 drivers/net/wireless/ti/wl12xx/scan.c 	cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
req               148 drivers/net/wireless/ti/wl12xx/scan.c 					 wl->scan.req->ie,
req               149 drivers/net/wireless/ti/wl12xx/scan.c 					 wl->scan.req->ie_len, NULL, 0, false);
req               217 drivers/net/wireless/ti/wl12xx/scan.c 		if (wl->scan.req->no_cck) {
req               234 drivers/net/wireless/ti/wl12xx/scan.c 		if (wl->scan.req->no_cck) {
req               309 drivers/net/wireless/ti/wl12xx/scan.c 				  struct cfg80211_sched_scan_request *req,
req               316 drivers/net/wireless/ti/wl12xx/scan.c 	bool force_passive = !req->n_ssids;
req               339 drivers/net/wireless/ti/wl12xx/scan.c 		cfg->intervals[i] = cpu_to_le32(req->scan_plans[0].interval *
req               343 drivers/net/wireless/ti/wl12xx/scan.c 	ret = wlcore_scan_sched_scan_ssid_list(wl, wlvif, req);
req               357 drivers/net/wireless/ti/wl12xx/scan.c 	if (!wlcore_set_scan_chan_params(wl, cfg_channels, req->channels,
req               358 drivers/net/wireless/ti/wl12xx/scan.c 					 req->n_channels, req->n_ssids,
req               370 drivers/net/wireless/ti/wl12xx/scan.c 						 req->ssids[0].ssid,
req               371 drivers/net/wireless/ti/wl12xx/scan.c 						 req->ssids[0].ssid_len,
req               387 drivers/net/wireless/ti/wl12xx/scan.c 						 req->ssids[0].ssid,
req               388 drivers/net/wireless/ti/wl12xx/scan.c 						 req->ssids[0].ssid_len,
req               448 drivers/net/wireless/ti/wl12xx/scan.c 			    struct cfg80211_sched_scan_request *req,
req               453 drivers/net/wireless/ti/wl12xx/scan.c 	ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
req               489 drivers/net/wireless/ti/wl12xx/scan.c 		      struct cfg80211_scan_request *req)
req               119 drivers/net/wireless/ti/wl12xx/scan.h 		      struct cfg80211_scan_request *req);
req               123 drivers/net/wireless/ti/wl12xx/scan.h 			    struct cfg80211_sched_scan_request *req,
req                28 drivers/net/wireless/ti/wl18xx/scan.c 			    struct cfg80211_scan_request *req)
req                68 drivers/net/wireless/ti/wl18xx/scan.c 	WARN_ON(req->n_ssids > 1);
req                76 drivers/net/wireless/ti/wl18xx/scan.c 	wlcore_set_scan_chan_params(wl, cmd_channels, req->channels,
req                77 drivers/net/wireless/ti/wl18xx/scan.c 				    req->n_channels, req->n_ssids,
req                87 drivers/net/wireless/ti/wl18xx/scan.c 	if (req->no_cck)
req                92 drivers/net/wireless/ti/wl18xx/scan.c 	if (req->n_ssids) {
req                93 drivers/net/wireless/ti/wl18xx/scan.c 		cmd->ssid_len = req->ssids[0].ssid_len;
req                94 drivers/net/wireless/ti/wl18xx/scan.c 		memcpy(cmd->ssid, req->ssids[0].ssid, cmd->ssid_len);
req               102 drivers/net/wireless/ti/wl18xx/scan.c 				 req->ssids ? req->ssids[0].ssid : NULL,
req               103 drivers/net/wireless/ti/wl18xx/scan.c 				 req->ssids ? req->ssids[0].ssid_len : 0,
req               104 drivers/net/wireless/ti/wl18xx/scan.c 				 req->ie,
req               105 drivers/net/wireless/ti/wl18xx/scan.c 				 req->ie_len,
req               119 drivers/net/wireless/ti/wl18xx/scan.c 				 req->ssids ? req->ssids[0].ssid : NULL,
req               120 drivers/net/wireless/ti/wl18xx/scan.c 				 req->ssids ? req->ssids[0].ssid_len : 0,
req               121 drivers/net/wireless/ti/wl18xx/scan.c 				 req->ie,
req               122 drivers/net/wireless/ti/wl18xx/scan.c 				 req->ie_len,
req               157 drivers/net/wireless/ti/wl18xx/scan.c 				  struct cfg80211_sched_scan_request *req,
req               168 drivers/net/wireless/ti/wl18xx/scan.c 	filter_type = wlcore_scan_sched_scan_ssid_list(wl, wlvif, req);
req               211 drivers/net/wireless/ti/wl18xx/scan.c 	wlcore_set_scan_chan_params(wl, cmd_channels, req->channels,
req               212 drivers/net/wireless/ti/wl18xx/scan.c 				    req->n_channels, req->n_ssids,
req               217 drivers/net/wireless/ti/wl18xx/scan.c 	    c->long_interval > req->scan_plans[0].interval * MSEC_PER_SEC) {
req               219 drivers/net/wireless/ti/wl18xx/scan.c 			cpu_to_le16(req->scan_plans[0].interval * MSEC_PER_SEC);
req               225 drivers/net/wireless/ti/wl18xx/scan.c 			cpu_to_le16(req->scan_plans[0].interval * MSEC_PER_SEC);
req               245 drivers/net/wireless/ti/wl18xx/scan.c 				 req->ssids ? req->ssids[0].ssid : NULL,
req               246 drivers/net/wireless/ti/wl18xx/scan.c 				 req->ssids ? req->ssids[0].ssid_len : 0,
req               262 drivers/net/wireless/ti/wl18xx/scan.c 				 req->ssids ? req->ssids[0].ssid : NULL,
req               263 drivers/net/wireless/ti/wl18xx/scan.c 				 req->ssids ? req->ssids[0].ssid_len : 0,
req               290 drivers/net/wireless/ti/wl18xx/scan.c 			    struct cfg80211_sched_scan_request *req,
req               293 drivers/net/wireless/ti/wl18xx/scan.c 	return wl18xx_scan_sched_scan_config(wl, wlvif, req, ies);
req               329 drivers/net/wireless/ti/wl18xx/scan.c 		      struct cfg80211_scan_request *req)
req               331 drivers/net/wireless/ti/wl18xx/scan.c 	return wl18xx_scan_send(wl, wlvif, req);
req               106 drivers/net/wireless/ti/wl18xx/scan.h 		      struct cfg80211_scan_request *req);
req               110 drivers/net/wireless/ti/wl18xx/scan.h 			    struct cfg80211_sched_scan_request *req,
req              2674 drivers/net/wireless/ti/wlcore/main.c 		wl->scan.req = NULL;
req              3680 drivers/net/wireless/ti/wlcore/main.c 	struct cfg80211_scan_request *req = &hw_req->req;
req              3688 drivers/net/wireless/ti/wlcore/main.c 	if (req->n_ssids) {
req              3689 drivers/net/wireless/ti/wlcore/main.c 		ssid = req->ssids[0].ssid;
req              3690 drivers/net/wireless/ti/wlcore/main.c 		len = req->ssids[0].ssid_len;
req              3718 drivers/net/wireless/ti/wlcore/main.c 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
req              3769 drivers/net/wireless/ti/wlcore/main.c 	wl->scan.req = NULL;
req              3783 drivers/net/wireless/ti/wlcore/main.c 				      struct cfg80211_sched_scan_request *req,
req              3805 drivers/net/wireless/ti/wlcore/main.c 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
req                53 drivers/net/wireless/ti/wlcore/scan.c 	wl->scan.req = NULL;
req               334 drivers/net/wireless/ti/wlcore/scan.c 		struct cfg80211_scan_request *req)
req               342 drivers/net/wireless/ti/wlcore/scan.c 	BUG_ON(req->n_channels > WL1271_MAX_CHANNELS);
req               357 drivers/net/wireless/ti/wlcore/scan.c 	wl->scan.req = req;
req               365 drivers/net/wireless/ti/wlcore/scan.c 	wl->ops->scan_start(wl, wlvif, req);
req               373 drivers/net/wireless/ti/wlcore/scan.c 				 struct cfg80211_sched_scan_request *req)
req               376 drivers/net/wireless/ti/wlcore/scan.c 	struct cfg80211_match_set *sets = req->match_sets;
req               377 drivers/net/wireless/ti/wlcore/scan.c 	struct cfg80211_ssid *ssids = req->ssids;
req               383 drivers/net/wireless/ti/wlcore/scan.c 	for (i = 0; i < req->n_match_sets; i++)
req               389 drivers/net/wireless/ti/wlcore/scan.c 	    (!req->n_ssids ||
req               390 drivers/net/wireless/ti/wlcore/scan.c 	     (req->n_ssids == 1 && req->ssids[0].ssid_len == 0))) {
req               406 drivers/net/wireless/ti/wlcore/scan.c 		for (i = 0; i < req->n_ssids; i++) {
req               418 drivers/net/wireless/ti/wlcore/scan.c 		for (i = 0; i < req->n_match_sets; i++) {
req               429 drivers/net/wireless/ti/wlcore/scan.c 		if ((req->n_ssids > 1) ||
req               430 drivers/net/wireless/ti/wlcore/scan.c 		    (req->n_ssids == 1 && req->ssids[0].ssid_len > 0)) {
req               435 drivers/net/wireless/ti/wlcore/scan.c 			for (i = 0; i < req->n_ssids; i++) {
req               436 drivers/net/wireless/ti/wlcore/scan.c 				if (!req->ssids[i].ssid_len)
req               440 drivers/net/wireless/ti/wlcore/scan.c 					if ((req->ssids[i].ssid_len ==
req               442 drivers/net/wireless/ti/wlcore/scan.c 					    !memcmp(req->ssids[i].ssid,
req               444 drivers/net/wireless/ti/wlcore/scan.c 						   req->ssids[i].ssid_len)) {
req                17 drivers/net/wireless/ti/wlcore/scan.h 		struct cfg80211_scan_request *req);
req                25 drivers/net/wireless/ti/wlcore/scan.h 				     struct cfg80211_sched_scan_request *req,
req               162 drivers/net/wireless/ti/wlcore/scan.h 				 struct cfg80211_sched_scan_request *req);
req                83 drivers/net/wireless/ti/wlcore/wlcore.h 			  struct cfg80211_scan_request *req);
req                86 drivers/net/wireless/ti/wlcore/wlcore.h 				struct cfg80211_sched_scan_request *req,
req               175 drivers/net/wireless/ti/wlcore/wlcore_i.h 	struct cfg80211_scan_request *req;
req               102 drivers/net/wireless/zydas/zd1211rw/zd_usb.c static bool check_read_regs(struct zd_usb *usb, struct usb_req_read_regs *req,
req               406 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 		if (!check_read_regs(usb, intr->read_regs.req,
req              1601 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 				  struct usb_req_read_regs *req,
req              1608 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	intr->read_regs.req = req;
req              1623 drivers/net/wireless/zydas/zd1211rw/zd_usb.c static bool check_read_regs(struct zd_usb *usb, struct usb_req_read_regs *req,
req              1650 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 		if (rd->addr != req->addr[i]) {
req              1654 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 				 le16_to_cpu(req->addr[i]));
req              1663 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 		       struct usb_req_read_regs *req, unsigned int count,
req              1681 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	if (!check_read_regs(usb, req, count)) {
req              1702 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	struct usb_req_read_regs *req = NULL;
req              1734 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	req = (void *)usb->req_buf;
req              1736 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	req->id = cpu_to_le16(USB_REQ_READ_REGS);
req              1738 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 		req->addr[i] = cpu_to_le16((u16)addresses[i]);
req              1743 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	prepare_read_regs_int(usb, req, count);
req              1744 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/);
req              1767 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	r = get_results(usb, values, req, count, &retry);
req              1871 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	struct usb_req_write_regs *req = NULL;
req              1903 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	req_len = struct_size(req, reg_writes, count);
req              1904 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	req = kmalloc(req_len, GFP_KERNEL);
req              1905 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	if (!req) {
req              1910 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	req->id = cpu_to_le16(USB_REQ_WRITE_REGS);
req              1912 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 		struct reg_data *rw  = &req->reg_writes[i];
req              1922 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 				 req, req_len, iowrite16v_urb_complete, usb,
req              1926 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 				  req, req_len, iowrite16v_urb_complete, usb);
req              1967 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	struct usb_req_rfwrite *req = NULL;
req              2016 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	req = (void *)usb->req_buf;
req              2018 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	req->id = cpu_to_le16(USB_REQ_WRITE_RF);
req              2020 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	req->value = cpu_to_le16(2);
req              2021 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	req->bits = cpu_to_le16(bits);
req              2027 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 		req->bit_values[i] = cpu_to_le16(bv);
req              2031 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/);
req               134 drivers/net/wireless/zydas/zd1211rw/zd_usb.h 	struct usb_req_read_regs *req;
req                54 drivers/net/xen-netback/common.h 	struct xen_netif_tx_request req; /* tx request */
req               338 drivers/net/xen-netback/netback.c 	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
req               586 drivers/net/xen-netback/netback.c 		txp = &queue->pending_tx_info[pending_idx].req;
req              1005 drivers/net/xen-netback/netback.c 			memcpy(&queue->pending_tx_info[pending_idx].req,
req              1107 drivers/net/xen-netback/netback.c 		txp = &queue->pending_tx_info[pending_idx].req;
req              1358 drivers/net/xen-netback/netback.c 	make_tx_response(queue, &pending_tx_info->req,
req              1514 drivers/net/xen-netback/netback.c 			       const struct xen_netif_ctrl_request *req,
req              1519 drivers/net/xen-netback/netback.c 		.id = req->id,
req              1520 drivers/net/xen-netback/netback.c 		.type = req->type,
req              1539 drivers/net/xen-netback/netback.c 				 const struct xen_netif_ctrl_request *req)
req              1544 drivers/net/xen-netback/netback.c 	switch (req->type) {
req              1546 drivers/net/xen-netback/netback.c 		status = xenvif_set_hash_alg(vif, req->data[0]);
req              1554 drivers/net/xen-netback/netback.c 		status = xenvif_set_hash_flags(vif, req->data[0]);
req              1558 drivers/net/xen-netback/netback.c 		status = xenvif_set_hash_key(vif, req->data[0],
req              1559 drivers/net/xen-netback/netback.c 					     req->data[1]);
req              1569 drivers/net/xen-netback/netback.c 						      req->data[0]);
req              1573 drivers/net/xen-netback/netback.c 		status = xenvif_set_hash_mapping(vif, req->data[0],
req              1574 drivers/net/xen-netback/netback.c 						 req->data[1],
req              1575 drivers/net/xen-netback/netback.c 						 req->data[2]);
req              1582 drivers/net/xen-netback/netback.c 	make_ctrl_response(vif, req, status, data);
req              1601 drivers/net/xen-netback/netback.c 			struct xen_netif_ctrl_request req;
req              1603 drivers/net/xen-netback/netback.c 			RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
req              1606 drivers/net/xen-netback/netback.c 			process_ctrl_request(vif, &req);
req               168 drivers/net/xen-netback/rx.c 			       struct xen_netif_rx_request *req,
req               195 drivers/net/xen-netback/rx.c 	op->dest.u.ref    = req->gref;
req               356 drivers/net/xen-netback/rx.c 				struct xen_netif_rx_request *req,
req               367 drivers/net/xen-netback/rx.c 		xenvif_rx_copy_add(queue, req, offset, data, len);
req               394 drivers/net/xen-netback/rx.c 	rsp->id = req->id;
req               400 drivers/net/xen-netback/rx.c 				 struct xen_netif_rx_request *req,
req               431 drivers/net/xen-netback/rx.c 		struct xen_netif_rx_request *req;
req               434 drivers/net/xen-netback/rx.c 		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
req               439 drivers/net/xen-netback/rx.c 			xenvif_rx_extra_slot(queue, &pkt, req, rsp);
req               441 drivers/net/xen-netback/rx.c 			xenvif_rx_data_slot(queue, &pkt, req, rsp);
req               299 drivers/net/xen-netfront.c 		struct xen_netif_rx_request *req;
req               318 drivers/net/xen-netfront.c 		req = RING_GET_REQUEST(&queue->rx, req_prod);
req               323 drivers/net/xen-netfront.c 		req->id = id;
req               324 drivers/net/xen-netfront.c 		req->gref = ref;
req               382 drivers/nfc/pn533/pn533.c 	dev_kfree_skb(cmd->req);
req               412 drivers/nfc/pn533/pn533.c 			      struct sk_buff *req,
req               426 drivers/nfc/pn533/pn533.c 	cmd->req = req;
req               430 drivers/nfc/pn533/pn533.c 	pn533_build_cmd_frame(dev, cmd_code, req);
req               436 drivers/nfc/pn533/pn533.c 		rc = dev->phy_ops->send_frame(dev, req);
req               462 drivers/nfc/pn533/pn533.c 				 struct sk_buff *req,
req               468 drivers/nfc/pn533/pn533.c 	rc = __pn533_send_async(dev, cmd_code, req, complete_cb,
req               475 drivers/nfc/pn533/pn533.c 				struct sk_buff *req,
req               481 drivers/nfc/pn533/pn533.c 	rc = __pn533_send_async(dev, cmd_code, req, complete_cb,
req               496 drivers/nfc/pn533/pn533.c 				       struct sk_buff *req,
req               508 drivers/nfc/pn533/pn533.c 	cmd->req = req;
req               512 drivers/nfc/pn533/pn533.c 	pn533_build_cmd_frame(dev, cmd_code, req);
req               515 drivers/nfc/pn533/pn533.c 	rc = dev->phy_ops->send_frame(dev, req);
req               555 drivers/nfc/pn533/pn533.c 	rc = dev->phy_ops->send_frame(dev, cmd->req);
req               558 drivers/nfc/pn533/pn533.c 		dev_kfree_skb(cmd->req);
req               599 drivers/nfc/pn533/pn533.c 					       struct sk_buff *req)
req               606 drivers/nfc/pn533/pn533.c 	rc = pn533_send_cmd_async(dev, cmd_code, req,
req               609 drivers/nfc/pn533/pn533.c 		dev_kfree_skb(req);
req               180 drivers/nfc/pn533/pn533.h 	struct sk_buff *req;
req               473 drivers/nfc/port100.c 	struct sk_buff *req;
req               813 drivers/nfc/port100.c 	struct sk_buff *req = cmd->req;
req               816 drivers/nfc/port100.c 	dev_kfree_skb(req);
req               838 drivers/nfc/port100.c 				struct sk_buff *req,
req               866 drivers/nfc/port100.c 	cmd->req = req;
req               872 drivers/nfc/port100.c 	port100_build_cmd_frame(dev, cmd_code, req);
req               876 drivers/nfc/port100.c 	rc = port100_send_frame_async(dev, req, resp, resp_len);
req               909 drivers/nfc/port100.c 					     struct sk_buff *req)
req               916 drivers/nfc/port100.c 	rc = port100_send_cmd_async(dev, cmd_code, req,
req               919 drivers/nfc/port100.c 		dev_kfree_skb(req);
req                57 drivers/nfc/st21nfca/dep.c #define PROTOCOL_ERR(req) pr_err("%d: ST21NFCA Protocol error: %s\n", \
req                58 drivers/nfc/st21nfca/dep.c 				 __LINE__, req)
req                93 drivers/nfc/st95hf/core.c 	enum req_type req;
req               126 drivers/nfc/st95hf/core.c 		.req = SYNC,
req               133 drivers/nfc/st95hf/core.c 		.req = SYNC,
req               140 drivers/nfc/st95hf/core.c 		.req = SYNC,
req               147 drivers/nfc/st95hf/core.c 		.req = SYNC,
req               154 drivers/nfc/st95hf/core.c 		.req = SYNC,
req               161 drivers/nfc/st95hf/core.c 		.req = SYNC,
req               168 drivers/nfc/st95hf/core.c 		.req = ASYNC,
req               175 drivers/nfc/st95hf/core.c 		.req = SYNC,
req               182 drivers/nfc/st95hf/core.c 		.req = SYNC,
req               278 drivers/nfc/st95hf/core.c 			      cmd_array[cmd].req);
req               284 drivers/nfc/st95hf/core.c 	if (cmd_array[cmd].req == SYNC && recv_res) {
req              1393 drivers/nfc/trf7970a.c 	u8 *req = skb->data;
req              1410 drivers/nfc/trf7970a.c 		if (req[0] == NFC_T2T_CMD_READ)
req              1426 drivers/nfc/trf7970a.c 		switch (req[0] & ISO15693_REQ_FLAG_SPEED_MASK) {
req              1451 drivers/nfc/trf7970a.c 		    trf7970a_is_iso15693_write_or_lock(req[1]) &&
req              1452 drivers/nfc/trf7970a.c 		    (req[0] & ISO15693_REQ_FLAG_OPTION))
req                56 drivers/nvdimm/nd_virtio.c 	req_data->req.type = cpu_to_le32(VIRTIO_PMEM_REQ_TYPE_FLUSH);
req                57 drivers/nvdimm/nd_virtio.c 	sg_init_one(&sg, &req_data->req, sizeof(req_data->req));
req                19 drivers/nvdimm/virtio_pmem.h 	struct virtio_pmem_req req;
req               254 drivers/nvme/host/core.c static inline bool nvme_req_needs_retry(struct request *req)
req               256 drivers/nvme/host/core.c 	if (blk_noretry_request(req))
req               258 drivers/nvme/host/core.c 	if (nvme_req(req)->status & NVME_SC_DNR)
req               260 drivers/nvme/host/core.c 	if (nvme_req(req)->retries >= nvme_max_retries)
req               265 drivers/nvme/host/core.c static void nvme_retry_req(struct request *req)
req               267 drivers/nvme/host/core.c 	struct nvme_ns *ns = req->q->queuedata;
req               272 drivers/nvme/host/core.c 	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
req               276 drivers/nvme/host/core.c 	nvme_req(req)->retries++;
req               277 drivers/nvme/host/core.c 	blk_mq_requeue_request(req, false);
req               278 drivers/nvme/host/core.c 	blk_mq_delay_kick_requeue_list(req->q, delay);
req               281 drivers/nvme/host/core.c void nvme_complete_rq(struct request *req)
req               283 drivers/nvme/host/core.c 	blk_status_t status = nvme_error_status(nvme_req(req)->status);
req               285 drivers/nvme/host/core.c 	trace_nvme_complete_rq(req);
req               287 drivers/nvme/host/core.c 	if (nvme_req(req)->ctrl->kas)
req               288 drivers/nvme/host/core.c 		nvme_req(req)->ctrl->comp_seen = true;
req               290 drivers/nvme/host/core.c 	if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
req               291 drivers/nvme/host/core.c 		if ((req->cmd_flags & REQ_NVME_MPATH) &&
req               293 drivers/nvme/host/core.c 			nvme_failover_req(req);
req               297 drivers/nvme/host/core.c 		if (!blk_queue_dying(req->q)) {
req               298 drivers/nvme/host/core.c 			nvme_retry_req(req);
req               303 drivers/nvme/host/core.c 	nvme_trace_bio_complete(req, status);
req               304 drivers/nvme/host/core.c 	blk_mq_end_request(req, status);
req               308 drivers/nvme/host/core.c bool nvme_cancel_request(struct request *req, void *data, bool reserved)
req               311 drivers/nvme/host/core.c 				"Cancelling I/O %d", req->tag);
req               314 drivers/nvme/host/core.c 	if (blk_mq_request_completed(req))
req               317 drivers/nvme/host/core.c 	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
req               318 drivers/nvme/host/core.c 	blk_mq_complete_request(req);
req               470 drivers/nvme/host/core.c static inline void nvme_clear_nvme_request(struct request *req)
req               472 drivers/nvme/host/core.c 	if (!(req->rq_flags & RQF_DONTPREP)) {
req               473 drivers/nvme/host/core.c 		nvme_req(req)->retries = 0;
req               474 drivers/nvme/host/core.c 		nvme_req(req)->flags = 0;
req               475 drivers/nvme/host/core.c 		req->rq_flags |= RQF_DONTPREP;
req               483 drivers/nvme/host/core.c 	struct request *req;
req               486 drivers/nvme/host/core.c 		req = blk_mq_alloc_request(q, op, flags);
req               488 drivers/nvme/host/core.c 		req = blk_mq_alloc_request_hctx(q, op, flags,
req               491 drivers/nvme/host/core.c 	if (IS_ERR(req))
req               492 drivers/nvme/host/core.c 		return req;
req               494 drivers/nvme/host/core.c 	req->cmd_flags |= REQ_FAILFAST_DRIVER;
req               495 drivers/nvme/host/core.c 	nvme_clear_nvme_request(req);
req               496 drivers/nvme/host/core.c 	nvme_req(req)->cmd = cmd;
req               498 drivers/nvme/host/core.c 	return req;
req               581 drivers/nvme/host/core.c 				     struct request *req, u16 *control,
req               584 drivers/nvme/host/core.c 	enum rw_hint streamid = req->write_hint;
req               597 drivers/nvme/host/core.c 	if (streamid < ARRAY_SIZE(req->q->write_hints))
req               598 drivers/nvme/host/core.c 		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
req               608 drivers/nvme/host/core.c static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
req               611 drivers/nvme/host/core.c 	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
req               635 drivers/nvme/host/core.c 	__rq_for_each_bio(bio, req) {
req               660 drivers/nvme/host/core.c 	req->special_vec.bv_page = virt_to_page(range);
req               661 drivers/nvme/host/core.c 	req->special_vec.bv_offset = offset_in_page(range);
req               662 drivers/nvme/host/core.c 	req->special_vec.bv_len = alloc_size;
req               663 drivers/nvme/host/core.c 	req->rq_flags |= RQF_SPECIAL_PAYLOAD;
req               669 drivers/nvme/host/core.c 		struct request *req, struct nvme_command *cmnd)
req               672 drivers/nvme/host/core.c 		return nvme_setup_discard(ns, req, cmnd);
req               677 drivers/nvme/host/core.c 		cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
req               679 drivers/nvme/host/core.c 		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
req               685 drivers/nvme/host/core.c 		struct request *req, struct nvme_command *cmnd)
req               691 drivers/nvme/host/core.c 	if (req->cmd_flags & REQ_FUA)
req               693 drivers/nvme/host/core.c 	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
req               696 drivers/nvme/host/core.c 	if (req->cmd_flags & REQ_RAHEAD)
req               699 drivers/nvme/host/core.c 	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
req               701 drivers/nvme/host/core.c 	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
req               702 drivers/nvme/host/core.c 	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
req               704 drivers/nvme/host/core.c 	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
req               705 drivers/nvme/host/core.c 		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);
req               714 drivers/nvme/host/core.c 		if (!blk_integrity_rq(req)) {
req               728 drivers/nvme/host/core.c 			cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
req               738 drivers/nvme/host/core.c void nvme_cleanup_cmd(struct request *req)
req               740 drivers/nvme/host/core.c 	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
req               741 drivers/nvme/host/core.c 		struct nvme_ns *ns = req->rq_disk->private_data;
req               742 drivers/nvme/host/core.c 		struct page *page = req->special_vec.bv_page;
req               747 drivers/nvme/host/core.c 			kfree(page_address(page) + req->special_vec.bv_offset);
req               752 drivers/nvme/host/core.c blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
req               757 drivers/nvme/host/core.c 	nvme_clear_nvme_request(req);
req               760 drivers/nvme/host/core.c 	switch (req_op(req)) {
req               763 drivers/nvme/host/core.c 		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
req               769 drivers/nvme/host/core.c 		ret = nvme_setup_write_zeroes(ns, req, cmd);
req               772 drivers/nvme/host/core.c 		ret = nvme_setup_discard(ns, req, cmd);
req               776 drivers/nvme/host/core.c 		ret = nvme_setup_rw(ns, req, cmd);
req               783 drivers/nvme/host/core.c 	cmd->common.command_id = req->tag;
req               784 drivers/nvme/host/core.c 	trace_nvme_setup_cmd(req, cmd);
req               823 drivers/nvme/host/core.c 	struct request *req;
req               826 drivers/nvme/host/core.c 	req = nvme_alloc_request(q, cmd, flags, qid);
req               827 drivers/nvme/host/core.c 	if (IS_ERR(req))
req               828 drivers/nvme/host/core.c 		return PTR_ERR(req);
req               830 drivers/nvme/host/core.c 	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
req               833 drivers/nvme/host/core.c 		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
req               839 drivers/nvme/host/core.c 		nvme_execute_rq_polled(req->q, NULL, req, at_head);
req               841 drivers/nvme/host/core.c 		blk_execute_rq(req->q, NULL, req, at_head);
req               843 drivers/nvme/host/core.c 		*result = nvme_req(req)->result;
req               844 drivers/nvme/host/core.c 	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
req               847 drivers/nvme/host/core.c 		ret = nvme_req(req)->status;
req               849 drivers/nvme/host/core.c 	blk_mq_free_request(req);
req               904 drivers/nvme/host/core.c 	struct request *req;
req               909 drivers/nvme/host/core.c 	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
req               910 drivers/nvme/host/core.c 	if (IS_ERR(req))
req               911 drivers/nvme/host/core.c 		return PTR_ERR(req);
req               913 drivers/nvme/host/core.c 	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
req               914 drivers/nvme/host/core.c 	nvme_req(req)->flags |= NVME_REQ_USERCMD;
req               917 drivers/nvme/host/core.c 		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
req               921 drivers/nvme/host/core.c 		bio = req->bio;
req               930 drivers/nvme/host/core.c 			req->cmd_flags |= REQ_INTEGRITY;
req               934 drivers/nvme/host/core.c 	blk_execute_rq(req->q, disk, req, 0);
req               935 drivers/nvme/host/core.c 	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
req               938 drivers/nvme/host/core.c 		ret = nvme_req(req)->status;
req               940 drivers/nvme/host/core.c 		*result = le64_to_cpu(nvme_req(req)->result.u64);
req               950 drivers/nvme/host/core.c 	blk_mq_free_request(req);
req               565 drivers/nvme/host/fabrics.c 	struct nvme_request *req = nvme_req(rq);
req               571 drivers/nvme/host/fabrics.c 	if (!blk_rq_is_passthrough(rq) || (req->flags & NVME_REQ_USERCMD))
req               581 drivers/nvme/host/fabrics.c 		if (nvme_is_fabrics(req->cmd) &&
req               582 drivers/nvme/host/fabrics.c 		    req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
req                57 drivers/nvme/host/fault_inject.c void nvme_should_fail(struct request *req)
req                59 drivers/nvme/host/fault_inject.c 	struct gendisk *disk = req->rq_disk;
req                71 drivers/nvme/host/fault_inject.c 		fault_inject = &nvme_req(req)->ctrl->fault_inject;
req                79 drivers/nvme/host/fault_inject.c 		nvme_req(req)->status =	status;
req              1034 drivers/nvme/host/fc.c 		void (*done)(struct nvmefc_ls_req *req, int status))
req              1136 drivers/nvme/host/fc.c 		void (*done)(struct nvmefc_ls_req *req, int status))
req              1554 drivers/nvme/host/fc.c nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
req              1556 drivers/nvme/host/fc.c 	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
req              2418 drivers/nvme/host/fc.c nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
req              2422 drivers/nvme/host/fc.c 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
req                67 drivers/nvme/host/multipath.c void nvme_failover_req(struct request *req)
req                69 drivers/nvme/host/multipath.c 	struct nvme_ns *ns = req->q->queuedata;
req                70 drivers/nvme/host/multipath.c 	u16 status = nvme_req(req)->status;
req                74 drivers/nvme/host/multipath.c 	blk_steal_bios(&ns->head->requeue_list, req);
req                76 drivers/nvme/host/multipath.c 	blk_mq_end_request(req, 0);
req               143 drivers/nvme/host/nvme.h static inline struct nvme_request *nvme_req(struct request *req)
req               145 drivers/nvme/host/nvme.h 	return blk_mq_rq_to_pdu(req);
req               148 drivers/nvme/host/nvme.h static inline u16 nvme_req_qid(struct request *req)
req               150 drivers/nvme/host/nvme.h 	if (!req->rq_disk)
req               152 drivers/nvme/host/nvme.h 	return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(req)) + 1;
req               403 drivers/nvme/host/nvme.h void nvme_should_fail(struct request *req);
req               412 drivers/nvme/host/nvme.h static inline void nvme_should_fail(struct request *req) {}
req               427 drivers/nvme/host/nvme.h static inline void nvme_end_request(struct request *req, __le16 status,
req               430 drivers/nvme/host/nvme.h 	struct nvme_request *rq = nvme_req(req);
req               435 drivers/nvme/host/nvme.h 	nvme_should_fail(req);
req               436 drivers/nvme/host/nvme.h 	blk_mq_complete_request(req);
req               449 drivers/nvme/host/nvme.h void nvme_complete_rq(struct request *req);
req               450 drivers/nvme/host/nvme.h bool nvme_cancel_request(struct request *req, void *data, bool reserved);
req               485 drivers/nvme/host/nvme.h void nvme_cleanup_cmd(struct request *req);
req               486 drivers/nvme/host/nvme.h blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
req               524 drivers/nvme/host/nvme.h void nvme_failover_req(struct request *req);
req               544 drivers/nvme/host/nvme.h static inline void nvme_trace_bio_complete(struct request *req,
req               547 drivers/nvme/host/nvme.h 	struct nvme_ns *ns = req->q->queuedata;
req               549 drivers/nvme/host/nvme.h 	if (req->cmd_flags & REQ_NVME_MPATH)
req               551 drivers/nvme/host/nvme.h 					 req->bio, status);
req               573 drivers/nvme/host/nvme.h static inline void nvme_failover_req(struct request *req)
req               601 drivers/nvme/host/nvme.h static inline void nvme_trace_bio_complete(struct request *req,
req               201 drivers/nvme/host/pci.c 	struct nvme_request req;
req               395 drivers/nvme/host/pci.c static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
req               399 drivers/nvme/host/pci.c 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
req               406 drivers/nvme/host/pci.c 	nvme_req(req)->ctrl = &dev->ctrl;
req               498 drivers/nvme/host/pci.c static void **nvme_pci_iod_list(struct request *req)
req               500 drivers/nvme/host/pci.c 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
req               501 drivers/nvme/host/pci.c 	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
req               504 drivers/nvme/host/pci.c static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
req               506 drivers/nvme/host/pci.c 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
req               507 drivers/nvme/host/pci.c 	int nseg = blk_rq_nr_phys_segments(req);
req               513 drivers/nvme/host/pci.c 	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
req               524 drivers/nvme/host/pci.c static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
req               526 drivers/nvme/host/pci.c 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
req               533 drivers/nvme/host/pci.c 			       rq_dma_dir(req));
req               541 drivers/nvme/host/pci.c 				    rq_dma_dir(req));
req               543 drivers/nvme/host/pci.c 		dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
req               547 drivers/nvme/host/pci.c 		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
req               551 drivers/nvme/host/pci.c 		void *addr = nvme_pci_iod_list(req)[i];
req               586 drivers/nvme/host/pci.c 		struct request *req, struct nvme_rw_command *cmnd)
req               588 drivers/nvme/host/pci.c 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
req               590 drivers/nvme/host/pci.c 	int length = blk_rq_payload_bytes(req);
req               597 drivers/nvme/host/pci.c 	void **list = nvme_pci_iod_list(req);
req               674 drivers/nvme/host/pci.c 			blk_rq_payload_bytes(req), iod->nents);
req               700 drivers/nvme/host/pci.c 		struct request *req, struct nvme_rw_command *cmd, int entries)
req               702 drivers/nvme/host/pci.c 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
req               731 drivers/nvme/host/pci.c 	nvme_pci_iod_list(req)[0] = sg_list;
req               746 drivers/nvme/host/pci.c 			nvme_pci_iod_list(req)[iod->npages++] = sg_list;
req               759 drivers/nvme/host/pci.c 		struct request *req, struct nvme_rw_command *cmnd,
req               762 drivers/nvme/host/pci.c 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
req               766 drivers/nvme/host/pci.c 	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
req               778 drivers/nvme/host/pci.c 		struct request *req, struct nvme_rw_command *cmnd,
req               781 drivers/nvme/host/pci.c 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
req               783 drivers/nvme/host/pci.c 	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
req               795 drivers/nvme/host/pci.c static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
req               798 drivers/nvme/host/pci.c 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
req               802 drivers/nvme/host/pci.c 	if (blk_rq_nr_phys_segments(req) == 1) {
req               803 drivers/nvme/host/pci.c 		struct bio_vec bv = req_bvec(req);
req               807 drivers/nvme/host/pci.c 				return nvme_setup_prp_simple(dev, req,
req               812 drivers/nvme/host/pci.c 				return nvme_setup_sgl_simple(dev, req,
req               821 drivers/nvme/host/pci.c 	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
req               822 drivers/nvme/host/pci.c 	iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
req               828 drivers/nvme/host/pci.c 				iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN);
req               831 drivers/nvme/host/pci.c 					     rq_dma_dir(req), DMA_ATTR_NO_WARN);
req               835 drivers/nvme/host/pci.c 	iod->use_sgl = nvme_pci_use_sgls(dev, req);
req               837 drivers/nvme/host/pci.c 		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
req               839 drivers/nvme/host/pci.c 		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
req               842 drivers/nvme/host/pci.c 		nvme_unmap_data(dev, req);
req               846 drivers/nvme/host/pci.c static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
req               849 drivers/nvme/host/pci.c 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
req               851 drivers/nvme/host/pci.c 	iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
req               852 drivers/nvme/host/pci.c 			rq_dma_dir(req), 0);
req               868 drivers/nvme/host/pci.c 	struct request *req = bd->rq;
req               869 drivers/nvme/host/pci.c 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
req               884 drivers/nvme/host/pci.c 	ret = nvme_setup_cmd(ns, req, &cmnd);
req               888 drivers/nvme/host/pci.c 	if (blk_rq_nr_phys_segments(req)) {
req               889 drivers/nvme/host/pci.c 		ret = nvme_map_data(dev, req, &cmnd);
req               894 drivers/nvme/host/pci.c 	if (blk_integrity_rq(req)) {
req               895 drivers/nvme/host/pci.c 		ret = nvme_map_metadata(dev, req, &cmnd);
req               900 drivers/nvme/host/pci.c 	blk_mq_start_request(req);
req               904 drivers/nvme/host/pci.c 	nvme_unmap_data(dev, req);
req               906 drivers/nvme/host/pci.c 	nvme_cleanup_cmd(req);
req               910 drivers/nvme/host/pci.c static void nvme_pci_complete_rq(struct request *req)
req               912 drivers/nvme/host/pci.c 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
req               915 drivers/nvme/host/pci.c 	nvme_cleanup_cmd(req);
req               916 drivers/nvme/host/pci.c 	if (blk_integrity_rq(req))
req               918 drivers/nvme/host/pci.c 			       rq_integrity_vec(req)->bv_len, rq_data_dir(req));
req               919 drivers/nvme/host/pci.c 	if (blk_rq_nr_phys_segments(req))
req               920 drivers/nvme/host/pci.c 		nvme_unmap_data(dev, req);
req               921 drivers/nvme/host/pci.c 	nvme_complete_rq(req);
req               950 drivers/nvme/host/pci.c 	struct request *req;
req               972 drivers/nvme/host/pci.c 	req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
req               973 drivers/nvme/host/pci.c 	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
req               974 drivers/nvme/host/pci.c 	nvme_end_request(req, cqe->status, cqe->result);
req              1180 drivers/nvme/host/pci.c static void abort_endio(struct request *req, blk_status_t error)
req              1182 drivers/nvme/host/pci.c 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
req              1186 drivers/nvme/host/pci.c 		 "Abort status: 0x%x", nvme_req(req)->status);
req              1188 drivers/nvme/host/pci.c 	blk_mq_free_request(req);
req              1235 drivers/nvme/host/pci.c static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
req              1237 drivers/nvme/host/pci.c 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
req              1264 drivers/nvme/host/pci.c 	if (nvme_poll_irqdisable(nvmeq, req->tag)) {
req              1267 drivers/nvme/host/pci.c 			 req->tag, nvmeq->qid);
req              1284 drivers/nvme/host/pci.c 			 req->tag, nvmeq->qid);
req              1286 drivers/nvme/host/pci.c 		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
req              1302 drivers/nvme/host/pci.c 			 req->tag, nvmeq->qid);
req              1306 drivers/nvme/host/pci.c 		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
req              1318 drivers/nvme/host/pci.c 	cmd.abort.cid = req->tag;
req              1323 drivers/nvme/host/pci.c 		 req->tag, nvmeq->qid);
req              2198 drivers/nvme/host/pci.c static void nvme_del_queue_end(struct request *req, blk_status_t error)
req              2200 drivers/nvme/host/pci.c 	struct nvme_queue *nvmeq = req->end_io_data;
req              2202 drivers/nvme/host/pci.c 	blk_mq_free_request(req);
req              2206 drivers/nvme/host/pci.c static void nvme_del_cq_end(struct request *req, blk_status_t error)
req              2208 drivers/nvme/host/pci.c 	struct nvme_queue *nvmeq = req->end_io_data;
req              2213 drivers/nvme/host/pci.c 	nvme_del_queue_end(req, error);
req              2219 drivers/nvme/host/pci.c 	struct request *req;
req              2226 drivers/nvme/host/pci.c 	req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
req              2227 drivers/nvme/host/pci.c 	if (IS_ERR(req))
req              2228 drivers/nvme/host/pci.c 		return PTR_ERR(req);
req              2230 drivers/nvme/host/pci.c 	req->timeout = ADMIN_TIMEOUT;
req              2231 drivers/nvme/host/pci.c 	req->end_io_data = nvmeq;
req              2234 drivers/nvme/host/pci.c 	blk_execute_rq_nowait(q, NULL, req, false,
req                53 drivers/nvme/host/rdma.c 	struct nvme_request	req;
req               282 drivers/nvme/host/rdma.c 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
req               284 drivers/nvme/host/rdma.c 	kfree(req->sqe.data);
req               292 drivers/nvme/host/rdma.c 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
req               297 drivers/nvme/host/rdma.c 	req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL);
req               298 drivers/nvme/host/rdma.c 	if (!req->sqe.data)
req               301 drivers/nvme/host/rdma.c 	req->queue = queue;
req              1118 drivers/nvme/host/rdma.c 	struct nvme_rdma_request *req =
req              1120 drivers/nvme/host/rdma.c 	struct request *rq = blk_mq_rq_from_pdu(req);
req              1127 drivers/nvme/host/rdma.c 	if (refcount_dec_and_test(&req->ref))
req              1128 drivers/nvme/host/rdma.c 		nvme_end_request(rq, req->status, req->result);
req              1133 drivers/nvme/host/rdma.c 		struct nvme_rdma_request *req)
req              1140 drivers/nvme/host/rdma.c 		.ex.invalidate_rkey = req->mr->rkey,
req              1143 drivers/nvme/host/rdma.c 	req->reg_cqe.done = nvme_rdma_inv_rkey_done;
req              1144 drivers/nvme/host/rdma.c 	wr.wr_cqe = &req->reg_cqe;
req              1152 drivers/nvme/host/rdma.c 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
req              1159 drivers/nvme/host/rdma.c 	if (req->mr) {
req              1160 drivers/nvme/host/rdma.c 		ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
req              1161 drivers/nvme/host/rdma.c 		req->mr = NULL;
req              1164 drivers/nvme/host/rdma.c 	ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
req              1167 drivers/nvme/host/rdma.c 	sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
req              1182 drivers/nvme/host/rdma.c 		struct nvme_rdma_request *req, struct nvme_command *c,
req              1186 drivers/nvme/host/rdma.c 	struct scatterlist *sgl = req->sg_table.sgl;
req              1187 drivers/nvme/host/rdma.c 	struct ib_sge *sge = &req->sge[1];
req              1202 drivers/nvme/host/rdma.c 	req->num_sge += count;
req              1207 drivers/nvme/host/rdma.c 		struct nvme_rdma_request *req, struct nvme_command *c)
req              1211 drivers/nvme/host/rdma.c 	sg->addr = cpu_to_le64(sg_dma_address(req->sg_table.sgl));
req              1212 drivers/nvme/host/rdma.c 	put_unaligned_le24(sg_dma_len(req->sg_table.sgl), sg->length);
req              1219 drivers/nvme/host/rdma.c 		struct nvme_rdma_request *req, struct nvme_command *c,
req              1225 drivers/nvme/host/rdma.c 	req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs);
req              1226 drivers/nvme/host/rdma.c 	if (WARN_ON_ONCE(!req->mr))
req              1233 drivers/nvme/host/rdma.c 	nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
req              1235 drivers/nvme/host/rdma.c 		ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
req              1236 drivers/nvme/host/rdma.c 		req->mr = NULL;
req              1242 drivers/nvme/host/rdma.c 	ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
req              1244 drivers/nvme/host/rdma.c 	req->reg_cqe.done = nvme_rdma_memreg_done;
req              1245 drivers/nvme/host/rdma.c 	memset(&req->reg_wr, 0, sizeof(req->reg_wr));
req              1246 drivers/nvme/host/rdma.c 	req->reg_wr.wr.opcode = IB_WR_REG_MR;
req              1247 drivers/nvme/host/rdma.c 	req->reg_wr.wr.wr_cqe = &req->reg_cqe;
req              1248 drivers/nvme/host/rdma.c 	req->reg_wr.wr.num_sge = 0;
req              1249 drivers/nvme/host/rdma.c 	req->reg_wr.mr = req->mr;
req              1250 drivers/nvme/host/rdma.c 	req->reg_wr.key = req->mr->rkey;
req              1251 drivers/nvme/host/rdma.c 	req->reg_wr.access = IB_ACCESS_LOCAL_WRITE |
req              1255 drivers/nvme/host/rdma.c 	sg->addr = cpu_to_le64(req->mr->iova);
req              1256 drivers/nvme/host/rdma.c 	put_unaligned_le24(req->mr->length, sg->length);
req              1257 drivers/nvme/host/rdma.c 	put_unaligned_le32(req->mr->rkey, sg->key);
req              1267 drivers/nvme/host/rdma.c 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
req              1272 drivers/nvme/host/rdma.c 	req->num_sge = 1;
req              1273 drivers/nvme/host/rdma.c 	refcount_set(&req->ref, 2); /* send and recv completions */
req              1280 drivers/nvme/host/rdma.c 	req->sg_table.sgl = req->first_sgl;
req              1281 drivers/nvme/host/rdma.c 	ret = sg_alloc_table_chained(&req->sg_table,
req              1282 drivers/nvme/host/rdma.c 			blk_rq_nr_phys_segments(rq), req->sg_table.sgl,
req              1287 drivers/nvme/host/rdma.c 	req->nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);
req              1289 drivers/nvme/host/rdma.c 	count = ib_dma_map_sg(ibdev, req->sg_table.sgl, req->nents,
req              1301 drivers/nvme/host/rdma.c 			ret = nvme_rdma_map_sg_inline(queue, req, c, count);
req              1306 drivers/nvme/host/rdma.c 			ret = nvme_rdma_map_sg_single(queue, req, c);
req              1311 drivers/nvme/host/rdma.c 	ret = nvme_rdma_map_sg_fr(queue, req, c, count);
req              1319 drivers/nvme/host/rdma.c 	ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
req              1321 drivers/nvme/host/rdma.c 	sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
req              1329 drivers/nvme/host/rdma.c 	struct nvme_rdma_request *req =
req              1331 drivers/nvme/host/rdma.c 	struct request *rq = blk_mq_rq_from_pdu(req);
req              1338 drivers/nvme/host/rdma.c 	if (refcount_dec_and_test(&req->ref))
req              1339 drivers/nvme/host/rdma.c 		nvme_end_request(rq, req->status, req->result);
req              1445 drivers/nvme/host/rdma.c 	struct nvme_rdma_request *req;
req              1455 drivers/nvme/host/rdma.c 	req = blk_mq_rq_to_pdu(rq);
req              1457 drivers/nvme/host/rdma.c 	req->status = cqe->status;
req              1458 drivers/nvme/host/rdma.c 	req->result = cqe->result;
req              1461 drivers/nvme/host/rdma.c 		if (unlikely(wc->ex.invalidate_rkey != req->mr->rkey)) {
req              1464 drivers/nvme/host/rdma.c 				req->mr->rkey);
req              1467 drivers/nvme/host/rdma.c 	} else if (req->mr) {
req              1470 drivers/nvme/host/rdma.c 		ret = nvme_rdma_inv_rkey(queue, req);
req              1474 drivers/nvme/host/rdma.c 				req->mr->rkey, ret);
req              1481 drivers/nvme/host/rdma.c 	if (refcount_dec_and_test(&req->ref))
req              1482 drivers/nvme/host/rdma.c 		nvme_end_request(rq, req->status, req->result);
req              1699 drivers/nvme/host/rdma.c 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
req              1700 drivers/nvme/host/rdma.c 	struct nvme_rdma_queue *queue = req->queue;
req              1738 drivers/nvme/host/rdma.c 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
req              1739 drivers/nvme/host/rdma.c 	struct nvme_rdma_qe *sqe = &req->sqe;
req              1753 drivers/nvme/host/rdma.c 	req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
req              1756 drivers/nvme/host/rdma.c 	err = ib_dma_mapping_error(dev, req->sqe.dma);
req              1782 drivers/nvme/host/rdma.c 	err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
req              1783 drivers/nvme/host/rdma.c 			req->mr ? &req->reg_wr.wr : NULL);
req              1797 drivers/nvme/host/rdma.c 	ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
req              1811 drivers/nvme/host/rdma.c 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
req              1812 drivers/nvme/host/rdma.c 	struct nvme_rdma_queue *queue = req->queue;
req              1816 drivers/nvme/host/rdma.c 	ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
req                31 drivers/nvme/host/tcp.c 	struct nvme_request	req;
req               159 drivers/nvme/host/tcp.c static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
req               161 drivers/nvme/host/tcp.c 	return req == &req->queue->ctrl->async_req;
req               164 drivers/nvme/host/tcp.c static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
req               168 drivers/nvme/host/tcp.c 	if (unlikely(nvme_tcp_async_req(req)))
req               171 drivers/nvme/host/tcp.c 	rq = blk_mq_rq_from_pdu(req);
req               173 drivers/nvme/host/tcp.c 	return rq_data_dir(rq) == WRITE && req->data_len &&
req               174 drivers/nvme/host/tcp.c 		req->data_len <= nvme_tcp_inline_data_size(req->queue);
req               177 drivers/nvme/host/tcp.c static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
req               179 drivers/nvme/host/tcp.c 	return req->iter.bvec->bv_page;
req               182 drivers/nvme/host/tcp.c static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
req               184 drivers/nvme/host/tcp.c 	return req->iter.bvec->bv_offset + req->iter.iov_offset;
req               187 drivers/nvme/host/tcp.c static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
req               189 drivers/nvme/host/tcp.c 	return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
req               190 drivers/nvme/host/tcp.c 			req->pdu_len - req->pdu_sent);
req               193 drivers/nvme/host/tcp.c static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
req               195 drivers/nvme/host/tcp.c 	return req->iter.iov_offset;
req               198 drivers/nvme/host/tcp.c static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
req               200 drivers/nvme/host/tcp.c 	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
req               201 drivers/nvme/host/tcp.c 			req->pdu_len - req->pdu_sent : 0;
req               204 drivers/nvme/host/tcp.c static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
req               207 drivers/nvme/host/tcp.c 	return nvme_tcp_pdu_data_left(req) <= len;
req               210 drivers/nvme/host/tcp.c static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
req               213 drivers/nvme/host/tcp.c 	struct request *rq = blk_mq_rq_from_pdu(req);
req               225 drivers/nvme/host/tcp.c 		struct bio *bio = req->curr_bio;
req               233 drivers/nvme/host/tcp.c 	iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
req               234 drivers/nvme/host/tcp.c 	req->iter.iov_offset = offset;
req               237 drivers/nvme/host/tcp.c static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
req               240 drivers/nvme/host/tcp.c 	req->data_sent += len;
req               241 drivers/nvme/host/tcp.c 	req->pdu_sent += len;
req               242 drivers/nvme/host/tcp.c 	iov_iter_advance(&req->iter, len);
req               243 drivers/nvme/host/tcp.c 	if (!iov_iter_count(&req->iter) &&
req               244 drivers/nvme/host/tcp.c 	    req->data_sent < req->data_len) {
req               245 drivers/nvme/host/tcp.c 		req->curr_bio = req->curr_bio->bi_next;
req               246 drivers/nvme/host/tcp.c 		nvme_tcp_init_iter(req, WRITE);
req               250 drivers/nvme/host/tcp.c static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req)
req               252 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = req->queue;
req               255 drivers/nvme/host/tcp.c 	list_add_tail(&req->entry, &queue->send_list);
req               264 drivers/nvme/host/tcp.c 	struct nvme_tcp_request *req;
req               267 drivers/nvme/host/tcp.c 	req = list_first_entry_or_null(&queue->send_list,
req               269 drivers/nvme/host/tcp.c 	if (req)
req               270 drivers/nvme/host/tcp.c 		list_del(&req->entry);
req               273 drivers/nvme/host/tcp.c 	return req;
req               354 drivers/nvme/host/tcp.c 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
req               356 drivers/nvme/host/tcp.c 	page_frag_free(req->pdu);
req               364 drivers/nvme/host/tcp.c 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
req               369 drivers/nvme/host/tcp.c 	req->pdu = page_frag_alloc(&queue->pf_cache,
req               372 drivers/nvme/host/tcp.c 	if (!req->pdu)
req               375 drivers/nvme/host/tcp.c 	req->queue = queue;
req               502 drivers/nvme/host/tcp.c static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
req               505 drivers/nvme/host/tcp.c 	struct nvme_tcp_data_pdu *data = req->pdu;
req               506 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = req->queue;
req               507 drivers/nvme/host/tcp.c 	struct request *rq = blk_mq_rq_from_pdu(req);
req               511 drivers/nvme/host/tcp.c 	req->pdu_len = le32_to_cpu(pdu->r2t_length);
req               512 drivers/nvme/host/tcp.c 	req->pdu_sent = 0;
req               514 drivers/nvme/host/tcp.c 	if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
req               517 drivers/nvme/host/tcp.c 			rq->tag, req->pdu_len, req->data_len,
req               518 drivers/nvme/host/tcp.c 			req->data_sent);
req               522 drivers/nvme/host/tcp.c 	if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
req               526 drivers/nvme/host/tcp.c 			req->data_sent);
req               540 drivers/nvme/host/tcp.c 		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
req               543 drivers/nvme/host/tcp.c 	data->data_offset = cpu_to_le32(req->data_sent);
req               544 drivers/nvme/host/tcp.c 	data->data_length = cpu_to_le32(req->pdu_len);
req               551 drivers/nvme/host/tcp.c 	struct nvme_tcp_request *req;
req               562 drivers/nvme/host/tcp.c 	req = blk_mq_rq_to_pdu(rq);
req               564 drivers/nvme/host/tcp.c 	ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
req               568 drivers/nvme/host/tcp.c 	req->state = NVME_TCP_SEND_H2C_PDU;
req               569 drivers/nvme/host/tcp.c 	req->offset = 0;
req               571 drivers/nvme/host/tcp.c 	nvme_tcp_queue_request(req);
req               637 drivers/nvme/host/tcp.c 	struct nvme_tcp_request *req;
req               647 drivers/nvme/host/tcp.c 	req = blk_mq_rq_to_pdu(rq);
req               656 drivers/nvme/host/tcp.c 		if (!iov_iter_count(&req->iter)) {
req               657 drivers/nvme/host/tcp.c 			req->curr_bio = req->curr_bio->bi_next;
req               663 drivers/nvme/host/tcp.c 			if (!req->curr_bio) {
req               670 drivers/nvme/host/tcp.c 			nvme_tcp_init_iter(req, READ);
req               675 drivers/nvme/host/tcp.c 				iov_iter_count(&req->iter));
req               679 drivers/nvme/host/tcp.c 				&req->iter, recv_len, queue->rcv_hash);
req               682 drivers/nvme/host/tcp.c 					&req->iter, recv_len);
req               841 drivers/nvme/host/tcp.c static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
req               843 drivers/nvme/host/tcp.c 	nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
req               846 drivers/nvme/host/tcp.c static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
req               848 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = req->queue;
req               851 drivers/nvme/host/tcp.c 		struct page *page = nvme_tcp_req_cur_page(req);
req               852 drivers/nvme/host/tcp.c 		size_t offset = nvme_tcp_req_cur_offset(req);
req               853 drivers/nvme/host/tcp.c 		size_t len = nvme_tcp_req_cur_length(req);
req               854 drivers/nvme/host/tcp.c 		bool last = nvme_tcp_pdu_last_send(req, len);
req               873 drivers/nvme/host/tcp.c 		nvme_tcp_advance_req(req, ret);
req               882 drivers/nvme/host/tcp.c 					&req->ddgst);
req               883 drivers/nvme/host/tcp.c 				req->state = NVME_TCP_SEND_DDGST;
req               884 drivers/nvme/host/tcp.c 				req->offset = 0;
req               894 drivers/nvme/host/tcp.c static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
req               896 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = req->queue;
req               897 drivers/nvme/host/tcp.c 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
req               898 drivers/nvme/host/tcp.c 	bool inline_data = nvme_tcp_has_inline_data(req);
req               901 drivers/nvme/host/tcp.c 	int len = sizeof(*pdu) + hdgst - req->offset;
req               904 drivers/nvme/host/tcp.c 	if (queue->hdr_digest && !req->offset)
req               908 drivers/nvme/host/tcp.c 			offset_in_page(pdu) + req->offset, len,  flags);
req               915 drivers/nvme/host/tcp.c 			req->state = NVME_TCP_SEND_DATA;
req               918 drivers/nvme/host/tcp.c 			nvme_tcp_init_iter(req, WRITE);
req               924 drivers/nvme/host/tcp.c 	req->offset += ret;
req               929 drivers/nvme/host/tcp.c static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
req               931 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = req->queue;
req               932 drivers/nvme/host/tcp.c 	struct nvme_tcp_data_pdu *pdu = req->pdu;
req               934 drivers/nvme/host/tcp.c 	int len = sizeof(*pdu) - req->offset + hdgst;
req               937 drivers/nvme/host/tcp.c 	if (queue->hdr_digest && !req->offset)
req               941 drivers/nvme/host/tcp.c 			offset_in_page(pdu) + req->offset, len,
req               948 drivers/nvme/host/tcp.c 		req->state = NVME_TCP_SEND_DATA;
req               951 drivers/nvme/host/tcp.c 		if (!req->data_sent)
req               952 drivers/nvme/host/tcp.c 			nvme_tcp_init_iter(req, WRITE);
req               955 drivers/nvme/host/tcp.c 	req->offset += ret;
req               960 drivers/nvme/host/tcp.c static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
req               962 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = req->queue;
req               966 drivers/nvme/host/tcp.c 		.iov_base = &req->ddgst + req->offset,
req               967 drivers/nvme/host/tcp.c 		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
req               974 drivers/nvme/host/tcp.c 	if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
req               979 drivers/nvme/host/tcp.c 	req->offset += ret;
req               985 drivers/nvme/host/tcp.c 	struct nvme_tcp_request *req;
req               993 drivers/nvme/host/tcp.c 	req = queue->request;
req               995 drivers/nvme/host/tcp.c 	if (req->state == NVME_TCP_SEND_CMD_PDU) {
req               996 drivers/nvme/host/tcp.c 		ret = nvme_tcp_try_send_cmd_pdu(req);
req               999 drivers/nvme/host/tcp.c 		if (!nvme_tcp_has_inline_data(req))
req              1003 drivers/nvme/host/tcp.c 	if (req->state == NVME_TCP_SEND_H2C_PDU) {
req              1004 drivers/nvme/host/tcp.c 		ret = nvme_tcp_try_send_data_pdu(req);
req              1009 drivers/nvme/host/tcp.c 	if (req->state == NVME_TCP_SEND_DATA) {
req              1010 drivers/nvme/host/tcp.c 		ret = nvme_tcp_try_send_data(req);
req              1015 drivers/nvme/host/tcp.c 	if (req->state == NVME_TCP_SEND_DDGST)
req              1016 drivers/nvme/host/tcp.c 		ret = nvme_tcp_try_send_ddgst(req);
req              2048 drivers/nvme/host/tcp.c 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
req              2049 drivers/nvme/host/tcp.c 	struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
req              2050 drivers/nvme/host/tcp.c 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
req              2062 drivers/nvme/host/tcp.c 		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
req              2085 drivers/nvme/host/tcp.c 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
req              2086 drivers/nvme/host/tcp.c 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
req              2094 drivers/nvme/host/tcp.c 	    req->data_len <= nvme_tcp_inline_data_size(queue))
req              2095 drivers/nvme/host/tcp.c 		nvme_tcp_set_sg_inline(queue, c, req->data_len);
req              2097 drivers/nvme/host/tcp.c 		nvme_tcp_set_sg_host_data(c, req->data_len);
req              2105 drivers/nvme/host/tcp.c 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
req              2106 drivers/nvme/host/tcp.c 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
req              2107 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = req->queue;
req              2115 drivers/nvme/host/tcp.c 	req->state = NVME_TCP_SEND_CMD_PDU;
req              2116 drivers/nvme/host/tcp.c 	req->offset = 0;
req              2117 drivers/nvme/host/tcp.c 	req->data_sent = 0;
req              2118 drivers/nvme/host/tcp.c 	req->pdu_len = 0;
req              2119 drivers/nvme/host/tcp.c 	req->pdu_sent = 0;
req              2120 drivers/nvme/host/tcp.c 	req->data_len = blk_rq_nr_phys_segments(rq) ?
req              2122 drivers/nvme/host/tcp.c 	req->curr_bio = rq->bio;
req              2125 drivers/nvme/host/tcp.c 	    req->data_len <= nvme_tcp_inline_data_size(queue))
req              2126 drivers/nvme/host/tcp.c 		req->pdu_len = req->data_len;
req              2127 drivers/nvme/host/tcp.c 	else if (req->curr_bio)
req              2128 drivers/nvme/host/tcp.c 		nvme_tcp_init_iter(req, READ);
req              2134 drivers/nvme/host/tcp.c 	if (queue->data_digest && req->pdu_len) {
req              2139 drivers/nvme/host/tcp.c 	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
req              2141 drivers/nvme/host/tcp.c 		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
req              2160 drivers/nvme/host/tcp.c 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
req              2173 drivers/nvme/host/tcp.c 	nvme_tcp_queue_request(req);
req                48 drivers/nvme/host/trace.h 	    TP_PROTO(struct request *req, struct nvme_command *cmd),
req                49 drivers/nvme/host/trace.h 	    TP_ARGS(req, cmd),
req                63 drivers/nvme/host/trace.h 		__entry->ctrl_id = nvme_req(req)->ctrl->instance;
req                64 drivers/nvme/host/trace.h 		__entry->qid = nvme_req_qid(req);
req                71 drivers/nvme/host/trace.h 		__assign_disk_name(__entry->disk, req->rq_disk);
req                86 drivers/nvme/host/trace.h 	    TP_PROTO(struct request *req),
req                87 drivers/nvme/host/trace.h 	    TP_ARGS(req),
req                99 drivers/nvme/host/trace.h 		__entry->ctrl_id = nvme_req(req)->ctrl->instance;
req               100 drivers/nvme/host/trace.h 		__entry->qid = nvme_req_qid(req);
req               101 drivers/nvme/host/trace.h 		__entry->cid = req->tag;
req               102 drivers/nvme/host/trace.h 		__entry->result = le64_to_cpu(nvme_req(req)->result.u64);
req               103 drivers/nvme/host/trace.h 		__entry->retries = nvme_req(req)->retries;
req               104 drivers/nvme/host/trace.h 		__entry->flags = nvme_req(req)->flags;
req               105 drivers/nvme/host/trace.h 		__entry->status = nvme_req(req)->status;
req               106 drivers/nvme/host/trace.h 		__assign_disk_name(__entry->disk, req->rq_disk);
req               145 drivers/nvme/host/trace.h 	TP_PROTO(struct request *req, __le16 sq_head, int sq_tail),
req               146 drivers/nvme/host/trace.h 	TP_ARGS(req, sq_head, sq_tail),
req               155 drivers/nvme/host/trace.h 		__entry->ctrl_id = nvme_req(req)->ctrl->instance;
req               156 drivers/nvme/host/trace.h 		__assign_disk_name(__entry->disk, req->rq_disk);
req               157 drivers/nvme/host/trace.h 		__entry->qid = nvme_req_qid(req);
req                32 drivers/nvme/target/admin-cmd.c static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
req                34 drivers/nvme/target/admin-cmd.c 	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
req                37 drivers/nvme/target/admin-cmd.c static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
req                39 drivers/nvme/target/admin-cmd.c 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
req                49 drivers/nvme/target/admin-cmd.c 		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
req                60 drivers/nvme/target/admin-cmd.c 	nvmet_req_complete(req, 0);
req                63 drivers/nvme/target/admin-cmd.c static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
req                69 drivers/nvme/target/admin-cmd.c 	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
req                72 drivers/nvme/target/admin-cmd.c 				le32_to_cpu(req->cmd->get_log_page.nsid));
req                73 drivers/nvme/target/admin-cmd.c 		req->error_loc = offsetof(struct nvme_rw_command, nsid);
req                98 drivers/nvme/target/admin-cmd.c static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
req               106 drivers/nvme/target/admin-cmd.c 	ctrl = req->sq->ctrl;
req               131 drivers/nvme/target/admin-cmd.c static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
req               137 drivers/nvme/target/admin-cmd.c 	if (req->data_len != sizeof(*log))
req               144 drivers/nvme/target/admin-cmd.c 	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
req               145 drivers/nvme/target/admin-cmd.c 		status = nvmet_get_smart_log_all(req, log);
req               147 drivers/nvme/target/admin-cmd.c 		status = nvmet_get_smart_log_nsid(req, log);
req               151 drivers/nvme/target/admin-cmd.c 	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
req               152 drivers/nvme/target/admin-cmd.c 	put_unaligned_le64(req->sq->ctrl->err_counter,
req               154 drivers/nvme/target/admin-cmd.c 	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);
req               156 drivers/nvme/target/admin-cmd.c 	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
req               160 drivers/nvme/target/admin-cmd.c 	nvmet_req_complete(req, status);
req               163 drivers/nvme/target/admin-cmd.c static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
req               186 drivers/nvme/target/admin-cmd.c 	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
req               190 drivers/nvme/target/admin-cmd.c 	nvmet_req_complete(req, status);
req               193 drivers/nvme/target/admin-cmd.c static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
req               195 drivers/nvme/target/admin-cmd.c 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
req               199 drivers/nvme/target/admin-cmd.c 	if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
req               207 drivers/nvme/target/admin-cmd.c 	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
req               209 drivers/nvme/target/admin-cmd.c 		status = nvmet_zero_sgl(req, len, req->data_len - len);
req               211 drivers/nvme/target/admin-cmd.c 	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
req               214 drivers/nvme/target/admin-cmd.c 	nvmet_req_complete(req, status);
req               217 drivers/nvme/target/admin-cmd.c static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
req               220 drivers/nvme/target/admin-cmd.c 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
req               224 drivers/nvme/target/admin-cmd.c 	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
req               235 drivers/nvme/target/admin-cmd.c 	desc->state = req->port->ana_state[grpid];
req               240 drivers/nvme/target/admin-cmd.c static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
req               260 drivers/nvme/target/admin-cmd.c 		len = nvmet_format_ana_group(req, grpid, desc);
req               261 drivers/nvme/target/admin-cmd.c 		status = nvmet_copy_to_sgl(req, offset, desc, len);
req               274 drivers/nvme/target/admin-cmd.c 	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
req               280 drivers/nvme/target/admin-cmd.c 	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
req               282 drivers/nvme/target/admin-cmd.c 	nvmet_req_complete(req, status);
req               285 drivers/nvme/target/admin-cmd.c static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
req               287 drivers/nvme/target/admin-cmd.c 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
req               373 drivers/nvme/target/admin-cmd.c 	if (req->port->inline_data_size)
req               380 drivers/nvme/target/admin-cmd.c 				  req->port->inline_data_size) / 16);
req               401 drivers/nvme/target/admin-cmd.c 	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
req               405 drivers/nvme/target/admin-cmd.c 	nvmet_req_complete(req, status);
req               408 drivers/nvme/target/admin-cmd.c static void nvmet_execute_identify_ns(struct nvmet_req *req)
req               414 drivers/nvme/target/admin-cmd.c 	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
req               415 drivers/nvme/target/admin-cmd.c 		req->error_loc = offsetof(struct nvme_identify, nsid);
req               427 drivers/nvme/target/admin-cmd.c 	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
req               436 drivers/nvme/target/admin-cmd.c 	switch (req->port->ana_state[ns->anagrpid]) {
req               470 drivers/nvme/target/admin-cmd.c 	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
req               473 drivers/nvme/target/admin-cmd.c 	nvmet_req_complete(req, status);
req               476 drivers/nvme/target/admin-cmd.c static void nvmet_execute_identify_nslist(struct nvmet_req *req)
req               479 drivers/nvme/target/admin-cmd.c 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
req               481 drivers/nvme/target/admin-cmd.c 	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
req               502 drivers/nvme/target/admin-cmd.c 	status = nvmet_copy_to_sgl(req, 0, list, buf_size);
req               506 drivers/nvme/target/admin-cmd.c 	nvmet_req_complete(req, status);
req               509 drivers/nvme/target/admin-cmd.c static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
req               518 drivers/nvme/target/admin-cmd.c 	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
req               523 drivers/nvme/target/admin-cmd.c 	status = nvmet_copy_to_sgl(req, *off, id, len);
req               531 drivers/nvme/target/admin-cmd.c static void nvmet_execute_identify_desclist(struct nvmet_req *req)
req               537 drivers/nvme/target/admin-cmd.c 	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
req               539 drivers/nvme/target/admin-cmd.c 		req->error_loc = offsetof(struct nvme_identify, nsid);
req               545 drivers/nvme/target/admin-cmd.c 		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
req               552 drivers/nvme/target/admin-cmd.c 		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
req               559 drivers/nvme/target/admin-cmd.c 	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
req               565 drivers/nvme/target/admin-cmd.c 	nvmet_req_complete(req, status);
req               575 drivers/nvme/target/admin-cmd.c static void nvmet_execute_abort(struct nvmet_req *req)
req               577 drivers/nvme/target/admin-cmd.c 	nvmet_set_result(req, 1);
req               578 drivers/nvme/target/admin-cmd.c 	nvmet_req_complete(req, 0);
req               581 drivers/nvme/target/admin-cmd.c static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
req               585 drivers/nvme/target/admin-cmd.c 	if (req->ns->file)
req               586 drivers/nvme/target/admin-cmd.c 		status = nvmet_file_flush(req);
req               588 drivers/nvme/target/admin-cmd.c 		status = nvmet_bdev_flush(req);
req               591 drivers/nvme/target/admin-cmd.c 		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
req               595 drivers/nvme/target/admin-cmd.c static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
req               597 drivers/nvme/target/admin-cmd.c 	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
req               598 drivers/nvme/target/admin-cmd.c 	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
req               601 drivers/nvme/target/admin-cmd.c 	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
req               602 drivers/nvme/target/admin-cmd.c 	if (unlikely(!req->ns)) {
req               603 drivers/nvme/target/admin-cmd.c 		req->error_loc = offsetof(struct nvme_common_command, nsid);
req               610 drivers/nvme/target/admin-cmd.c 		req->ns->readonly = true;
req               611 drivers/nvme/target/admin-cmd.c 		status = nvmet_write_protect_flush_sync(req);
req               613 drivers/nvme/target/admin-cmd.c 			req->ns->readonly = false;
req               616 drivers/nvme/target/admin-cmd.c 		req->ns->readonly = false;
req               624 drivers/nvme/target/admin-cmd.c 		nvmet_ns_changed(subsys, req->ns->nsid);
req               629 drivers/nvme/target/admin-cmd.c u16 nvmet_set_feat_kato(struct nvmet_req *req)
req               631 drivers/nvme/target/admin-cmd.c 	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
req               633 drivers/nvme/target/admin-cmd.c 	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
req               635 drivers/nvme/target/admin-cmd.c 	nvmet_set_result(req, req->sq->ctrl->kato);
req               640 drivers/nvme/target/admin-cmd.c u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
req               642 drivers/nvme/target/admin-cmd.c 	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
req               645 drivers/nvme/target/admin-cmd.c 		req->error_loc = offsetof(struct nvme_common_command, cdw11);
req               649 drivers/nvme/target/admin-cmd.c 	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
req               650 drivers/nvme/target/admin-cmd.c 	nvmet_set_result(req, val32);
req               655 drivers/nvme/target/admin-cmd.c static void nvmet_execute_set_features(struct nvmet_req *req)
req               657 drivers/nvme/target/admin-cmd.c 	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
req               658 drivers/nvme/target/admin-cmd.c 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
req               663 drivers/nvme/target/admin-cmd.c 		nvmet_set_result(req,
req               667 drivers/nvme/target/admin-cmd.c 		status = nvmet_set_feat_kato(req);
req               670 drivers/nvme/target/admin-cmd.c 		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
req               676 drivers/nvme/target/admin-cmd.c 		status = nvmet_set_feat_write_protect(req);
req               679 drivers/nvme/target/admin-cmd.c 		req->error_loc = offsetof(struct nvme_common_command, cdw10);
req               684 drivers/nvme/target/admin-cmd.c 	nvmet_req_complete(req, status);
req               687 drivers/nvme/target/admin-cmd.c static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
req               689 drivers/nvme/target/admin-cmd.c 	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
req               692 drivers/nvme/target/admin-cmd.c 	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
req               693 drivers/nvme/target/admin-cmd.c 	if (!req->ns)  {
req               694 drivers/nvme/target/admin-cmd.c 		req->error_loc = offsetof(struct nvme_common_command, nsid);
req               698 drivers/nvme/target/admin-cmd.c 	if (req->ns->readonly == true)
req               702 drivers/nvme/target/admin-cmd.c 	nvmet_set_result(req, result);
req               708 drivers/nvme/target/admin-cmd.c void nvmet_get_feat_kato(struct nvmet_req *req)
req               710 drivers/nvme/target/admin-cmd.c 	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
req               713 drivers/nvme/target/admin-cmd.c void nvmet_get_feat_async_event(struct nvmet_req *req)
req               715 drivers/nvme/target/admin-cmd.c 	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
req               718 drivers/nvme/target/admin-cmd.c static void nvmet_execute_get_features(struct nvmet_req *req)
req               720 drivers/nvme/target/admin-cmd.c 	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
req               721 drivers/nvme/target/admin-cmd.c 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
req               747 drivers/nvme/target/admin-cmd.c 		nvmet_get_feat_async_event(req);
req               750 drivers/nvme/target/admin-cmd.c 		nvmet_set_result(req, 1);
req               753 drivers/nvme/target/admin-cmd.c 		nvmet_set_result(req,
req               757 drivers/nvme/target/admin-cmd.c 		nvmet_get_feat_kato(req);
req               761 drivers/nvme/target/admin-cmd.c 		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
req               762 drivers/nvme/target/admin-cmd.c 			req->error_loc =
req               768 drivers/nvme/target/admin-cmd.c 		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
req               769 drivers/nvme/target/admin-cmd.c 				sizeof(req->sq->ctrl->hostid));
req               772 drivers/nvme/target/admin-cmd.c 		status = nvmet_get_feat_write_protect(req);
req               775 drivers/nvme/target/admin-cmd.c 		req->error_loc =
req               781 drivers/nvme/target/admin-cmd.c 	nvmet_req_complete(req, status);
req               784 drivers/nvme/target/admin-cmd.c void nvmet_execute_async_event(struct nvmet_req *req)
req               786 drivers/nvme/target/admin-cmd.c 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
req               791 drivers/nvme/target/admin-cmd.c 		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
req               794 drivers/nvme/target/admin-cmd.c 	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
req               800 drivers/nvme/target/admin-cmd.c void nvmet_execute_keep_alive(struct nvmet_req *req)
req               802 drivers/nvme/target/admin-cmd.c 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
req               808 drivers/nvme/target/admin-cmd.c 	nvmet_req_complete(req, 0);
req               811 drivers/nvme/target/admin-cmd.c u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
req               813 drivers/nvme/target/admin-cmd.c 	struct nvme_command *cmd = req->cmd;
req               816 drivers/nvme/target/admin-cmd.c 	ret = nvmet_check_ctrl_status(req, cmd);
req               822 drivers/nvme/target/admin-cmd.c 		req->data_len = nvmet_get_log_page_len(cmd);
req               826 drivers/nvme/target/admin-cmd.c 			req->execute = nvmet_execute_get_log_page_error;
req               829 drivers/nvme/target/admin-cmd.c 			req->execute = nvmet_execute_get_log_page_smart;
req               838 drivers/nvme/target/admin-cmd.c 			req->execute = nvmet_execute_get_log_page_noop;
req               841 drivers/nvme/target/admin-cmd.c 			req->execute = nvmet_execute_get_log_changed_ns;
req               844 drivers/nvme/target/admin-cmd.c 			req->execute = nvmet_execute_get_log_cmd_effects_ns;
req               847 drivers/nvme/target/admin-cmd.c 			req->execute = nvmet_execute_get_log_page_ana;
req               852 drivers/nvme/target/admin-cmd.c 		req->data_len = NVME_IDENTIFY_DATA_SIZE;
req               855 drivers/nvme/target/admin-cmd.c 			req->execute = nvmet_execute_identify_ns;
req               858 drivers/nvme/target/admin-cmd.c 			req->execute = nvmet_execute_identify_ctrl;
req               861 drivers/nvme/target/admin-cmd.c 			req->execute = nvmet_execute_identify_nslist;
req               864 drivers/nvme/target/admin-cmd.c 			req->execute = nvmet_execute_identify_desclist;
req               869 drivers/nvme/target/admin-cmd.c 		req->execute = nvmet_execute_abort;
req               870 drivers/nvme/target/admin-cmd.c 		req->data_len = 0;
req               873 drivers/nvme/target/admin-cmd.c 		req->execute = nvmet_execute_set_features;
req               874 drivers/nvme/target/admin-cmd.c 		req->data_len = 0;
req               877 drivers/nvme/target/admin-cmd.c 		req->execute = nvmet_execute_get_features;
req               878 drivers/nvme/target/admin-cmd.c 		req->data_len = 0;
req               881 drivers/nvme/target/admin-cmd.c 		req->execute = nvmet_execute_async_event;
req               882 drivers/nvme/target/admin-cmd.c 		req->data_len = 0;
req               885 drivers/nvme/target/admin-cmd.c 		req->execute = nvmet_execute_keep_alive;
req               886 drivers/nvme/target/admin-cmd.c 		req->data_len = 0;
req               891 drivers/nvme/target/admin-cmd.c 	       req->sq->qid);
req               892 drivers/nvme/target/admin-cmd.c 	req->error_loc = offsetof(struct nvme_common_command, opcode);
req                44 drivers/nvme/target/core.c inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
req                53 drivers/nvme/target/core.c 		req->error_loc = offsetof(struct nvme_rw_command, length);
req                57 drivers/nvme/target/core.c 		req->error_loc = offsetof(struct nvme_rw_command, slba);
req                61 drivers/nvme/target/core.c 		req->error_loc = offsetof(struct nvme_common_command, opcode);
req                62 drivers/nvme/target/core.c 		switch (req->cmd->common.opcode) {
req                72 drivers/nvme/target/core.c 		req->error_loc = offsetof(struct nvme_rw_command, nsid);
req                78 drivers/nvme/target/core.c 		req->error_loc = offsetof(struct nvme_common_command, opcode);
req                88 drivers/nvme/target/core.c u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
req                91 drivers/nvme/target/core.c 	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
req                92 drivers/nvme/target/core.c 		req->error_loc = offsetof(struct nvme_common_command, dptr);
req                98 drivers/nvme/target/core.c u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
req               100 drivers/nvme/target/core.c 	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
req               101 drivers/nvme/target/core.c 		req->error_loc = offsetof(struct nvme_common_command, dptr);
req               107 drivers/nvme/target/core.c u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
req               109 drivers/nvme/target/core.c 	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
req               110 drivers/nvme/target/core.c 		req->error_loc = offsetof(struct nvme_common_command, dptr);
req               134 drivers/nvme/target/core.c 	struct nvmet_req *req;
req               143 drivers/nvme/target/core.c 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
req               145 drivers/nvme/target/core.c 		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
req               154 drivers/nvme/target/core.c 	struct nvmet_req *req;
req               165 drivers/nvme/target/core.c 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
req               166 drivers/nvme/target/core.c 		nvmet_set_result(req, nvmet_async_event_result(aen));
req               172 drivers/nvme/target/core.c 		nvmet_req_complete(req, 0);
req               661 drivers/nvme/target/core.c static void nvmet_update_sq_head(struct nvmet_req *req)
req               663 drivers/nvme/target/core.c 	if (req->sq->size) {
req               667 drivers/nvme/target/core.c 			old_sqhd = req->sq->sqhd;
req               668 drivers/nvme/target/core.c 			new_sqhd = (old_sqhd + 1) % req->sq->size;
req               669 drivers/nvme/target/core.c 		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
req               672 drivers/nvme/target/core.c 	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
req               675 drivers/nvme/target/core.c static void nvmet_set_error(struct nvmet_req *req, u16 status)
req               677 drivers/nvme/target/core.c 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
req               681 drivers/nvme/target/core.c 	req->cqe->status = cpu_to_le16(status << 1);
req               683 drivers/nvme/target/core.c 	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
req               692 drivers/nvme/target/core.c 	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
req               693 drivers/nvme/target/core.c 	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
req               695 drivers/nvme/target/core.c 	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
req               696 drivers/nvme/target/core.c 	new_error_slot->lba = cpu_to_le64(req->error_slba);
req               697 drivers/nvme/target/core.c 	new_error_slot->nsid = req->cmd->common.nsid;
req               701 drivers/nvme/target/core.c 	req->cqe->status |= cpu_to_le16(1 << 14);
req               704 drivers/nvme/target/core.c static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
req               706 drivers/nvme/target/core.c 	if (!req->sq->sqhd_disabled)
req               707 drivers/nvme/target/core.c 		nvmet_update_sq_head(req);
req               708 drivers/nvme/target/core.c 	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
req               709 drivers/nvme/target/core.c 	req->cqe->command_id = req->cmd->common.command_id;
req               712 drivers/nvme/target/core.c 		nvmet_set_error(req, status);
req               714 drivers/nvme/target/core.c 	trace_nvmet_req_complete(req);
req               716 drivers/nvme/target/core.c 	if (req->ns)
req               717 drivers/nvme/target/core.c 		nvmet_put_namespace(req->ns);
req               718 drivers/nvme/target/core.c 	req->ops->queue_response(req);
req               721 drivers/nvme/target/core.c void nvmet_req_complete(struct nvmet_req *req, u16 status)
req               723 drivers/nvme/target/core.c 	__nvmet_req_complete(req, status);
req               724 drivers/nvme/target/core.c 	percpu_ref_put(&req->sq->ref);
req               811 drivers/nvme/target/core.c static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
req               813 drivers/nvme/target/core.c 	if (unlikely(req->ns->readonly)) {
req               814 drivers/nvme/target/core.c 		switch (req->cmd->common.opcode) {
req               826 drivers/nvme/target/core.c static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
req               828 drivers/nvme/target/core.c 	struct nvme_command *cmd = req->cmd;
req               831 drivers/nvme/target/core.c 	ret = nvmet_check_ctrl_status(req, cmd);
req               835 drivers/nvme/target/core.c 	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
req               836 drivers/nvme/target/core.c 	if (unlikely(!req->ns)) {
req               837 drivers/nvme/target/core.c 		req->error_loc = offsetof(struct nvme_common_command, nsid);
req               840 drivers/nvme/target/core.c 	ret = nvmet_check_ana_state(req->port, req->ns);
req               842 drivers/nvme/target/core.c 		req->error_loc = offsetof(struct nvme_common_command, nsid);
req               845 drivers/nvme/target/core.c 	ret = nvmet_io_cmd_check_access(req);
req               847 drivers/nvme/target/core.c 		req->error_loc = offsetof(struct nvme_common_command, nsid);
req               851 drivers/nvme/target/core.c 	if (req->ns->file)
req               852 drivers/nvme/target/core.c 		return nvmet_file_parse_io_cmd(req);
req               854 drivers/nvme/target/core.c 		return nvmet_bdev_parse_io_cmd(req);
req               857 drivers/nvme/target/core.c bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req               860 drivers/nvme/target/core.c 	u8 flags = req->cmd->common.flags;
req               863 drivers/nvme/target/core.c 	req->cq = cq;
req               864 drivers/nvme/target/core.c 	req->sq = sq;
req               865 drivers/nvme/target/core.c 	req->ops = ops;
req               866 drivers/nvme/target/core.c 	req->sg = NULL;
req               867 drivers/nvme/target/core.c 	req->sg_cnt = 0;
req               868 drivers/nvme/target/core.c 	req->transfer_len = 0;
req               869 drivers/nvme/target/core.c 	req->cqe->status = 0;
req               870 drivers/nvme/target/core.c 	req->cqe->sq_head = 0;
req               871 drivers/nvme/target/core.c 	req->ns = NULL;
req               872 drivers/nvme/target/core.c 	req->error_loc = NVMET_NO_ERROR_LOC;
req               873 drivers/nvme/target/core.c 	req->error_slba = 0;
req               875 drivers/nvme/target/core.c 	trace_nvmet_req_init(req, req->cmd);
req               879 drivers/nvme/target/core.c 		req->error_loc = offsetof(struct nvme_common_command, flags);
req               890 drivers/nvme/target/core.c 		req->error_loc = offsetof(struct nvme_common_command, flags);
req               895 drivers/nvme/target/core.c 	if (unlikely(!req->sq->ctrl))
req               897 drivers/nvme/target/core.c 		status = nvmet_parse_connect_cmd(req);
req               898 drivers/nvme/target/core.c 	else if (likely(req->sq->qid != 0))
req               899 drivers/nvme/target/core.c 		status = nvmet_parse_io_cmd(req);
req               900 drivers/nvme/target/core.c 	else if (nvme_is_fabrics(req->cmd))
req               901 drivers/nvme/target/core.c 		status = nvmet_parse_fabrics_cmd(req);
req               902 drivers/nvme/target/core.c 	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
req               903 drivers/nvme/target/core.c 		status = nvmet_parse_discovery_cmd(req);
req               905 drivers/nvme/target/core.c 		status = nvmet_parse_admin_cmd(req);
req               921 drivers/nvme/target/core.c 	__nvmet_req_complete(req, status);
req               926 drivers/nvme/target/core.c void nvmet_req_uninit(struct nvmet_req *req)
req               928 drivers/nvme/target/core.c 	percpu_ref_put(&req->sq->ref);
req               929 drivers/nvme/target/core.c 	if (req->ns)
req               930 drivers/nvme/target/core.c 		nvmet_put_namespace(req->ns);
req               934 drivers/nvme/target/core.c void nvmet_req_execute(struct nvmet_req *req)
req               936 drivers/nvme/target/core.c 	if (unlikely(req->data_len != req->transfer_len)) {
req               937 drivers/nvme/target/core.c 		req->error_loc = offsetof(struct nvme_common_command, dptr);
req               938 drivers/nvme/target/core.c 		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
req               940 drivers/nvme/target/core.c 		req->execute(req);
req               944 drivers/nvme/target/core.c int nvmet_req_alloc_sgl(struct nvmet_req *req)
req               949 drivers/nvme/target/core.c 		if (req->sq->ctrl && req->ns)
req               950 drivers/nvme/target/core.c 			p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
req               951 drivers/nvme/target/core.c 						    req->ns->nsid);
req               953 drivers/nvme/target/core.c 		req->p2p_dev = NULL;
req               954 drivers/nvme/target/core.c 		if (req->sq->qid && p2p_dev) {
req               955 drivers/nvme/target/core.c 			req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
req               956 drivers/nvme/target/core.c 						       req->transfer_len);
req               957 drivers/nvme/target/core.c 			if (req->sg) {
req               958 drivers/nvme/target/core.c 				req->p2p_dev = p2p_dev;
req               969 drivers/nvme/target/core.c 	req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
req               970 drivers/nvme/target/core.c 	if (!req->sg)
req               977 drivers/nvme/target/core.c void nvmet_req_free_sgl(struct nvmet_req *req)
req               979 drivers/nvme/target/core.c 	if (req->p2p_dev)
req               980 drivers/nvme/target/core.c 		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
req               982 drivers/nvme/target/core.c 		sgl_free(req->sg);
req               984 drivers/nvme/target/core.c 	req->sg = NULL;
req               985 drivers/nvme/target/core.c 	req->sg_cnt = 0;
req              1089 drivers/nvme/target/core.c 		struct nvmet_req *req, struct nvmet_ctrl **ret)
req              1095 drivers/nvme/target/core.c 	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
req              1099 drivers/nvme/target/core.c 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
req              1120 drivers/nvme/target/core.c 	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
req              1129 drivers/nvme/target/core.c u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
req              1131 drivers/nvme/target/core.c 	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
req              1133 drivers/nvme/target/core.c 		       cmd->common.opcode, req->sq->qid);
req              1137 drivers/nvme/target/core.c 	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
req              1139 drivers/nvme/target/core.c 		       cmd->common.opcode, req->sq->qid);
req              1169 drivers/nvme/target/core.c 		struct nvmet_req *req)
req              1173 drivers/nvme/target/core.c 	if (!req->p2p_client)
req              1176 drivers/nvme/target/core.c 	ctrl->p2p_client = get_device(req->p2p_client);
req              1207 drivers/nvme/target/core.c 		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
req              1215 drivers/nvme/target/core.c 	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
req              1219 drivers/nvme/target/core.c 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
req              1228 drivers/nvme/target/core.c 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
req              1243 drivers/nvme/target/core.c 	ctrl->port = req->port;
req              1283 drivers/nvme/target/core.c 	ctrl->ops = req->ops;
req              1302 drivers/nvme/target/core.c 	nvmet_setup_p2p_ns_map(ctrl, req);
req               134 drivers/nvme/target/discovery.c static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port,
req               137 drivers/nvme/target/discovery.c 	if (req->ops->disc_traddr)
req               138 drivers/nvme/target/discovery.c 		req->ops->disc_traddr(req, port, traddr);
req               143 drivers/nvme/target/discovery.c static size_t discovery_log_entries(struct nvmet_req *req)
req               145 drivers/nvme/target/discovery.c 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
req               150 drivers/nvme/target/discovery.c 	list_for_each_entry(p, &req->port->subsystems, entry) {
req               155 drivers/nvme/target/discovery.c 	list_for_each_entry(r, &req->port->referrals, entry)
req               160 drivers/nvme/target/discovery.c static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
req               163 drivers/nvme/target/discovery.c 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
req               165 drivers/nvme/target/discovery.c 	u64 offset = nvmet_get_log_page_offset(req->cmd);
req               166 drivers/nvme/target/discovery.c 	size_t data_len = nvmet_get_log_page_len(req->cmd);
req               186 drivers/nvme/target/discovery.c 	alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
req               195 drivers/nvme/target/discovery.c 	list_for_each_entry(p, &req->port->subsystems, entry) {
req               201 drivers/nvme/target/discovery.c 		nvmet_set_disc_traddr(req, req->port, traddr);
req               202 drivers/nvme/target/discovery.c 		nvmet_format_discovery_entry(hdr, req->port,
req               208 drivers/nvme/target/discovery.c 	list_for_each_entry(r, &req->port->referrals, entry) {
req               220 drivers/nvme/target/discovery.c 	nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE);
req               224 drivers/nvme/target/discovery.c 	status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
req               227 drivers/nvme/target/discovery.c 	nvmet_req_complete(req, status);
req               230 drivers/nvme/target/discovery.c static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
req               232 drivers/nvme/target/discovery.c 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
req               257 drivers/nvme/target/discovery.c 	if (req->port->inline_data_size)
req               264 drivers/nvme/target/discovery.c 	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
req               268 drivers/nvme/target/discovery.c 	nvmet_req_complete(req, status);
req               271 drivers/nvme/target/discovery.c static void nvmet_execute_disc_set_features(struct nvmet_req *req)
req               273 drivers/nvme/target/discovery.c 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
req               278 drivers/nvme/target/discovery.c 		stat = nvmet_set_feat_kato(req);
req               281 drivers/nvme/target/discovery.c 		stat = nvmet_set_feat_async_event(req,
req               285 drivers/nvme/target/discovery.c 		req->error_loc =
req               291 drivers/nvme/target/discovery.c 	nvmet_req_complete(req, stat);
req               294 drivers/nvme/target/discovery.c static void nvmet_execute_disc_get_features(struct nvmet_req *req)
req               296 drivers/nvme/target/discovery.c 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
req               301 drivers/nvme/target/discovery.c 		nvmet_get_feat_kato(req);
req               304 drivers/nvme/target/discovery.c 		nvmet_get_feat_async_event(req);
req               307 drivers/nvme/target/discovery.c 		req->error_loc =
req               313 drivers/nvme/target/discovery.c 	nvmet_req_complete(req, stat);
req               316 drivers/nvme/target/discovery.c u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
req               318 drivers/nvme/target/discovery.c 	struct nvme_command *cmd = req->cmd;
req               320 drivers/nvme/target/discovery.c 	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
req               323 drivers/nvme/target/discovery.c 		req->error_loc =
req               330 drivers/nvme/target/discovery.c 		req->execute = nvmet_execute_disc_set_features;
req               331 drivers/nvme/target/discovery.c 		req->data_len = 0;
req               334 drivers/nvme/target/discovery.c 		req->execute = nvmet_execute_disc_get_features;
req               335 drivers/nvme/target/discovery.c 		req->data_len = 0;
req               338 drivers/nvme/target/discovery.c 		req->execute = nvmet_execute_async_event;
req               339 drivers/nvme/target/discovery.c 		req->data_len = 0;
req               342 drivers/nvme/target/discovery.c 		req->execute = nvmet_execute_keep_alive;
req               343 drivers/nvme/target/discovery.c 		req->data_len = 0;
req               346 drivers/nvme/target/discovery.c 		req->data_len = nvmet_get_log_page_len(cmd);
req               350 drivers/nvme/target/discovery.c 			req->execute = nvmet_execute_get_disc_log_page;
req               355 drivers/nvme/target/discovery.c 			req->error_loc =
req               360 drivers/nvme/target/discovery.c 		req->data_len = NVME_IDENTIFY_DATA_SIZE;
req               363 drivers/nvme/target/discovery.c 			req->execute =
req               369 drivers/nvme/target/discovery.c 			req->error_loc = offsetof(struct nvme_identify, cns);
req               374 drivers/nvme/target/discovery.c 		req->error_loc = offsetof(struct nvme_common_command, opcode);
req                10 drivers/nvme/target/fabrics-cmd.c static void nvmet_execute_prop_set(struct nvmet_req *req)
req                12 drivers/nvme/target/fabrics-cmd.c 	u64 val = le64_to_cpu(req->cmd->prop_set.value);
req                15 drivers/nvme/target/fabrics-cmd.c 	if (req->cmd->prop_set.attrib & 1) {
req                16 drivers/nvme/target/fabrics-cmd.c 		req->error_loc =
req                22 drivers/nvme/target/fabrics-cmd.c 	switch (le32_to_cpu(req->cmd->prop_set.offset)) {
req                24 drivers/nvme/target/fabrics-cmd.c 		nvmet_update_cc(req->sq->ctrl, val);
req                27 drivers/nvme/target/fabrics-cmd.c 		req->error_loc =
req                32 drivers/nvme/target/fabrics-cmd.c 	nvmet_req_complete(req, status);
req                35 drivers/nvme/target/fabrics-cmd.c static void nvmet_execute_prop_get(struct nvmet_req *req)
req                37 drivers/nvme/target/fabrics-cmd.c 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
req                41 drivers/nvme/target/fabrics-cmd.c 	if (req->cmd->prop_get.attrib & 1) {
req                42 drivers/nvme/target/fabrics-cmd.c 		switch (le32_to_cpu(req->cmd->prop_get.offset)) {
req                51 drivers/nvme/target/fabrics-cmd.c 		switch (le32_to_cpu(req->cmd->prop_get.offset)) {
req                67 drivers/nvme/target/fabrics-cmd.c 	if (status && req->cmd->prop_get.attrib & 1) {
req                68 drivers/nvme/target/fabrics-cmd.c 		req->error_loc =
req                71 drivers/nvme/target/fabrics-cmd.c 		req->error_loc =
req                75 drivers/nvme/target/fabrics-cmd.c 	req->cqe->result.u64 = cpu_to_le64(val);
req                76 drivers/nvme/target/fabrics-cmd.c 	nvmet_req_complete(req, status);
req                79 drivers/nvme/target/fabrics-cmd.c u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
req                81 drivers/nvme/target/fabrics-cmd.c 	struct nvme_command *cmd = req->cmd;
req                85 drivers/nvme/target/fabrics-cmd.c 		req->data_len = 0;
req                86 drivers/nvme/target/fabrics-cmd.c 		req->execute = nvmet_execute_prop_set;
req                89 drivers/nvme/target/fabrics-cmd.c 		req->data_len = 0;
req                90 drivers/nvme/target/fabrics-cmd.c 		req->execute = nvmet_execute_prop_get;
req                95 drivers/nvme/target/fabrics-cmd.c 		req->error_loc = offsetof(struct nvmf_common_command, fctype);
req               102 drivers/nvme/target/fabrics-cmd.c static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
req               104 drivers/nvme/target/fabrics-cmd.c 	struct nvmf_connect_command *c = &req->cmd->connect;
req               110 drivers/nvme/target/fabrics-cmd.c 	old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
req               113 drivers/nvme/target/fabrics-cmd.c 		req->error_loc = offsetof(struct nvmf_connect_command, opcode);
req               118 drivers/nvme/target/fabrics-cmd.c 		req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
req               124 drivers/nvme/target/fabrics-cmd.c 	nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
req               125 drivers/nvme/target/fabrics-cmd.c 	nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
req               128 drivers/nvme/target/fabrics-cmd.c 		req->sq->sqhd_disabled = true;
req               129 drivers/nvme/target/fabrics-cmd.c 		req->cqe->sq_head = cpu_to_le16(0xffff);
req               133 drivers/nvme/target/fabrics-cmd.c 		ret = ctrl->ops->install_queue(req->sq);
req               144 drivers/nvme/target/fabrics-cmd.c 	req->sq->ctrl = NULL;
req               148 drivers/nvme/target/fabrics-cmd.c static void nvmet_execute_admin_connect(struct nvmet_req *req)
req               150 drivers/nvme/target/fabrics-cmd.c 	struct nvmf_connect_command *c = &req->cmd->connect;
req               161 drivers/nvme/target/fabrics-cmd.c 	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
req               166 drivers/nvme/target/fabrics-cmd.c 	req->cqe->result.u32 = 0;
req               171 drivers/nvme/target/fabrics-cmd.c 		req->error_loc = offsetof(struct nvmf_connect_command, recfmt);
req               180 drivers/nvme/target/fabrics-cmd.c 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
req               184 drivers/nvme/target/fabrics-cmd.c 	status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
req               188 drivers/nvme/target/fabrics-cmd.c 			req->error_loc =
req               195 drivers/nvme/target/fabrics-cmd.c 	status = nvmet_install_queue(ctrl, req);
req               203 drivers/nvme/target/fabrics-cmd.c 	req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
req               208 drivers/nvme/target/fabrics-cmd.c 	nvmet_req_complete(req, status);
req               211 drivers/nvme/target/fabrics-cmd.c static void nvmet_execute_io_connect(struct nvmet_req *req)
req               213 drivers/nvme/target/fabrics-cmd.c 	struct nvmf_connect_command *c = &req->cmd->connect;
req               225 drivers/nvme/target/fabrics-cmd.c 	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
req               230 drivers/nvme/target/fabrics-cmd.c 	req->cqe->result.u32 = 0;
req               241 drivers/nvme/target/fabrics-cmd.c 				     req, &ctrl);
req               248 drivers/nvme/target/fabrics-cmd.c 		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
req               252 drivers/nvme/target/fabrics-cmd.c 	status = nvmet_install_queue(ctrl, req);
req               255 drivers/nvme/target/fabrics-cmd.c 		req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
req               264 drivers/nvme/target/fabrics-cmd.c 	nvmet_req_complete(req, status);
req               272 drivers/nvme/target/fabrics-cmd.c u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
req               274 drivers/nvme/target/fabrics-cmd.c 	struct nvme_command *cmd = req->cmd;
req               279 drivers/nvme/target/fabrics-cmd.c 		req->error_loc = offsetof(struct nvme_common_command, opcode);
req               285 drivers/nvme/target/fabrics-cmd.c 		req->error_loc = offsetof(struct nvmf_common_command, fctype);
req               289 drivers/nvme/target/fabrics-cmd.c 	req->data_len = sizeof(struct nvmf_connect_data);
req               291 drivers/nvme/target/fabrics-cmd.c 		req->execute = nvmet_execute_admin_connect;
req               293 drivers/nvme/target/fabrics-cmd.c 		req->execute = nvmet_execute_io_connect;
req                76 drivers/nvme/target/fc.c 	struct nvmet_req		req;
req               205 drivers/nvme/target/fc.c 	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
req              1723 drivers/nvme/target/fc.c 	sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
req              1784 drivers/nvme/target/fc.c 		xfr_length = fod->req.transfer_len;
req              1810 drivers/nvme/target/fc.c 	    xfr_length != fod->req.transfer_len ||
req              1880 drivers/nvme/target/fc.c 	u32 remaininglen = fod->req.transfer_len - fod->offset;
req              1926 drivers/nvme/target/fc.c 	    ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
req              1945 drivers/nvme/target/fc.c 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
req              1963 drivers/nvme/target/fc.c 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
req              2001 drivers/nvme/target/fc.c 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
req              2006 drivers/nvme/target/fc.c 		if (fod->offset != fod->req.transfer_len) {
req              2018 drivers/nvme/target/fc.c 		nvmet_req_execute(&fod->req);
req              2041 drivers/nvme/target/fc.c 		if (fod->offset != fod->req.transfer_len) {
req              2186 drivers/nvme/target/fc.c 	fod->req.cmd = &fod->cmdiubuf.sqe;
req              2187 drivers/nvme/target/fc.c 	fod->req.cqe = &fod->rspiubuf.cqe;
req              2188 drivers/nvme/target/fc.c 	fod->req.port = tgtport->pe->port;
req              2196 drivers/nvme/target/fc.c 	ret = nvmet_req_init(&fod->req,
req              2206 drivers/nvme/target/fc.c 	fod->req.transfer_len = xfrlen;
req              2211 drivers/nvme/target/fc.c 	if (fod->req.transfer_len) {
req              2214 drivers/nvme/target/fc.c 			nvmet_req_complete(&fod->req, ret);
req              2218 drivers/nvme/target/fc.c 	fod->req.sg = fod->data_sg;
req              2219 drivers/nvme/target/fc.c 	fod->req.sg_cnt = fod->data_sg_cnt;
req              2234 drivers/nvme/target/fc.c 	nvmet_req_execute(&fod->req);
req                78 drivers/nvme/target/io-cmd-bdev.c static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
req                93 drivers/nvme/target/io-cmd-bdev.c 		req->error_loc = offsetof(struct nvme_rw_command, length);
req                97 drivers/nvme/target/io-cmd-bdev.c 		req->error_loc = offsetof(struct nvme_rw_command, slba);
req               100 drivers/nvme/target/io-cmd-bdev.c 		req->error_loc = offsetof(struct nvme_common_command, opcode);
req               101 drivers/nvme/target/io-cmd-bdev.c 		switch (req->cmd->common.opcode) {
req               112 drivers/nvme/target/io-cmd-bdev.c 		req->error_loc = offsetof(struct nvme_rw_command, nsid);
req               118 drivers/nvme/target/io-cmd-bdev.c 		req->error_loc = offsetof(struct nvme_common_command, opcode);
req               121 drivers/nvme/target/io-cmd-bdev.c 	switch (req->cmd->common.opcode) {
req               124 drivers/nvme/target/io-cmd-bdev.c 		req->error_slba = le64_to_cpu(req->cmd->rw.slba);
req               127 drivers/nvme/target/io-cmd-bdev.c 		req->error_slba =
req               128 drivers/nvme/target/io-cmd-bdev.c 			le64_to_cpu(req->cmd->write_zeroes.slba);
req               131 drivers/nvme/target/io-cmd-bdev.c 		req->error_slba = 0;
req               138 drivers/nvme/target/io-cmd-bdev.c 	struct nvmet_req *req = bio->bi_private;
req               140 drivers/nvme/target/io-cmd-bdev.c 	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
req               141 drivers/nvme/target/io-cmd-bdev.c 	if (bio != &req->b.inline_bio)
req               145 drivers/nvme/target/io-cmd-bdev.c static void nvmet_bdev_execute_rw(struct nvmet_req *req)
req               147 drivers/nvme/target/io-cmd-bdev.c 	int sg_cnt = req->sg_cnt;
req               153 drivers/nvme/target/io-cmd-bdev.c 	if (!req->sg_cnt) {
req               154 drivers/nvme/target/io-cmd-bdev.c 		nvmet_req_complete(req, 0);
req               158 drivers/nvme/target/io-cmd-bdev.c 	if (req->cmd->rw.opcode == nvme_cmd_write) {
req               161 drivers/nvme/target/io-cmd-bdev.c 		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
req               167 drivers/nvme/target/io-cmd-bdev.c 	if (is_pci_p2pdma_page(sg_page(req->sg)))
req               170 drivers/nvme/target/io-cmd-bdev.c 	sector = le64_to_cpu(req->cmd->rw.slba);
req               171 drivers/nvme/target/io-cmd-bdev.c 	sector <<= (req->ns->blksize_shift - 9);
req               173 drivers/nvme/target/io-cmd-bdev.c 	if (req->data_len <= NVMET_MAX_INLINE_DATA_LEN) {
req               174 drivers/nvme/target/io-cmd-bdev.c 		bio = &req->b.inline_bio;
req               175 drivers/nvme/target/io-cmd-bdev.c 		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
req               179 drivers/nvme/target/io-cmd-bdev.c 	bio_set_dev(bio, req->ns->bdev);
req               181 drivers/nvme/target/io-cmd-bdev.c 	bio->bi_private = req;
req               185 drivers/nvme/target/io-cmd-bdev.c 	for_each_sg(req->sg, sg, req->sg_cnt, i) {
req               191 drivers/nvme/target/io-cmd-bdev.c 			bio_set_dev(bio, req->ns->bdev);
req               206 drivers/nvme/target/io-cmd-bdev.c static void nvmet_bdev_execute_flush(struct nvmet_req *req)
req               208 drivers/nvme/target/io-cmd-bdev.c 	struct bio *bio = &req->b.inline_bio;
req               210 drivers/nvme/target/io-cmd-bdev.c 	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
req               211 drivers/nvme/target/io-cmd-bdev.c 	bio_set_dev(bio, req->ns->bdev);
req               212 drivers/nvme/target/io-cmd-bdev.c 	bio->bi_private = req;
req               219 drivers/nvme/target/io-cmd-bdev.c u16 nvmet_bdev_flush(struct nvmet_req *req)
req               221 drivers/nvme/target/io-cmd-bdev.c 	if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
req               226 drivers/nvme/target/io-cmd-bdev.c static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
req               229 drivers/nvme/target/io-cmd-bdev.c 	struct nvmet_ns *ns = req->ns;
req               237 drivers/nvme/target/io-cmd-bdev.c 		req->error_slba = le64_to_cpu(range->slba);
req               238 drivers/nvme/target/io-cmd-bdev.c 		return errno_to_nvme_status(req, ret);
req               243 drivers/nvme/target/io-cmd-bdev.c static void nvmet_bdev_execute_discard(struct nvmet_req *req)
req               250 drivers/nvme/target/io-cmd-bdev.c 	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
req               251 drivers/nvme/target/io-cmd-bdev.c 		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
req               256 drivers/nvme/target/io-cmd-bdev.c 		status = nvmet_bdev_discard_range(req, &range, &bio);
req               262 drivers/nvme/target/io-cmd-bdev.c 		bio->bi_private = req;
req               271 drivers/nvme/target/io-cmd-bdev.c 		nvmet_req_complete(req, status);
req               275 drivers/nvme/target/io-cmd-bdev.c static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
req               277 drivers/nvme/target/io-cmd-bdev.c 	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
req               279 drivers/nvme/target/io-cmd-bdev.c 		nvmet_bdev_execute_discard(req);
req               285 drivers/nvme/target/io-cmd-bdev.c 		nvmet_req_complete(req, 0);
req               290 drivers/nvme/target/io-cmd-bdev.c static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
req               292 drivers/nvme/target/io-cmd-bdev.c 	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
req               299 drivers/nvme/target/io-cmd-bdev.c 		(req->ns->blksize_shift - 9);
req               301 drivers/nvme/target/io-cmd-bdev.c 		(req->ns->blksize_shift - 9));
req               303 drivers/nvme/target/io-cmd-bdev.c 	ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
req               306 drivers/nvme/target/io-cmd-bdev.c 		bio->bi_private = req;
req               310 drivers/nvme/target/io-cmd-bdev.c 		nvmet_req_complete(req, errno_to_nvme_status(req, ret));
req               314 drivers/nvme/target/io-cmd-bdev.c u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
req               316 drivers/nvme/target/io-cmd-bdev.c 	struct nvme_command *cmd = req->cmd;
req               321 drivers/nvme/target/io-cmd-bdev.c 		req->execute = nvmet_bdev_execute_rw;
req               322 drivers/nvme/target/io-cmd-bdev.c 		req->data_len = nvmet_rw_len(req);
req               325 drivers/nvme/target/io-cmd-bdev.c 		req->execute = nvmet_bdev_execute_flush;
req               326 drivers/nvme/target/io-cmd-bdev.c 		req->data_len = 0;
req               329 drivers/nvme/target/io-cmd-bdev.c 		req->execute = nvmet_bdev_execute_dsm;
req               330 drivers/nvme/target/io-cmd-bdev.c 		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
req               334 drivers/nvme/target/io-cmd-bdev.c 		req->execute = nvmet_bdev_execute_write_zeroes;
req               335 drivers/nvme/target/io-cmd-bdev.c 		req->data_len = 0;
req               339 drivers/nvme/target/io-cmd-bdev.c 		       req->sq->qid);
req               340 drivers/nvme/target/io-cmd-bdev.c 		req->error_loc = offsetof(struct nvme_common_command, opcode);
req                90 drivers/nvme/target/io-cmd-file.c static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
req                93 drivers/nvme/target/io-cmd-file.c 	struct kiocb *iocb = &req->f.iocb;
req                98 drivers/nvme/target/io-cmd-file.c 	if (req->cmd->rw.opcode == nvme_cmd_write) {
req                99 drivers/nvme/target/io-cmd-file.c 		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
req               101 drivers/nvme/target/io-cmd-file.c 		call_iter = req->ns->file->f_op->write_iter;
req               104 drivers/nvme/target/io-cmd-file.c 		call_iter = req->ns->file->f_op->read_iter;
req               108 drivers/nvme/target/io-cmd-file.c 	iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);
req               111 drivers/nvme/target/io-cmd-file.c 	iocb->ki_filp = req->ns->file;
req               112 drivers/nvme/target/io-cmd-file.c 	iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);
req               119 drivers/nvme/target/io-cmd-file.c 	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
req               122 drivers/nvme/target/io-cmd-file.c 	if (req->f.bvec != req->inline_bvec) {
req               123 drivers/nvme/target/io-cmd-file.c 		if (likely(req->f.mpool_alloc == false))
req               124 drivers/nvme/target/io-cmd-file.c 			kfree(req->f.bvec);
req               126 drivers/nvme/target/io-cmd-file.c 			mempool_free(req->f.bvec, req->ns->bvec_pool);
req               129 drivers/nvme/target/io-cmd-file.c 	if (unlikely(ret != req->data_len))
req               130 drivers/nvme/target/io-cmd-file.c 		status = errno_to_nvme_status(req, ret);
req               131 drivers/nvme/target/io-cmd-file.c 	nvmet_req_complete(req, status);
req               134 drivers/nvme/target/io-cmd-file.c static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
req               136 drivers/nvme/target/io-cmd-file.c 	ssize_t nr_bvec = req->sg_cnt;
req               145 drivers/nvme/target/io-cmd-file.c 	if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
req               148 drivers/nvme/target/io-cmd-file.c 	pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
req               149 drivers/nvme/target/io-cmd-file.c 	if (unlikely(pos + req->data_len > req->ns->size)) {
req               150 drivers/nvme/target/io-cmd-file.c 		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
req               154 drivers/nvme/target/io-cmd-file.c 	memset(&req->f.iocb, 0, sizeof(struct kiocb));
req               155 drivers/nvme/target/io-cmd-file.c 	for_each_sg(req->sg, sg, req->sg_cnt, i) {
req               156 drivers/nvme/target/io-cmd-file.c 		nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
req               157 drivers/nvme/target/io-cmd-file.c 		len += req->f.bvec[bv_cnt].bv_len;
req               158 drivers/nvme/target/io-cmd-file.c 		total_len += req->f.bvec[bv_cnt].bv_len;
req               165 drivers/nvme/target/io-cmd-file.c 			ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
req               176 drivers/nvme/target/io-cmd-file.c 	if (WARN_ON_ONCE(total_len != req->data_len)) {
req               191 drivers/nvme/target/io-cmd-file.c 		req->f.iocb.ki_complete = nvmet_file_io_done;
req               193 drivers/nvme/target/io-cmd-file.c 	ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);
req               214 drivers/nvme/target/io-cmd-file.c 	nvmet_file_io_done(&req->f.iocb, ret, 0);
req               220 drivers/nvme/target/io-cmd-file.c 	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
req               222 drivers/nvme/target/io-cmd-file.c 	nvmet_file_execute_io(req, 0);
req               225 drivers/nvme/target/io-cmd-file.c static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
req               227 drivers/nvme/target/io-cmd-file.c 	INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
req               228 drivers/nvme/target/io-cmd-file.c 	queue_work(buffered_io_wq, &req->f.work);
req               231 drivers/nvme/target/io-cmd-file.c static void nvmet_file_execute_rw(struct nvmet_req *req)
req               233 drivers/nvme/target/io-cmd-file.c 	ssize_t nr_bvec = req->sg_cnt;
req               235 drivers/nvme/target/io-cmd-file.c 	if (!req->sg_cnt || !nr_bvec) {
req               236 drivers/nvme/target/io-cmd-file.c 		nvmet_req_complete(req, 0);
req               241 drivers/nvme/target/io-cmd-file.c 		req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
req               244 drivers/nvme/target/io-cmd-file.c 		req->f.bvec = req->inline_bvec;
req               246 drivers/nvme/target/io-cmd-file.c 	if (unlikely(!req->f.bvec)) {
req               248 drivers/nvme/target/io-cmd-file.c 		req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
req               249 drivers/nvme/target/io-cmd-file.c 		req->f.mpool_alloc = true;
req               251 drivers/nvme/target/io-cmd-file.c 		req->f.mpool_alloc = false;
req               253 drivers/nvme/target/io-cmd-file.c 	if (req->ns->buffered_io) {
req               254 drivers/nvme/target/io-cmd-file.c 		if (likely(!req->f.mpool_alloc) &&
req               255 drivers/nvme/target/io-cmd-file.c 				nvmet_file_execute_io(req, IOCB_NOWAIT))
req               257 drivers/nvme/target/io-cmd-file.c 		nvmet_file_submit_buffered_io(req);
req               259 drivers/nvme/target/io-cmd-file.c 		nvmet_file_execute_io(req, 0);
req               262 drivers/nvme/target/io-cmd-file.c u16 nvmet_file_flush(struct nvmet_req *req)
req               264 drivers/nvme/target/io-cmd-file.c 	return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
req               269 drivers/nvme/target/io-cmd-file.c 	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
req               271 drivers/nvme/target/io-cmd-file.c 	nvmet_req_complete(req, nvmet_file_flush(req));
req               274 drivers/nvme/target/io-cmd-file.c static void nvmet_file_execute_flush(struct nvmet_req *req)
req               276 drivers/nvme/target/io-cmd-file.c 	INIT_WORK(&req->f.work, nvmet_file_flush_work);
req               277 drivers/nvme/target/io-cmd-file.c 	schedule_work(&req->f.work);
req               280 drivers/nvme/target/io-cmd-file.c static void nvmet_file_execute_discard(struct nvmet_req *req)
req               289 drivers/nvme/target/io-cmd-file.c 	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
req               290 drivers/nvme/target/io-cmd-file.c 		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
req               295 drivers/nvme/target/io-cmd-file.c 		offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
req               297 drivers/nvme/target/io-cmd-file.c 		len <<= req->ns->blksize_shift;
req               298 drivers/nvme/target/io-cmd-file.c 		if (offset + len > req->ns->size) {
req               299 drivers/nvme/target/io-cmd-file.c 			req->error_slba = le64_to_cpu(range.slba);
req               300 drivers/nvme/target/io-cmd-file.c 			status = errno_to_nvme_status(req, -ENOSPC);
req               304 drivers/nvme/target/io-cmd-file.c 		ret = vfs_fallocate(req->ns->file, mode, offset, len);
req               306 drivers/nvme/target/io-cmd-file.c 			req->error_slba = le64_to_cpu(range.slba);
req               307 drivers/nvme/target/io-cmd-file.c 			status = errno_to_nvme_status(req, ret);
req               312 drivers/nvme/target/io-cmd-file.c 	nvmet_req_complete(req, status);
req               317 drivers/nvme/target/io-cmd-file.c 	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
req               319 drivers/nvme/target/io-cmd-file.c 	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
req               321 drivers/nvme/target/io-cmd-file.c 		nvmet_file_execute_discard(req);
req               327 drivers/nvme/target/io-cmd-file.c 		nvmet_req_complete(req, 0);
req               332 drivers/nvme/target/io-cmd-file.c static void nvmet_file_execute_dsm(struct nvmet_req *req)
req               334 drivers/nvme/target/io-cmd-file.c 	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
req               335 drivers/nvme/target/io-cmd-file.c 	schedule_work(&req->f.work);
req               340 drivers/nvme/target/io-cmd-file.c 	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
req               341 drivers/nvme/target/io-cmd-file.c 	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
req               347 drivers/nvme/target/io-cmd-file.c 	offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
req               349 drivers/nvme/target/io-cmd-file.c 			req->ns->blksize_shift);
req               351 drivers/nvme/target/io-cmd-file.c 	if (unlikely(offset + len > req->ns->size)) {
req               352 drivers/nvme/target/io-cmd-file.c 		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
req               356 drivers/nvme/target/io-cmd-file.c 	ret = vfs_fallocate(req->ns->file, mode, offset, len);
req               357 drivers/nvme/target/io-cmd-file.c 	nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
req               360 drivers/nvme/target/io-cmd-file.c static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
req               362 drivers/nvme/target/io-cmd-file.c 	INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
req               363 drivers/nvme/target/io-cmd-file.c 	schedule_work(&req->f.work);
req               366 drivers/nvme/target/io-cmd-file.c u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
req               368 drivers/nvme/target/io-cmd-file.c 	struct nvme_command *cmd = req->cmd;
req               373 drivers/nvme/target/io-cmd-file.c 		req->execute = nvmet_file_execute_rw;
req               374 drivers/nvme/target/io-cmd-file.c 		req->data_len = nvmet_rw_len(req);
req               377 drivers/nvme/target/io-cmd-file.c 		req->execute = nvmet_file_execute_flush;
req               378 drivers/nvme/target/io-cmd-file.c 		req->data_len = 0;
req               381 drivers/nvme/target/io-cmd-file.c 		req->execute = nvmet_file_execute_dsm;
req               382 drivers/nvme/target/io-cmd-file.c 		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
req               386 drivers/nvme/target/io-cmd-file.c 		req->execute = nvmet_file_execute_write_zeroes;
req               387 drivers/nvme/target/io-cmd-file.c 		req->data_len = 0;
req               391 drivers/nvme/target/io-cmd-file.c 				cmd->common.opcode, req->sq->qid);
req               392 drivers/nvme/target/io-cmd-file.c 		req->error_loc = offsetof(struct nvme_common_command, opcode);
req                22 drivers/nvme/target/loop.c 	struct nvmet_req	req;
req                75 drivers/nvme/target/loop.c static void nvme_loop_complete_rq(struct request *req)
req                77 drivers/nvme/target/loop.c 	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
req                79 drivers/nvme/target/loop.c 	nvme_cleanup_cmd(req);
req                81 drivers/nvme/target/loop.c 	nvme_complete_rq(req);
req                93 drivers/nvme/target/loop.c static void nvme_loop_queue_response(struct nvmet_req *req)
req                96 drivers/nvme/target/loop.c 		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
req                97 drivers/nvme/target/loop.c 	struct nvme_completion *cqe = req->cqe;
req               129 drivers/nvme/target/loop.c 	nvmet_req_execute(&iod->req);
req               137 drivers/nvme/target/loop.c 	struct request *req = bd->rq;
req               138 drivers/nvme/target/loop.c 	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
req               142 drivers/nvme/target/loop.c 	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
req               143 drivers/nvme/target/loop.c 		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
req               145 drivers/nvme/target/loop.c 	ret = nvme_setup_cmd(ns, req, &iod->cmd);
req               149 drivers/nvme/target/loop.c 	blk_mq_start_request(req);
req               151 drivers/nvme/target/loop.c 	iod->req.port = queue->ctrl->port;
req               152 drivers/nvme/target/loop.c 	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
req               156 drivers/nvme/target/loop.c 	if (blk_rq_nr_phys_segments(req)) {
req               159 drivers/nvme/target/loop.c 				blk_rq_nr_phys_segments(req),
req               161 drivers/nvme/target/loop.c 			nvme_cleanup_cmd(req);
req               165 drivers/nvme/target/loop.c 		iod->req.sg = iod->sg_table.sgl;
req               166 drivers/nvme/target/loop.c 		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
req               167 drivers/nvme/target/loop.c 		iod->req.transfer_len = blk_rq_payload_bytes(req);
req               185 drivers/nvme/target/loop.c 	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
req               197 drivers/nvme/target/loop.c 	iod->req.cmd = &iod->cmd;
req               198 drivers/nvme/target/loop.c 	iod->req.cqe = &iod->cqe;
req               205 drivers/nvme/target/loop.c 		struct request *req, unsigned int hctx_idx,
req               210 drivers/nvme/target/loop.c 	nvme_req(req)->ctrl = &ctrl->ctrl;
req               211 drivers/nvme/target/loop.c 	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
req               274 drivers/nvme/target/nvmet.h 	void (*queue_response)(struct nvmet_req *req);
req               278 drivers/nvme/target/nvmet.h 	void (*disc_traddr)(struct nvmet_req *req,
req               314 drivers/nvme/target/nvmet.h 	void (*execute)(struct nvmet_req *req);
req               325 drivers/nvme/target/nvmet.h static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
req               327 drivers/nvme/target/nvmet.h 	req->cqe->result.u32 = cpu_to_le32(result);
req               334 drivers/nvme/target/nvmet.h nvmet_data_dir(struct nvmet_req *req)
req               336 drivers/nvme/target/nvmet.h 	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
req               346 drivers/nvme/target/nvmet.h static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
req               348 drivers/nvme/target/nvmet.h 	int rae = le32_to_cpu(req->cmd->common.cdw10) & 1 << 15;
req               351 drivers/nvme/target/nvmet.h 		clear_bit(bn, &req->sq->ctrl->aen_masked);
req               361 drivers/nvme/target/nvmet.h void nvmet_get_feat_kato(struct nvmet_req *req);
req               362 drivers/nvme/target/nvmet.h void nvmet_get_feat_async_event(struct nvmet_req *req);
req               363 drivers/nvme/target/nvmet.h u16 nvmet_set_feat_kato(struct nvmet_req *req);
req               364 drivers/nvme/target/nvmet.h u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
req               365 drivers/nvme/target/nvmet.h void nvmet_execute_async_event(struct nvmet_req *req);
req               367 drivers/nvme/target/nvmet.h u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
req               369 drivers/nvme/target/nvmet.h u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
req               370 drivers/nvme/target/nvmet.h u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
req               371 drivers/nvme/target/nvmet.h u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
req               372 drivers/nvme/target/nvmet.h u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
req               373 drivers/nvme/target/nvmet.h u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
req               375 drivers/nvme/target/nvmet.h bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req               377 drivers/nvme/target/nvmet.h void nvmet_req_uninit(struct nvmet_req *req);
req               378 drivers/nvme/target/nvmet.h void nvmet_req_execute(struct nvmet_req *req);
req               379 drivers/nvme/target/nvmet.h void nvmet_req_complete(struct nvmet_req *req, u16 status);
req               380 drivers/nvme/target/nvmet.h int nvmet_req_alloc_sgl(struct nvmet_req *req);
req               381 drivers/nvme/target/nvmet.h void nvmet_req_free_sgl(struct nvmet_req *req);
req               383 drivers/nvme/target/nvmet.h void nvmet_execute_keep_alive(struct nvmet_req *req);
req               396 drivers/nvme/target/nvmet.h 		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
req               398 drivers/nvme/target/nvmet.h 		struct nvmet_req *req, struct nvmet_ctrl **ret);
req               400 drivers/nvme/target/nvmet.h u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd);
req               430 drivers/nvme/target/nvmet.h u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
req               432 drivers/nvme/target/nvmet.h u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
req               434 drivers/nvme/target/nvmet.h u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);
req               488 drivers/nvme/target/nvmet.h u16 nvmet_bdev_flush(struct nvmet_req *req);
req               489 drivers/nvme/target/nvmet.h u16 nvmet_file_flush(struct nvmet_req *req);
req               492 drivers/nvme/target/nvmet.h static inline u32 nvmet_rw_len(struct nvmet_req *req)
req               494 drivers/nvme/target/nvmet.h 	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
req               495 drivers/nvme/target/nvmet.h 			req->ns->blksize_shift;
req               498 drivers/nvme/target/nvmet.h u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
req                59 drivers/nvme/target/rdma.c 	struct nvmet_req	req;
req               154 drivers/nvme/target/rdma.c 	return nvme_is_write(rsp->req.cmd) &&
req               155 drivers/nvme/target/rdma.c 		rsp->req.transfer_len &&
req               161 drivers/nvme/target/rdma.c 	return !nvme_is_write(rsp->req.cmd) &&
req               162 drivers/nvme/target/rdma.c 		rsp->req.transfer_len &&
req               163 drivers/nvme/target/rdma.c 		!rsp->req.cqe->status &&
req               367 drivers/nvme/target/rdma.c 	r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
req               368 drivers/nvme/target/rdma.c 	if (!r->req.cqe)
req               371 drivers/nvme/target/rdma.c 	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
req               372 drivers/nvme/target/rdma.c 			sizeof(*r->req.cqe), DMA_TO_DEVICE);
req               376 drivers/nvme/target/rdma.c 	r->req.p2p_client = &ndev->device->dev;
req               377 drivers/nvme/target/rdma.c 	r->send_sge.length = sizeof(*r->req.cqe);
req               392 drivers/nvme/target/rdma.c 	kfree(r->req.cqe);
req               401 drivers/nvme/target/rdma.c 				sizeof(*r->req.cqe), DMA_TO_DEVICE);
req               402 drivers/nvme/target/rdma.c 	kfree(r->req.cqe);
req               507 drivers/nvme/target/rdma.c 				queue->cm_id->port_num, rsp->req.sg,
req               508 drivers/nvme/target/rdma.c 				rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
req               511 drivers/nvme/target/rdma.c 	if (rsp->req.sg != rsp->cmd->inline_sg)
req               512 drivers/nvme/target/rdma.c 		nvmet_req_free_sgl(&rsp->req);
req               550 drivers/nvme/target/rdma.c static void nvmet_rdma_queue_response(struct nvmet_req *req)
req               553 drivers/nvme/target/rdma.c 		container_of(req, struct nvmet_rdma_rsp, req);
req               591 drivers/nvme/target/rdma.c 			queue->cm_id->port_num, rsp->req.sg,
req               592 drivers/nvme/target/rdma.c 			rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
req               596 drivers/nvme/target/rdma.c 		nvmet_req_uninit(&rsp->req);
req               606 drivers/nvme/target/rdma.c 	nvmet_req_execute(&rsp->req);
req               629 drivers/nvme/target/rdma.c 	rsp->req.sg = rsp->cmd->inline_sg;
req               630 drivers/nvme/target/rdma.c 	rsp->req.sg_cnt = sg_count;
req               635 drivers/nvme/target/rdma.c 	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
req               639 drivers/nvme/target/rdma.c 	if (!nvme_is_write(rsp->req.cmd)) {
req               640 drivers/nvme/target/rdma.c 		rsp->req.error_loc =
req               656 drivers/nvme/target/rdma.c 	rsp->req.transfer_len += len;
req               668 drivers/nvme/target/rdma.c 	rsp->req.transfer_len = get_unaligned_le24(sgl->length);
req               671 drivers/nvme/target/rdma.c 	if (!rsp->req.transfer_len)
req               674 drivers/nvme/target/rdma.c 	ret = nvmet_req_alloc_sgl(&rsp->req);
req               679 drivers/nvme/target/rdma.c 			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
req               680 drivers/nvme/target/rdma.c 			nvmet_data_dir(&rsp->req));
req               693 drivers/nvme/target/rdma.c 	rsp->req.transfer_len = 0;
req               699 drivers/nvme/target/rdma.c 	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;
req               708 drivers/nvme/target/rdma.c 			rsp->req.error_loc =
req               720 drivers/nvme/target/rdma.c 			rsp->req.error_loc =
req               726 drivers/nvme/target/rdma.c 		rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
req               747 drivers/nvme/target/rdma.c 			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
req               749 drivers/nvme/target/rdma.c 		nvmet_req_execute(&rsp->req);
req               767 drivers/nvme/target/rdma.c 	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
req               784 drivers/nvme/target/rdma.c 	nvmet_req_complete(&cmd->req, status);
req               824 drivers/nvme/target/rdma.c 	rsp->req.cmd = cmd->nvme_cmd;
req               825 drivers/nvme/target/rdma.c 	rsp->req.port = queue->port;
req              1096 drivers/nvme/target/rdma.c 	struct nvme_rdma_cm_req *req;
req              1098 drivers/nvme/target/rdma.c 	req = (struct nvme_rdma_cm_req *)conn->private_data;
req              1099 drivers/nvme/target/rdma.c 	if (!req || conn->private_data_len == 0)
req              1102 drivers/nvme/target/rdma.c 	if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
req              1105 drivers/nvme/target/rdma.c 	queue->host_qid = le16_to_cpu(req->qid);
req              1111 drivers/nvme/target/rdma.c 	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
req              1112 drivers/nvme/target/rdma.c 	queue->send_queue_size = le16_to_cpu(req->hrqsize);
req              1588 drivers/nvme/target/rdma.c static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
req              1595 drivers/nvme/target/rdma.c 			container_of(req, struct nvmet_rdma_rsp, req);
req                47 drivers/nvme/target/tcp.c 	struct nvmet_req		req;
req               158 drivers/nvme/target/tcp.c 	return nvme_is_write(cmd->req.cmd) &&
req               159 drivers/nvme/target/tcp.c 		cmd->rbytes_done < cmd->req.transfer_len;
req               164 drivers/nvme/target/tcp.c 	return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
req               169 drivers/nvme/target/tcp.c 	return !nvme_is_write(cmd->req.cmd) &&
req               170 drivers/nvme/target/tcp.c 		cmd->req.transfer_len > 0 &&
req               171 drivers/nvme/target/tcp.c 		!cmd->req.cqe->status;
req               176 drivers/nvme/target/tcp.c 	return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
req               275 drivers/nvme/target/tcp.c 	sg = &cmd->req.sg[cmd->sg_idx];
req               292 drivers/nvme/target/tcp.c 	sg = &cmd->req.sg[cmd->sg_idx];
req               320 drivers/nvme/target/tcp.c 	struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
req               323 drivers/nvme/target/tcp.c 	if (!cmd->req.data_len)
req               328 drivers/nvme/target/tcp.c 		if (!nvme_is_write(cmd->req.cmd))
req               331 drivers/nvme/target/tcp.c 		if (len > cmd->req.port->inline_data_size)
req               335 drivers/nvme/target/tcp.c 	cmd->req.transfer_len += len;
req               337 drivers/nvme/target/tcp.c 	cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
req               338 drivers/nvme/target/tcp.c 	if (!cmd->req.sg)
req               340 drivers/nvme/target/tcp.c 	cmd->cur_sg = cmd->req.sg;
req               343 drivers/nvme/target/tcp.c 		cmd->iov = kmalloc_array(cmd->req.sg_cnt,
req               351 drivers/nvme/target/tcp.c 	sgl_free(cmd->req.sg);
req               358 drivers/nvme/target/tcp.c 	ahash_request_set_crypt(hash, cmd->req.sg,
req               359 drivers/nvme/target/tcp.c 		(void *)&cmd->exp_ddgst, cmd->req.transfer_len);
req               380 drivers/nvme/target/tcp.c 				cmd->req.transfer_len + ddgst);
req               381 drivers/nvme/target/tcp.c 	pdu->command_id = cmd->req.cqe->command_id;
req               382 drivers/nvme/target/tcp.c 	pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
req               411 drivers/nvme/target/tcp.c 	pdu->command_id = cmd->req.cmd->common.command_id;
req               413 drivers/nvme/target/tcp.c 	pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
req               485 drivers/nvme/target/tcp.c static void nvmet_tcp_queue_response(struct nvmet_req *req)
req               488 drivers/nvme/target/tcp.c 		container_of(req, struct nvmet_tcp_cmd, req);
req               529 drivers/nvme/target/tcp.c 		    cmd->wbytes_done + left < cmd->req.transfer_len ||
req               562 drivers/nvme/target/tcp.c 		sgl_free(cmd->req.sg);
req               593 drivers/nvme/target/tcp.c 	sgl_free(cmd->req.sg);
req               820 drivers/nvme/target/tcp.c 		struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
req               825 drivers/nvme/target/tcp.c 	req->data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
req               827 drivers/nvme/target/tcp.c 	if (!nvme_is_write(cmd->req.cmd) ||
req               828 drivers/nvme/target/tcp.c 	    req->data_len > cmd->req.port->inline_data_size) {
req               857 drivers/nvme/target/tcp.c 		nvmet_req_complete(&cmd->req,
req               875 drivers/nvme/target/tcp.c 	struct nvmet_req *req;
req               905 drivers/nvme/target/tcp.c 	req = &queue->cmd->req;
req               906 drivers/nvme/target/tcp.c 	memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));
req               908 drivers/nvme/target/tcp.c 	if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
req               911 drivers/nvme/target/tcp.c 			req->cmd, req->cmd->common.command_id,
req               912 drivers/nvme/target/tcp.c 			req->cmd->common.opcode,
req               913 drivers/nvme/target/tcp.c 			le32_to_cpu(req->cmd->common.dptr.sgl.length));
req               915 drivers/nvme/target/tcp.c 		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
req               925 drivers/nvme/target/tcp.c 			nvmet_req_complete(req, ret);
req               937 drivers/nvme/target/tcp.c 		nvmet_tcp_queue_response(&queue->cmd->req);
req               941 drivers/nvme/target/tcp.c 	nvmet_req_execute(&queue->cmd->req);
req              1056 drivers/nvme/target/tcp.c 	    cmd->rbytes_done == cmd->req.transfer_len) {
req              1061 drivers/nvme/target/tcp.c 		nvmet_req_execute(&cmd->req);
req              1090 drivers/nvme/target/tcp.c 			queue->idx, cmd->req.cmd->common.command_id,
req              1100 drivers/nvme/target/tcp.c 	    cmd->rbytes_done == cmd->req.transfer_len)
req              1101 drivers/nvme/target/tcp.c 		nvmet_req_execute(&cmd->req);
req              1215 drivers/nvme/target/tcp.c 	c->req.port = queue->port->nport;
req              1221 drivers/nvme/target/tcp.c 	c->req.cmd = &c->cmd_pdu->cmd;
req              1227 drivers/nvme/target/tcp.c 	c->req.cqe = &c->rsp_pdu->cqe;
req              1313 drivers/nvme/target/tcp.c 	nvmet_req_uninit(&cmd->req);
req              1316 drivers/nvme/target/tcp.c 	sgl_free(cmd->req.sg);
req              1700 drivers/nvme/target/tcp.c static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
req              1707 drivers/nvme/target/tcp.c 			container_of(req, struct nvmet_tcp_cmd, req);
req                44 drivers/nvme/target/trace.h static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
req                46 drivers/nvme/target/trace.h 	return req->sq->ctrl;
req                49 drivers/nvme/target/trace.h static inline void __assign_disk_name(char *name, struct nvmet_req *req,
req                52 drivers/nvme/target/trace.h 	struct nvmet_ctrl *ctrl = nvmet_req_to_ctrl(req);
req                55 drivers/nvme/target/trace.h 	if ((init && req->sq->qid) || (!init && req->cq->qid)) {
req                56 drivers/nvme/target/trace.h 		ns = nvmet_find_namespace(ctrl, req->cmd->rw.nsid);
req                66 drivers/nvme/target/trace.h 	TP_PROTO(struct nvmet_req *req, struct nvme_command *cmd),
req                67 drivers/nvme/target/trace.h 	TP_ARGS(req, cmd),
req                83 drivers/nvme/target/trace.h 		__entry->ctrl = nvmet_req_to_ctrl(req);
req                84 drivers/nvme/target/trace.h 		__assign_disk_name(__entry->disk, req, true);
req                85 drivers/nvme/target/trace.h 		__entry->qid = req->sq->qid;
req               108 drivers/nvme/target/trace.h 	TP_PROTO(struct nvmet_req *req),
req               109 drivers/nvme/target/trace.h 	TP_ARGS(req),
req               119 drivers/nvme/target/trace.h 		__entry->ctrl = nvmet_req_to_ctrl(req);
req               120 drivers/nvme/target/trace.h 		__entry->qid = req->cq->qid;
req               121 drivers/nvme/target/trace.h 		__entry->cid = req->cqe->command_id;
req               122 drivers/nvme/target/trace.h 		__entry->result = le64_to_cpu(req->cqe->result.u64);
req               123 drivers/nvme/target/trace.h 		__entry->status = le16_to_cpu(req->cqe->status) >> 1;
req               124 drivers/nvme/target/trace.h 		__assign_disk_name(__entry->disk, req, false);
req               999 drivers/pci/controller/dwc/pcie-tegra194.c 	struct mrq_uphy_request req;
req              1005 drivers/pci/controller/dwc/pcie-tegra194.c 	memset(&req, 0, sizeof(req));
req              1008 drivers/pci/controller/dwc/pcie-tegra194.c 	req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
req              1009 drivers/pci/controller/dwc/pcie-tegra194.c 	req.controller_state.pcie_controller = pcie->cid;
req              1010 drivers/pci/controller/dwc/pcie-tegra194.c 	req.controller_state.enable = enable;
req              1014 drivers/pci/controller/dwc/pcie-tegra194.c 	msg.tx.data = &req;
req              1015 drivers/pci/controller/dwc/pcie-tegra194.c 	msg.tx.size = sizeof(req);
req               460 drivers/pcmcia/cistpl.c 		cisdata_t req = tuple->DesiredTuple;
req               468 drivers/pcmcia/cistpl.c 		tuple->DesiredTuple = req;
req               132 drivers/perf/fsl_imx8_ddr_perf.c 	IMX8_DDR_PMU_EVENT_ATTR(hp-req-nocredit, 0x24),
req               134 drivers/perf/fsl_imx8_ddr_perf.c 	IMX8_DDR_PMU_EVENT_ATTR(lp-req-nocredit, 0x26),
req               409 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(input-req-async-fifo-stall,	0x12),
req               410 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(output-req-async-fifo-stall,	0x13),
req               436 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-all,		0x01),
req               437 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-rd,		0x02),
req               438 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-wr,		0x03),
req               439 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(pa-all-cp-req,			0x04),
req               440 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(pa-cp-blk-req,			0x05),
req               441 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(pa-cp-ptl-req,			0x06),
req               442 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(pa-cp-rd-req,			0x07),
req               443 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(pa-cp-wr-req,			0x08),
req               444 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(ba-all-req,			0x09),
req               445 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(ba-rd-req,				0x0a),
req               446 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(ba-wr-req,				0x0b),
req               447 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(pa-rd-shared-req-issued,		0x10),
req               448 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-req-issued,	0x11),
req               449 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(pa-wr-invalidate-req-issued-stashable, 0x12),
req               450 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(pa-wr-invalidate-req-issued-nonstashable, 0x13),
req               451 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(pa-wr-back-req-issued-stashable,	0x14),
req               452 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(pa-wr-back-req-issued-nonstashable, 0x15),
req               453 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(pa-ptl-wr-req,			0x16),
req               454 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(pa-ptl-rd-req,			0x17),
req               466 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(pa-req-buffer-full,		0x28),
req               467 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(cswlf-outbound-req-fifo-full,	0x29),
req               473 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(cswlf-inbound-req-backpressure,	0x2f),
req               479 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(pa-axi0-rd-req,			0x01),
req               480 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(pa-axi0-wr-req,			0x02),
req               481 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(pa-axi1-rd-req,			0x03),
req               482 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(pa-axi1-wr-req,			0x04),
req               483 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(ba-all-axi-req,			0x07),
req               484 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(ba-axi-rd-req,			0x08),
req               485 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(ba-axi-wr-req,			0x09),
req               492 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(req-receive,			0x01),
req               493 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(rd-req-recv,			0x02),
req               494 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(rd-req-recv-2,			0x03),
req               495 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(wr-req-recv,			0x04),
req               496 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(wr-req-recv-2,			0x05),
req               497 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-mcu,		0x06),
req               498 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-mcu-2,		0x07),
req               499 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-spec-mcu,		0x08),
req               500 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-spec-mcu-2,		0x09),
req               504 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-any-rd-req,	0x0d),
req               505 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-any-rd-req-2,	0x0e),
req               506 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(wr-req-sent-to-mcu,		0x0f),
req               512 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(mcb-csw-req-stall,			0x15),
req               513 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(mcu-req-intf-blocked,		0x16),
req               517 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(mcu-req-table-full,		0x1a),
req               524 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(mcu-req-from-lastload,		0x21),
req               525 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(mcu-req-from-bypass,		0x22),
req               549 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(hprd-lprd-wr-req-vld,		0x12),
req               550 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(lprd-req-vld,			0x13),
req               551 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(hprd-req-vld,			0x14),
req               552 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(hprd-lprd-req-vld,			0x15),
req               553 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(wr-req-vld,			0x16),
req               554 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(partial-wr-req-vld,		0x17),
req                82 drivers/platform/chrome/wilco_ec/sysfs.c 	struct get_ec_info_req req = { .cmd = CMD_EC_INFO, .op = op };
req                88 drivers/platform/chrome/wilco_ec/sysfs.c 		.request_data = &req,
req                89 drivers/platform/chrome/wilco_ec/sysfs.c 		.request_size = sizeof(req),
req               125 drivers/platform/x86/intel-hid.c 	union acpi_object *obj, argv4, req;
req               139 drivers/platform/x86/intel-hid.c 	req.type = ACPI_TYPE_INTEGER;
req               140 drivers/platform/x86/intel-hid.c 	req.integer.value = arg;
req               144 drivers/platform/x86/intel-hid.c 	argv4.package.elements = &req;
req               164 drivers/power/supply/cros_usbpd-charger.c 	struct ec_params_usb_pd_info_request req;
req               167 drivers/power/supply/cros_usbpd-charger.c 	req.port = port->port_number;
req               171 drivers/power/supply/cros_usbpd-charger.c 					    &req, sizeof(req),
req               193 drivers/power/supply/cros_usbpd-charger.c 	struct ec_params_usb_pd_power_info req;
req               198 drivers/power/supply/cros_usbpd-charger.c 	req.port = port->port_number;
req               201 drivers/power/supply/cros_usbpd-charger.c 					    &req, sizeof(req),
req               338 drivers/power/supply/cros_usbpd-charger.c 	struct ec_params_external_power_limit_v1 req;
req               341 drivers/power/supply/cros_usbpd-charger.c 	req.current_lim = current_lim;
req               342 drivers/power/supply/cros_usbpd-charger.c 	req.voltage_lim = voltage_lim;
req               346 drivers/power/supply/cros_usbpd-charger.c 					    &req, sizeof(req), NULL, 0);
req               118 drivers/ptp/ptp_chardev.c 	struct ptp_clock_request req;
req               145 drivers/ptp/ptp_chardev.c 		memset(&req, 0, sizeof(req));
req               147 drivers/ptp/ptp_chardev.c 		if (copy_from_user(&req.extts, (void __user *)arg,
req               148 drivers/ptp/ptp_chardev.c 				   sizeof(req.extts))) {
req               154 drivers/ptp/ptp_chardev.c 			req.extts.flags |= PTP_STRICT_FLAGS;
req               156 drivers/ptp/ptp_chardev.c 			if ((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
req               157 drivers/ptp/ptp_chardev.c 			    req.extts.rsv[0] || req.extts.rsv[1]) {
req               162 drivers/ptp/ptp_chardev.c 			if ((req.extts.flags & PTP_ENABLE_FEATURE) &&
req               163 drivers/ptp/ptp_chardev.c 			    (req.extts.flags & PTP_EXTTS_EDGES) == 0) {
req               168 drivers/ptp/ptp_chardev.c 			req.extts.flags &= PTP_EXTTS_V1_VALID_FLAGS;
req               169 drivers/ptp/ptp_chardev.c 			req.extts.rsv[0] = 0;
req               170 drivers/ptp/ptp_chardev.c 			req.extts.rsv[1] = 0;
req               172 drivers/ptp/ptp_chardev.c 		if (req.extts.index >= ops->n_ext_ts) {
req               176 drivers/ptp/ptp_chardev.c 		req.type = PTP_CLK_REQ_EXTTS;
req               177 drivers/ptp/ptp_chardev.c 		enable = req.extts.flags & PTP_ENABLE_FEATURE ? 1 : 0;
req               178 drivers/ptp/ptp_chardev.c 		err = ops->enable(ops, &req, enable);
req               183 drivers/ptp/ptp_chardev.c 		memset(&req, 0, sizeof(req));
req               185 drivers/ptp/ptp_chardev.c 		if (copy_from_user(&req.perout, (void __user *)arg,
req               186 drivers/ptp/ptp_chardev.c 				   sizeof(req.perout))) {
req               190 drivers/ptp/ptp_chardev.c 		if (((req.perout.flags & ~PTP_PEROUT_VALID_FLAGS) ||
req               191 drivers/ptp/ptp_chardev.c 			req.perout.rsv[0] || req.perout.rsv[1] ||
req               192 drivers/ptp/ptp_chardev.c 			req.perout.rsv[2] || req.perout.rsv[3]) &&
req               197 drivers/ptp/ptp_chardev.c 			req.perout.flags &= PTP_PEROUT_V1_VALID_FLAGS;
req               198 drivers/ptp/ptp_chardev.c 			req.perout.rsv[0] = 0;
req               199 drivers/ptp/ptp_chardev.c 			req.perout.rsv[1] = 0;
req               200 drivers/ptp/ptp_chardev.c 			req.perout.rsv[2] = 0;
req               201 drivers/ptp/ptp_chardev.c 			req.perout.rsv[3] = 0;
req               203 drivers/ptp/ptp_chardev.c 		if (req.perout.index >= ops->n_per_out) {
req               207 drivers/ptp/ptp_chardev.c 		req.type = PTP_CLK_REQ_PEROUT;
req               208 drivers/ptp/ptp_chardev.c 		enable = req.perout.period.sec || req.perout.period.nsec;
req               209 drivers/ptp/ptp_chardev.c 		err = ops->enable(ops, &req, enable);
req               214 drivers/ptp/ptp_chardev.c 		memset(&req, 0, sizeof(req));
req               218 drivers/ptp/ptp_chardev.c 		req.type = PTP_CLK_REQ_PPS;
req               220 drivers/ptp/ptp_chardev.c 		err = ops->enable(ops, &req, enable);
req                42 drivers/ptp/ptp_sysfs.c 	struct ptp_clock_request req = { .type = PTP_CLK_REQ_EXTTS };
req                46 drivers/ptp/ptp_sysfs.c 	cnt = sscanf(buf, "%u %d", &req.extts.index, &enable);
req                49 drivers/ptp/ptp_sysfs.c 	if (req.extts.index >= ops->n_ext_ts)
req                52 drivers/ptp/ptp_sysfs.c 	err = ops->enable(ops, &req, enable ? 1 : 0);
req               102 drivers/ptp/ptp_sysfs.c 	struct ptp_clock_request req = { .type = PTP_CLK_REQ_PEROUT };
req               105 drivers/ptp/ptp_sysfs.c 	cnt = sscanf(buf, "%u %lld %u %lld %u", &req.perout.index,
req               106 drivers/ptp/ptp_sysfs.c 		     &req.perout.start.sec, &req.perout.start.nsec,
req               107 drivers/ptp/ptp_sysfs.c 		     &req.perout.period.sec, &req.perout.period.nsec);
req               110 drivers/ptp/ptp_sysfs.c 	if (req.perout.index >= ops->n_per_out)
req               113 drivers/ptp/ptp_sysfs.c 	enable = req.perout.period.sec || req.perout.period.nsec;
req               114 drivers/ptp/ptp_sysfs.c 	err = ops->enable(ops, &req, enable);
req               130 drivers/ptp/ptp_sysfs.c 	struct ptp_clock_request req = { .type = PTP_CLK_REQ_PPS };
req               141 drivers/ptp/ptp_sysfs.c 	err = ops->enable(ops, &req, enable ? 1 : 0);
req               572 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
req               574 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_cdev_priv *priv = req->priv;
req               577 drivers/rapidio/devices/rio_mport_cdev.c 	dma_unmap_sg(req->dmach->device->dev,
req               578 drivers/rapidio/devices/rio_mport_cdev.c 		     req->sgt.sgl, req->sgt.nents, req->dir);
req               579 drivers/rapidio/devices/rio_mport_cdev.c 	sg_free_table(&req->sgt);
req               580 drivers/rapidio/devices/rio_mport_cdev.c 	if (req->page_list) {
req               581 drivers/rapidio/devices/rio_mport_cdev.c 		for (i = 0; i < req->nr_pages; i++)
req               582 drivers/rapidio/devices/rio_mport_cdev.c 			put_page(req->page_list[i]);
req               583 drivers/rapidio/devices/rio_mport_cdev.c 		kfree(req->page_list);
req               586 drivers/rapidio/devices/rio_mport_cdev.c 	if (req->map) {
req               587 drivers/rapidio/devices/rio_mport_cdev.c 		mutex_lock(&req->map->md->buf_mutex);
req               588 drivers/rapidio/devices/rio_mport_cdev.c 		kref_put(&req->map->ref, mport_release_mapping);
req               589 drivers/rapidio/devices/rio_mport_cdev.c 		mutex_unlock(&req->map->md->buf_mutex);
req               594 drivers/rapidio/devices/rio_mport_cdev.c 	kfree(req);
req               599 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dma_req *req = (struct mport_dma_req *)param;
req               600 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_cdev_priv *priv = req->priv;
req               602 drivers/rapidio/devices/rio_mport_cdev.c 	req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
req               604 drivers/rapidio/devices/rio_mport_cdev.c 	complete(&req->req_comp);
req               605 drivers/rapidio/devices/rio_mport_cdev.c 	kref_put(&req->refcount, dma_req_free);
req               693 drivers/rapidio/devices/rio_mport_cdev.c static int do_dma_request(struct mport_dma_req *req,
req               707 drivers/rapidio/devices/rio_mport_cdev.c 	priv = req->priv;
req               708 drivers/rapidio/devices/rio_mport_cdev.c 	sgt = &req->sgt;
req               711 drivers/rapidio/devices/rio_mport_cdev.c 	dir = (req->dir == DMA_FROM_DEVICE) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
req               737 drivers/rapidio/devices/rio_mport_cdev.c 	tx->callback_param = req;
req               739 drivers/rapidio/devices/rio_mport_cdev.c 	req->status = DMA_IN_PROGRESS;
req               740 drivers/rapidio/devices/rio_mport_cdev.c 	kref_get(&req->refcount);
req               743 drivers/rapidio/devices/rio_mport_cdev.c 	req->cookie = cookie;
req               751 drivers/rapidio/devices/rio_mport_cdev.c 		kref_put(&req->refcount, dma_req_free);
req               760 drivers/rapidio/devices/rio_mport_cdev.c 		list_add_tail(&req->node, &priv->async_list);
req               766 drivers/rapidio/devices/rio_mport_cdev.c 	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);
req               784 drivers/rapidio/devices/rio_mport_cdev.c 	if (req->status != DMA_COMPLETE) {
req               789 drivers/rapidio/devices/rio_mport_cdev.c 			cookie, req->status, ret);
req               815 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dma_req *req;
req               823 drivers/rapidio/devices/rio_mport_cdev.c 	req = kzalloc(sizeof(*req), GFP_KERNEL);
req               824 drivers/rapidio/devices/rio_mport_cdev.c 	if (!req)
req               829 drivers/rapidio/devices/rio_mport_cdev.c 		kfree(req);
req               834 drivers/rapidio/devices/rio_mport_cdev.c 	kref_init(&req->refcount);
req               835 drivers/rapidio/devices/rio_mport_cdev.c 	init_completion(&req->req_comp);
req               836 drivers/rapidio/devices/rio_mport_cdev.c 	req->dir = dir;
req               837 drivers/rapidio/devices/rio_mport_cdev.c 	req->filp = filp;
req               838 drivers/rapidio/devices/rio_mport_cdev.c 	req->priv = priv;
req               839 drivers/rapidio/devices/rio_mport_cdev.c 	req->dmach = chan;
req               840 drivers/rapidio/devices/rio_mport_cdev.c 	req->sync = sync;
req               888 drivers/rapidio/devices/rio_mport_cdev.c 		ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages,
req               895 drivers/rapidio/devices/rio_mport_cdev.c 		req->page_list = page_list;
req               896 drivers/rapidio/devices/rio_mport_cdev.c 		req->nr_pages = nr_pages;
req               908 drivers/rapidio/devices/rio_mport_cdev.c 				req->map = map;
req               914 drivers/rapidio/devices/rio_mport_cdev.c 		if (req->map == NULL) {
req               924 drivers/rapidio/devices/rio_mport_cdev.c 		ret = sg_alloc_table(&req->sgt, 1, GFP_KERNEL);
req               930 drivers/rapidio/devices/rio_mport_cdev.c 		sg_set_buf(req->sgt.sgl,
req               936 drivers/rapidio/devices/rio_mport_cdev.c 			   req->sgt.sgl, req->sgt.nents, dir);
req               943 drivers/rapidio/devices/rio_mport_cdev.c 	ret = do_dma_request(req, xfer, sync, nents);
req               953 drivers/rapidio/devices/rio_mport_cdev.c 	if (!req->page_list) {
req               959 drivers/rapidio/devices/rio_mport_cdev.c 	kref_put(&req->refcount, dma_req_free);
req              1013 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dma_req *req;
req              1032 drivers/rapidio/devices/rio_mport_cdev.c 	list_for_each_entry(req, &priv->async_list, node) {
req              1033 drivers/rapidio/devices/rio_mport_cdev.c 		if (req->cookie == cookie) {
req              1034 drivers/rapidio/devices/rio_mport_cdev.c 			list_del(&req->node);
req              1044 drivers/rapidio/devices/rio_mport_cdev.c 	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);
req              1050 drivers/rapidio/devices/rio_mport_cdev.c 		       (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
req              1059 drivers/rapidio/devices/rio_mport_cdev.c 			(req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
req              1064 drivers/rapidio/devices/rio_mport_cdev.c 	if (req->status != DMA_COMPLETE) {
req              1068 drivers/rapidio/devices/rio_mport_cdev.c 			(req->dir == DMA_FROM_DEVICE)?"READ":"WRITE",
req              1069 drivers/rapidio/devices/rio_mport_cdev.c 			req->status);
req              1074 drivers/rapidio/devices/rio_mport_cdev.c 	if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
req              1075 drivers/rapidio/devices/rio_mport_cdev.c 		kref_put(&req->refcount, dma_req_free);
req              1082 drivers/rapidio/devices/rio_mport_cdev.c 	list_add_tail(&req->node, &priv->async_list);
req              1946 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dma_req *req, *req_next;
req              1971 drivers/rapidio/devices/rio_mport_cdev.c 		list_for_each_entry_safe(req, req_next, &list, node) {
req              1973 drivers/rapidio/devices/rio_mport_cdev.c 				   req->filp, req->cookie,
req              1974 drivers/rapidio/devices/rio_mport_cdev.c 				   completion_done(&req->req_comp)?"yes":"no");
req              1975 drivers/rapidio/devices/rio_mport_cdev.c 			list_del(&req->node);
req              1976 drivers/rapidio/devices/rio_mport_cdev.c 			kref_put(&req->refcount, dma_req_free);
req               379 drivers/rapidio/rio_cm.c 	struct conn_req *req;
req               396 drivers/rapidio/rio_cm.c 	req = kzalloc(sizeof(*req), GFP_KERNEL);
req               397 drivers/rapidio/rio_cm.c 	if (!req) {
req               402 drivers/rapidio/rio_cm.c 	req->destid = ntohl(hh->bhdr.src_id);
req               403 drivers/rapidio/rio_cm.c 	req->chan = ntohs(hh->src_ch);
req               404 drivers/rapidio/rio_cm.c 	req->cmdev = cm;
req               407 drivers/rapidio/rio_cm.c 	list_add_tail(&req->node, &ch->accept_queue);
req               672 drivers/rapidio/rio_cm.c 		struct tx_req *req, *_req;
req               675 drivers/rapidio/rio_cm.c 		list_for_each_entry_safe(req, _req, &cm->tx_reqs, node) {
req               676 drivers/rapidio/rio_cm.c 			list_del(&req->node);
req               677 drivers/rapidio/rio_cm.c 			cm->tx_buf[cm->tx_slot] = req->buffer;
req               678 drivers/rapidio/rio_cm.c 			rc = rio_add_outb_message(cm->mport, req->rdev, cmbox,
req               679 drivers/rapidio/rio_cm.c 						  req->buffer, req->len);
req               680 drivers/rapidio/rio_cm.c 			kfree(req->buffer);
req               681 drivers/rapidio/rio_cm.c 			kfree(req);
req              1076 drivers/rapidio/rio_cm.c 	struct conn_req *req;
req              1140 drivers/rapidio/rio_cm.c 	req = list_first_entry(&ch->accept_queue, struct conn_req, node);
req              1141 drivers/rapidio/rio_cm.c 	list_del(&req->node);
req              1144 drivers/rapidio/rio_cm.c 	new_ch->rem_destid = req->destid;
req              1145 drivers/rapidio/rio_cm.c 	new_ch->rem_channel = req->chan;
req              1150 drivers/rapidio/rio_cm.c 	kfree(req);
req               190 drivers/regulator/qcom_rpm-regulator.c 			 const struct request_member *req,
req               193 drivers/regulator/qcom_rpm-regulator.c 	if (WARN_ON((value << req->shift) & ~req->mask))
req               196 drivers/regulator/qcom_rpm-regulator.c 	vreg->val[req->word] &= ~req->mask;
req               197 drivers/regulator/qcom_rpm-regulator.c 	vreg->val[req->word] |= value << req->shift;
req               211 drivers/regulator/qcom_rpm-regulator.c 	const struct request_member *req = &parts->mV;
req               215 drivers/regulator/qcom_rpm-regulator.c 	if (req->mask == 0)
req               224 drivers/regulator/qcom_rpm-regulator.c 		ret = rpm_reg_write(vreg, req, uV / 1000);
req               238 drivers/regulator/qcom_rpm-regulator.c 	const struct request_member *req = &parts->uV;
req               242 drivers/regulator/qcom_rpm-regulator.c 	if (req->mask == 0)
req               251 drivers/regulator/qcom_rpm-regulator.c 		ret = rpm_reg_write(vreg, req, uV);
req               271 drivers/regulator/qcom_rpm-regulator.c 	const struct request_member *req = &parts->mV;
req               274 drivers/regulator/qcom_rpm-regulator.c 	if (req->mask == 0)
req               278 drivers/regulator/qcom_rpm-regulator.c 	ret = rpm_reg_write(vreg, req, vreg->uV / 1000);
req               290 drivers/regulator/qcom_rpm-regulator.c 	const struct request_member *req = &parts->uV;
req               293 drivers/regulator/qcom_rpm-regulator.c 	if (req->mask == 0)
req               297 drivers/regulator/qcom_rpm-regulator.c 	ret = rpm_reg_write(vreg, req, vreg->uV);
req               309 drivers/regulator/qcom_rpm-regulator.c 	const struct request_member *req = &parts->enable_state;
req               312 drivers/regulator/qcom_rpm-regulator.c 	if (req->mask == 0)
req               316 drivers/regulator/qcom_rpm-regulator.c 	ret = rpm_reg_write(vreg, req, 1);
req               328 drivers/regulator/qcom_rpm-regulator.c 	const struct request_member *req = &parts->mV;
req               331 drivers/regulator/qcom_rpm-regulator.c 	if (req->mask == 0)
req               335 drivers/regulator/qcom_rpm-regulator.c 	ret = rpm_reg_write(vreg, req, 0);
req               347 drivers/regulator/qcom_rpm-regulator.c 	const struct request_member *req = &parts->uV;
req               350 drivers/regulator/qcom_rpm-regulator.c 	if (req->mask == 0)
req               354 drivers/regulator/qcom_rpm-regulator.c 	ret = rpm_reg_write(vreg, req, 0);
req               366 drivers/regulator/qcom_rpm-regulator.c 	const struct request_member *req = &parts->enable_state;
req               369 drivers/regulator/qcom_rpm-regulator.c 	if (req->mask == 0)
req               373 drivers/regulator/qcom_rpm-regulator.c 	ret = rpm_reg_write(vreg, req, 0);
req               392 drivers/regulator/qcom_rpm-regulator.c 	const struct request_member *req = &parts->ia;
req               394 drivers/regulator/qcom_rpm-regulator.c 	int max_mA = req->mask >> req->shift;
req               397 drivers/regulator/qcom_rpm-regulator.c 	if (req->mask == 0)
req               404 drivers/regulator/qcom_rpm-regulator.c 	ret = rpm_reg_write(vreg, req, load_mA);
req               641 drivers/regulator/qcom_rpm-regulator.c 		       const struct request_member *req,
req               644 drivers/regulator/qcom_rpm-regulator.c 	if (req->mask == 0 || (value << req->shift) & ~req->mask)
req               647 drivers/regulator/qcom_rpm-regulator.c 	vreg->val[req->word] &= ~req->mask;
req               648 drivers/regulator/qcom_rpm-regulator.c 	vreg->val[req->word] |= value << req->shift;
req                45 drivers/regulator/qcom_smd-regulator.c 	struct rpm_regulator_req req[3];
req                50 drivers/regulator/qcom_smd-regulator.c 		req[reqlen].key = cpu_to_le32(RPM_KEY_SWEN);
req                51 drivers/regulator/qcom_smd-regulator.c 		req[reqlen].nbytes = cpu_to_le32(sizeof(u32));
req                52 drivers/regulator/qcom_smd-regulator.c 		req[reqlen].value = cpu_to_le32(vreg->is_enabled);
req                57 drivers/regulator/qcom_smd-regulator.c 		req[reqlen].key = cpu_to_le32(RPM_KEY_UV);
req                58 drivers/regulator/qcom_smd-regulator.c 		req[reqlen].nbytes = cpu_to_le32(sizeof(u32));
req                59 drivers/regulator/qcom_smd-regulator.c 		req[reqlen].value = cpu_to_le32(vreg->uV);
req                64 drivers/regulator/qcom_smd-regulator.c 		req[reqlen].key = cpu_to_le32(RPM_KEY_MA);
req                65 drivers/regulator/qcom_smd-regulator.c 		req[reqlen].nbytes = cpu_to_le32(sizeof(u32));
req                66 drivers/regulator/qcom_smd-regulator.c 		req[reqlen].value = cpu_to_le32(vreg->load / 1000);
req                75 drivers/regulator/qcom_smd-regulator.c 				 req, sizeof(req[0]) * reqlen);
req                59 drivers/remoteproc/qcom_sysmon.c 	char req[50];
req                63 drivers/remoteproc/qcom_sysmon.c 	len = snprintf(req, sizeof(req), "ssr:%s:before_shutdown", name);
req                64 drivers/remoteproc/qcom_sysmon.c 	if (len >= sizeof(req))
req                71 drivers/remoteproc/qcom_sysmon.c 	ret = rpmsg_send(sysmon->ept, req, len);
req                97 drivers/remoteproc/qcom_sysmon.c 	char *req = "ssr:shutdown";
req               104 drivers/remoteproc/qcom_sysmon.c 	ret = rpmsg_send(sysmon->ept, req, strlen(req) + 1);
req               337 drivers/remoteproc/qcom_sysmon.c 	struct ssctl_subsys_event_req req;
req               348 drivers/remoteproc/qcom_sysmon.c 	memset(&req, 0, sizeof(req));
req               349 drivers/remoteproc/qcom_sysmon.c 	strlcpy(req.subsys_name, name, sizeof(req.subsys_name));
req               350 drivers/remoteproc/qcom_sysmon.c 	req.subsys_name_len = strlen(req.subsys_name);
req               351 drivers/remoteproc/qcom_sysmon.c 	req.event = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN;
req               352 drivers/remoteproc/qcom_sysmon.c 	req.evt_driven_valid = true;
req               353 drivers/remoteproc/qcom_sysmon.c 	req.evt_driven = SSCTL_SSR_EVENT_FORCED;
req               357 drivers/remoteproc/qcom_sysmon.c 			       ssctl_subsys_event_req_ei, &req);
req               411 drivers/rpmsg/qcom_glink_native.c 	} __packed req;
req               413 drivers/rpmsg/qcom_glink_native.c 	int req_len = ALIGN(sizeof(req.msg) + name_len, 8);
req               429 drivers/rpmsg/qcom_glink_native.c 	req.msg.cmd = cpu_to_le16(RPM_CMD_OPEN);
req               430 drivers/rpmsg/qcom_glink_native.c 	req.msg.param1 = cpu_to_le16(channel->lcid);
req               431 drivers/rpmsg/qcom_glink_native.c 	req.msg.param2 = cpu_to_le32(name_len);
req               432 drivers/rpmsg/qcom_glink_native.c 	strcpy(req.name, channel->name);
req               434 drivers/rpmsg/qcom_glink_native.c 	ret = qcom_glink_tx(glink, &req, req_len, NULL, 0, true);
req               452 drivers/rpmsg/qcom_glink_native.c 	struct glink_msg req;
req               454 drivers/rpmsg/qcom_glink_native.c 	req.cmd = cpu_to_le16(RPM_CMD_CLOSE);
req               455 drivers/rpmsg/qcom_glink_native.c 	req.param1 = cpu_to_le16(channel->lcid);
req               456 drivers/rpmsg/qcom_glink_native.c 	req.param2 = 0;
req               458 drivers/rpmsg/qcom_glink_native.c 	qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
req               464 drivers/rpmsg/qcom_glink_native.c 	struct glink_msg req;
req               466 drivers/rpmsg/qcom_glink_native.c 	req.cmd = cpu_to_le16(RPM_CMD_CLOSE_ACK);
req               467 drivers/rpmsg/qcom_glink_native.c 	req.param1 = cpu_to_le16(rcid);
req               468 drivers/rpmsg/qcom_glink_native.c 	req.param2 = 0;
req               470 drivers/rpmsg/qcom_glink_native.c 	qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
req              1270 drivers/rpmsg/qcom_glink_native.c 	} __packed req;
req              1306 drivers/rpmsg/qcom_glink_native.c 	req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA);
req              1307 drivers/rpmsg/qcom_glink_native.c 	req.msg.param1 = cpu_to_le16(channel->lcid);
req              1308 drivers/rpmsg/qcom_glink_native.c 	req.msg.param2 = cpu_to_le32(iid);
req              1309 drivers/rpmsg/qcom_glink_native.c 	req.chunk_size = cpu_to_le32(len);
req              1310 drivers/rpmsg/qcom_glink_native.c 	req.left_size = cpu_to_le32(0);
req              1312 drivers/rpmsg/qcom_glink_native.c 	ret = qcom_glink_tx(glink, &req, sizeof(req), data, len, wait);
req               170 drivers/rtc/rtc-ac100.c 				       struct clk_rate_request *req)
req               202 drivers/rtc/rtc-ac100.c 		tmp = ac100_clkout_round_rate(hw, req->rate, prate);
req               204 drivers/rtc/rtc-ac100.c 		if (tmp > req->rate)
req               206 drivers/rtc/rtc-ac100.c 		if (req->rate - tmp < req->rate - best) {
req               215 drivers/rtc/rtc-ac100.c 	req->best_parent_hw = best_parent;
req               216 drivers/rtc/rtc-ac100.c 	req->best_parent_rate = best;
req               217 drivers/rtc/rtc-ac100.c 	req->rate = best;
req               705 drivers/s390/block/dasd.c 			       struct request *req)
req               721 drivers/s390/block/dasd.c 		if (rq_data_dir(req) == READ)
req               729 drivers/s390/block/dasd.c 		if (rq_data_dir(req) == READ)
req               749 drivers/s390/block/dasd.c 		if (rq_data_dir(req) == READ)
req               815 drivers/s390/block/dasd.c 			     struct request *req)
req               830 drivers/s390/block/dasd.c 	sectors = blk_rq_sectors(req);
req               860 drivers/s390/block/dasd.c 					  rq_data_dir(req) == READ,
req               878 drivers/s390/block/dasd.c 					  rq_data_dir(req) == READ,
req               896 drivers/s390/block/dasd.c 					  rq_data_dir(req) == READ,
req              1155 drivers/s390/block/dasd.c #define dasd_profile_start(block, cqr, req) do {} while (0)
req              1156 drivers/s390/block/dasd.c #define dasd_profile_end(block, cqr, req) do {} while (0)
req              2757 drivers/s390/block/dasd.c 	struct request *req;
req              2762 drivers/s390/block/dasd.c 	req = (struct request *) cqr->callback_data;
req              2763 drivers/s390/block/dasd.c 	dasd_profile_end(cqr->block, cqr, req);
req              2766 drivers/s390/block/dasd.c 	status = cqr->block->base->discipline->free_cp(cqr, req);
req              2793 drivers/s390/block/dasd.c 		blk_mq_end_request(req, error);
req              2794 drivers/s390/block/dasd.c 		blk_mq_run_hw_queues(req->q, true);
req              2802 drivers/s390/block/dasd.c 			blk_update_request(req, BLK_STS_OK,
req              2803 drivers/s390/block/dasd.c 					   blk_rq_bytes(req) - proc_bytes);
req              2804 drivers/s390/block/dasd.c 			blk_mq_requeue_request(req, true);
req              2806 drivers/s390/block/dasd.c 			blk_mq_complete_request(req);
req              2979 drivers/s390/block/dasd.c 	struct request *req;
req              2984 drivers/s390/block/dasd.c 	req = (struct request *) cqr->callback_data;
req              2985 drivers/s390/block/dasd.c 	blk_mq_requeue_request(req, false);
req              3077 drivers/s390/block/dasd.c 	struct request *req = qd->rq;
req              3086 drivers/s390/block/dasd.c 			      "device not ready for request %p", req);
req              3098 drivers/s390/block/dasd.c 			      "device stopped request %p", req);
req              3104 drivers/s390/block/dasd.c 	    rq_data_dir(req) == WRITE) {
req              3106 drivers/s390/block/dasd.c 			      "Rejecting write request %p", req);
req              3113 drivers/s390/block/dasd.c 	     blk_noretry_request(req))) {
req              3115 drivers/s390/block/dasd.c 			      "Rejecting failfast request %p", req);
req              3120 drivers/s390/block/dasd.c 	cqr = basedev->discipline->build_cp(basedev, block, req);
req              3130 drivers/s390/block/dasd.c 			      PTR_ERR(cqr), req);
req              3138 drivers/s390/block/dasd.c 	cqr->callback_data = req;
req              3142 drivers/s390/block/dasd.c 	blk_mq_start_request(req);
req              3146 drivers/s390/block/dasd.c 	dasd_profile_start(block, cqr, req);
req              3163 drivers/s390/block/dasd.c enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
req              3165 drivers/s390/block/dasd.c 	struct dasd_block *block = req->q->queuedata;
req              3171 drivers/s390/block/dasd.c 	cqr = blk_mq_rq_to_pdu(req);
req              3251 drivers/s390/block/dasd.c static void dasd_request_done(struct request *req)
req              3253 drivers/s390/block/dasd.c 	blk_mq_end_request(req, 0);
req              3254 drivers/s390/block/dasd.c 	blk_mq_run_hw_queues(req->q, true);
req               501 drivers/s390/block/dasd_diag.c 					       struct request *req)
req               514 drivers/s390/block/dasd_diag.c 	if (rq_data_dir(req) == READ)
req               516 drivers/s390/block/dasd_diag.c 	else if (rq_data_dir(req) == WRITE)
req               522 drivers/s390/block/dasd_diag.c 	first_rec = blk_rq_pos(req) >> block->s2b_shift;
req               524 drivers/s390/block/dasd_diag.c 		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
req               527 drivers/s390/block/dasd_diag.c 	rq_for_each_segment(bv, req, iter) {
req               540 drivers/s390/block/dasd_diag.c 				   blk_mq_rq_to_pdu(req));
req               548 drivers/s390/block/dasd_diag.c 	rq_for_each_segment(bv, req, iter) {
req               562 drivers/s390/block/dasd_diag.c 	if (blk_noretry_request(req) ||
req               576 drivers/s390/block/dasd_diag.c dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
req               611 drivers/s390/block/dasd_diag.c dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
req              3085 drivers/s390/block/dasd_eckd.c 	struct request *req;
req              3091 drivers/s390/block/dasd_eckd.c 	req = cqr->callback_data;
req              3099 drivers/s390/block/dasd_eckd.c 	first_trk = blk_rq_pos(req) >> block->s2b_shift;
req              3102 drivers/s390/block/dasd_eckd.c 		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
req              3166 drivers/s390/block/dasd_eckd.c 	struct request *req;
req              3173 drivers/s390/block/dasd_eckd.c 	req = (struct request *) cqr->callback_data;
req              3182 drivers/s390/block/dasd_eckd.c 	first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
req              3185 drivers/s390/block/dasd_eckd.c 		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
req              3209 drivers/s390/block/dasd_eckd.c 	rq_for_each_segment(bv, req, iter) {
req              3631 drivers/s390/block/dasd_eckd.c 		  struct request *req, unsigned int first_trk,
req              3653 drivers/s390/block/dasd_eckd.c 	rq = req ? blk_mq_rq_to_pdu(req) : NULL;
req              3829 drivers/s390/block/dasd_eckd.c 					       struct request *req,
req              3856 drivers/s390/block/dasd_eckd.c 	if (rq_data_dir(req) == READ)
req              3858 drivers/s390/block/dasd_eckd.c 	else if (rq_data_dir(req) == WRITE)
req              3866 drivers/s390/block/dasd_eckd.c 	rq_for_each_segment(bv, req, iter) {
req              3904 drivers/s390/block/dasd_eckd.c 				   startdev, blk_mq_rq_to_pdu(req));
req              3941 drivers/s390/block/dasd_eckd.c 	rq_for_each_segment(bv, req, iter) {
req              3946 drivers/s390/block/dasd_eckd.c 			if (copy && rq_data_dir(req) == WRITE)
req              3962 drivers/s390/block/dasd_eckd.c 					    rq_data_dir(req) == READ)
req              3996 drivers/s390/block/dasd_eckd.c 	if (blk_noretry_request(req) ||
req              4021 drivers/s390/block/dasd_eckd.c 					       struct request *req,
req              4049 drivers/s390/block/dasd_eckd.c 	if (rq_data_dir(req) == READ)
req              4051 drivers/s390/block/dasd_eckd.c 	else if (rq_data_dir(req) == WRITE)
req              4071 drivers/s390/block/dasd_eckd.c 				   startdev, blk_mq_rq_to_pdu(req));
req              4108 drivers/s390/block/dasd_eckd.c 	rq_for_each_segment(bv, req, iter) {
req              4175 drivers/s390/block/dasd_eckd.c 	if (blk_noretry_request(req) ||
req              4353 drivers/s390/block/dasd_eckd.c 					       struct request *req,
req              4384 drivers/s390/block/dasd_eckd.c 	if (rq_data_dir(req) == READ) {
req              4387 drivers/s390/block/dasd_eckd.c 	} else if (rq_data_dir(req) == WRITE) {
req              4401 drivers/s390/block/dasd_eckd.c 	rq_for_each_segment(bv, req, iter) {
req              4404 drivers/s390/block/dasd_eckd.c 	if (rq_data_dir(req) == WRITE)
req              4410 drivers/s390/block/dasd_eckd.c 				   blk_mq_rq_to_pdu(req));
req              4448 drivers/s390/block/dasd_eckd.c 	if (rq_data_dir(req) == WRITE) {
req              4451 drivers/s390/block/dasd_eckd.c 		rq_for_each_segment(bv, req, iter) {
req              4484 drivers/s390/block/dasd_eckd.c 		rq_for_each_segment(bv, req, iter) {
req              4498 drivers/s390/block/dasd_eckd.c 	if (blk_noretry_request(req) ||
req              4526 drivers/s390/block/dasd_eckd.c 					       struct request *req)
req              4550 drivers/s390/block/dasd_eckd.c 	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
req              4553 drivers/s390/block/dasd_eckd.c 		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
req              4558 drivers/s390/block/dasd_eckd.c 	data_size = blk_rq_bytes(req);
req              4562 drivers/s390/block/dasd_eckd.c 	if (rq_data_dir(req) == WRITE)
req              4575 drivers/s390/block/dasd_eckd.c 		cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
req              4584 drivers/s390/block/dasd_eckd.c 		   (((rq_data_dir(req) == READ) && cmdrtd) ||
req              4585 drivers/s390/block/dasd_eckd.c 		    ((rq_data_dir(req) == WRITE) && cmdwtd))) {
req              4586 drivers/s390/block/dasd_eckd.c 		cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
req              4596 drivers/s390/block/dasd_eckd.c 		cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
req              4606 drivers/s390/block/dasd_eckd.c 						   struct request *req)
req              4632 drivers/s390/block/dasd_eckd.c 	start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
req              4633 drivers/s390/block/dasd_eckd.c 	end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
req              4639 drivers/s390/block/dasd_eckd.c 	    (rq_data_dir(req) == WRITE)) {
req              4642 drivers/s390/block/dasd_eckd.c 			      start_padding_sectors, end_padding_sectors, req);
req              4646 drivers/s390/block/dasd_eckd.c 	first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
req              4647 drivers/s390/block/dasd_eckd.c 	last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
req              4652 drivers/s390/block/dasd_eckd.c 	if (rq_data_dir(req) == READ)
req              4654 drivers/s390/block/dasd_eckd.c 	else if (rq_data_dir(req) == WRITE)
req              4685 drivers/s390/block/dasd_eckd.c 				   datasize, startdev, blk_mq_rq_to_pdu(req));
req              4720 drivers/s390/block/dasd_eckd.c 	rq_for_each_segment(bv, req, iter) {
req              4742 drivers/s390/block/dasd_eckd.c 	if (blk_noretry_request(req) ||
req              4759 drivers/s390/block/dasd_eckd.c dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
req              4775 drivers/s390/block/dasd_eckd.c 	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
req              4781 drivers/s390/block/dasd_eckd.c 	rq_for_each_segment(bv, req, iter) {
req              4793 drivers/s390/block/dasd_eckd.c 					if (rq_data_dir(req) == READ)
req              4846 drivers/s390/block/dasd_eckd.c 						     struct request *req)
req              4863 drivers/s390/block/dasd_eckd.c 		cqr = dasd_eckd_build_cp_raw(startdev, block, req);
req              4865 drivers/s390/block/dasd_eckd.c 		cqr = dasd_eckd_build_cp(startdev, block, req);
req              4873 drivers/s390/block/dasd_eckd.c 				   struct request *req)
req              4882 drivers/s390/block/dasd_eckd.c 	return dasd_eckd_free_cp(cqr, req);
req              5450 drivers/s390/block/dasd_eckd.c 				 struct dasd_ccw_req *req, struct irb *irb)
req              5469 drivers/s390/block/dasd_eckd.c 		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
req              5472 drivers/s390/block/dasd_eckd.c 		       req ? req->intrc : 0);
req              5510 drivers/s390/block/dasd_eckd.c 	if (req) {
req              5514 drivers/s390/block/dasd_eckd.c 		first = req->cpaddr;
req              5518 drivers/s390/block/dasd_eckd.c 			      " Related CP in req: %p\n", req);
req              5553 drivers/s390/block/dasd_eckd.c 				 struct dasd_ccw_req *req, struct irb *irb)
req              5573 drivers/s390/block/dasd_eckd.c 		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
req              5578 drivers/s390/block/dasd_eckd.c 		       req ? req->intrc : 0);
req              5683 drivers/s390/block/dasd_eckd.c 				 struct dasd_ccw_req *req, struct irb *irb)
req              5694 drivers/s390/block/dasd_eckd.c 		    test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
req              5697 drivers/s390/block/dasd_eckd.c 		    test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
req              5700 drivers/s390/block/dasd_eckd.c 		dasd_eckd_dump_sense_tcw(device, req, irb);
req              5708 drivers/s390/block/dasd_eckd.c 		    test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
req              5712 drivers/s390/block/dasd_eckd.c 		    test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
req              5715 drivers/s390/block/dasd_eckd.c 		dasd_eckd_dump_sense_ccw(device, req, irb);
req               327 drivers/s390/block/dasd_fba.c 						struct request *req)
req               346 drivers/s390/block/dasd_fba.c 	first_rec = blk_rq_pos(req) >> block->s2b_shift;
req               348 drivers/s390/block/dasd_fba.c 		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
req               360 drivers/s390/block/dasd_fba.c 				   blk_mq_rq_to_pdu(req));
req               422 drivers/s390/block/dasd_fba.c 	if (blk_noretry_request(req) ||
req               440 drivers/s390/block/dasd_fba.c 						struct request *req)
req               455 drivers/s390/block/dasd_fba.c 	if (rq_data_dir(req) == READ) {
req               457 drivers/s390/block/dasd_fba.c 	} else if (rq_data_dir(req) == WRITE) {
req               463 drivers/s390/block/dasd_fba.c 	first_rec = blk_rq_pos(req) >> block->s2b_shift;
req               465 drivers/s390/block/dasd_fba.c 		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
req               469 drivers/s390/block/dasd_fba.c 	rq_for_each_segment(bv, req, iter) {
req               495 drivers/s390/block/dasd_fba.c 				   blk_mq_rq_to_pdu(req));
req               500 drivers/s390/block/dasd_fba.c 	define_extent(ccw++, cqr->data, rq_data_dir(req),
req               501 drivers/s390/block/dasd_fba.c 		      block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
req               508 drivers/s390/block/dasd_fba.c 		locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count);
req               511 drivers/s390/block/dasd_fba.c 	rq_for_each_segment(bv, req, iter) {
req               516 drivers/s390/block/dasd_fba.c 			if (copy && rq_data_dir(req) == WRITE)
req               526 drivers/s390/block/dasd_fba.c 					      rq_data_dir(req),
req               551 drivers/s390/block/dasd_fba.c 	if (blk_noretry_request(req) ||
req               566 drivers/s390/block/dasd_fba.c 					      struct request *req)
req               568 drivers/s390/block/dasd_fba.c 	if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_ZEROES)
req               569 drivers/s390/block/dasd_fba.c 		return dasd_fba_build_cp_discard(memdev, block, req);
req               571 drivers/s390/block/dasd_fba.c 		return dasd_fba_build_cp_regular(memdev, block, req);
req               575 drivers/s390/block/dasd_fba.c dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
req               593 drivers/s390/block/dasd_fba.c 	rq_for_each_segment(bv, req, iter) {
req               605 drivers/s390/block/dasd_fba.c 					if (rq_data_dir(req) == READ)
req               668 drivers/s390/block/dasd_fba.c dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
req               685 drivers/s390/block/dasd_fba.c 		       " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
req               711 drivers/s390/block/dasd_fba.c 	act = req->cpaddr;
req               714 drivers/s390/block/dasd_fba.c 	len = sprintf(page, PRINTK_HEADER " Related CP in req: %p\n", req);
req               751 drivers/s390/block/dasd_int.h enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved);
req               146 drivers/s390/block/scm_blk.c static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
req               148 drivers/s390/block/scm_blk.c 	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
req               185 drivers/s390/block/scm_blk.c 	struct request *req = scmrq->request[pos];
req               190 drivers/s390/block/scm_blk.c 	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
req               196 drivers/s390/block/scm_blk.c 	msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
req               197 drivers/s390/block/scm_blk.c 	msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
req               201 drivers/s390/block/scm_blk.c 	rq_for_each_segment(bv, req, iter) {
req               213 drivers/s390/block/scm_blk.c 				   struct request *req)
req               215 drivers/s390/block/scm_blk.c 	scmrq->request[scmrq->aob->request.msb_count] = req;
req               288 drivers/s390/block/scm_blk.c 	struct request *req = qd->rq;
req               292 drivers/s390/block/scm_blk.c 	if (!scm_permit_request(bdev, req)) {
req               308 drivers/s390/block/scm_blk.c 	scm_request_set(scmrq, req);
req               321 drivers/s390/block/scm_blk.c 	blk_mq_start_request(req);
req               417 drivers/s390/block/scm_blk.c static void scm_blk_request_done(struct request *req)
req               419 drivers/s390/block/scm_blk.c 	blk_status_t *error = blk_mq_rq_to_pdu(req);
req               421 drivers/s390/block/scm_blk.c 	blk_mq_end_request(req, *error);
req               111 drivers/s390/char/con3215.c 	struct raw3215_req *req;
req               115 drivers/s390/char/con3215.c 	req = raw3215_freelist;
req               116 drivers/s390/char/con3215.c 	raw3215_freelist = req->next;
req               118 drivers/s390/char/con3215.c 	return req;
req               124 drivers/s390/char/con3215.c static inline void raw3215_free_req(struct raw3215_req *req)
req               128 drivers/s390/char/con3215.c 	if (req->type == RAW3215_FREE)
req               130 drivers/s390/char/con3215.c 	req->type = RAW3215_FREE;
req               132 drivers/s390/char/con3215.c 	req->next = raw3215_freelist;
req               133 drivers/s390/char/con3215.c 	raw3215_freelist = req;
req               145 drivers/s390/char/con3215.c 	struct raw3215_req *req;
req               149 drivers/s390/char/con3215.c 	req = raw->queued_read;
req               150 drivers/s390/char/con3215.c 	if (req == NULL) {
req               152 drivers/s390/char/con3215.c 		req = raw3215_alloc_req();
req               153 drivers/s390/char/con3215.c 		req->type = RAW3215_READ;
req               154 drivers/s390/char/con3215.c 		req->info = raw;
req               155 drivers/s390/char/con3215.c 		raw->queued_read = req;
req               158 drivers/s390/char/con3215.c 	ccw = req->ccws;
req               173 drivers/s390/char/con3215.c 	struct raw3215_req *req;
req               180 drivers/s390/char/con3215.c 	req = raw->queued_write;
req               181 drivers/s390/char/con3215.c 	if (req == NULL) {
req               183 drivers/s390/char/con3215.c 		req = raw3215_alloc_req();
req               184 drivers/s390/char/con3215.c 		req->type = RAW3215_WRITE;
req               185 drivers/s390/char/con3215.c 		req->info = raw;
req               186 drivers/s390/char/con3215.c 		raw->queued_write = req;
req               188 drivers/s390/char/con3215.c 		raw->written -= req->len;
req               191 drivers/s390/char/con3215.c 	ccw = req->ccws;
req               192 drivers/s390/char/con3215.c 	req->start = (raw->head - raw->count + raw->written) &
req               200 drivers/s390/char/con3215.c 	ix = req->start;
req               206 drivers/s390/char/con3215.c 	len = ((ix - 1 - req->start) & (RAW3215_BUFFER_SIZE - 1)) + 1;
req               209 drivers/s390/char/con3215.c 	req->len = len;
req               213 drivers/s390/char/con3215.c 	req->delayable = (ix == raw->head) && (len < RAW3215_MIN_WRITE);
req               215 drivers/s390/char/con3215.c 	ix = req->start;
req               217 drivers/s390/char/con3215.c 		if (ccw > req->ccws)
req               236 drivers/s390/char/con3215.c 	if (ccw > req->ccws)
req               249 drivers/s390/char/con3215.c 	struct raw3215_req *req;
req               252 drivers/s390/char/con3215.c 	req = raw->queued_read;
req               253 drivers/s390/char/con3215.c 	if (req != NULL &&
req               257 drivers/s390/char/con3215.c 		res = ccw_device_start(raw->cdev, req->ccws,
req               258 drivers/s390/char/con3215.c 				       (unsigned long) req, 0, 0);
req               261 drivers/s390/char/con3215.c 			raw->queued_read = req;
req               266 drivers/s390/char/con3215.c 	req = raw->queued_write;
req               267 drivers/s390/char/con3215.c 	if (req != NULL &&
req               271 drivers/s390/char/con3215.c 		res = ccw_device_start(raw->cdev, req->ccws,
req               272 drivers/s390/char/con3215.c 				       (unsigned long) req, 0, 0);
req               275 drivers/s390/char/con3215.c 			raw->queued_write = req;
req               367 drivers/s390/char/con3215.c 	struct raw3215_req *req;
req               373 drivers/s390/char/con3215.c 	req = (struct raw3215_req *) intparm;
req               393 drivers/s390/char/con3215.c 		if ((raw = req->info) == NULL)
req               395 drivers/s390/char/con3215.c 		if (req->type == RAW3215_READ) {
req               397 drivers/s390/char/con3215.c 			req->residual = irb->scsw.cmd.count;
req               404 drivers/s390/char/con3215.c 		if ((raw = req->info) == NULL)
req               406 drivers/s390/char/con3215.c 		if (req->type == RAW3215_READ && tty != NULL) {
req               409 drivers/s390/char/con3215.c 			count = 160 - req->residual;
req               436 drivers/s390/char/con3215.c 		} else if (req->type == RAW3215_WRITE) {
req               437 drivers/s390/char/con3215.c 			raw->count -= req->len;
req               438 drivers/s390/char/con3215.c 			raw->written -= req->len;
req               441 drivers/s390/char/con3215.c 		raw3215_free_req(req);
req               452 drivers/s390/char/con3215.c 		if (req != NULL && req->type != RAW3215_FREE) {
req               453 drivers/s390/char/con3215.c 			if (req->type == RAW3215_WRITE) {
req               454 drivers/s390/char/con3215.c 				raw->count -= req->len;
req               455 drivers/s390/char/con3215.c 				raw->written -= req->len;
req               458 drivers/s390/char/con3215.c 			raw3215_free_req(req);
req               905 drivers/s390/char/con3215.c 	struct raw3215_req *req;
req               922 drivers/s390/char/con3215.c 		req = kzalloc(sizeof(struct raw3215_req), GFP_KERNEL | GFP_DMA);
req               923 drivers/s390/char/con3215.c 		if (!req)
req               925 drivers/s390/char/con3215.c 		req->next = raw3215_freelist;
req               926 drivers/s390/char/con3215.c 		raw3215_freelist = req;
req                61 drivers/s390/char/sclp.c static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
req               205 drivers/s390/char/sclp.c 	struct sclp_req *req;
req               207 drivers/s390/char/sclp.c 	list_for_each_entry(req, &sclp_req_queue, list) {
req               208 drivers/s390/char/sclp.c 		if (!req->queue_expires)
req               211 drivers/s390/char/sclp.c 		   (time_before(req->queue_expires, expires_next)))
req               212 drivers/s390/char/sclp.c 				expires_next = req->queue_expires;
req               223 drivers/s390/char/sclp.c 	struct sclp_req *req;
req               228 drivers/s390/char/sclp.c 	list_for_each_entry(req, &sclp_req_queue, list) {
req               229 drivers/s390/char/sclp.c 		if (!req->queue_expires)
req               231 drivers/s390/char/sclp.c 		if (time_before_eq(req->queue_expires, now)) {
req               232 drivers/s390/char/sclp.c 			if (req->status == SCLP_REQ_QUEUED) {
req               233 drivers/s390/char/sclp.c 				req->status = SCLP_REQ_QUEUED_TIMEOUT;
req               234 drivers/s390/char/sclp.c 				list_del(&req->list);
req               239 drivers/s390/char/sclp.c 	req = NULL;
req               242 drivers/s390/char/sclp.c 	return req;
req               253 drivers/s390/char/sclp.c 	struct sclp_req *req;
req               256 drivers/s390/char/sclp.c 		req = __sclp_req_queue_remove_expired_req();
req               257 drivers/s390/char/sclp.c 		if (req && req->callback)
req               258 drivers/s390/char/sclp.c 			req->callback(req, req->callback_data);
req               259 drivers/s390/char/sclp.c 	} while (req);
req               272 drivers/s390/char/sclp.c __sclp_start_request(struct sclp_req *req)
req               279 drivers/s390/char/sclp.c 	rc = sclp_service_call(req->command, req->sccb);
req               280 drivers/s390/char/sclp.c 	req->start_count++;
req               284 drivers/s390/char/sclp.c 		req->status = SCLP_REQ_RUNNING;
req               296 drivers/s390/char/sclp.c 	req->status = SCLP_REQ_FAILED;
req               304 drivers/s390/char/sclp.c 	struct sclp_req *req;
req               315 drivers/s390/char/sclp.c 		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
req               316 drivers/s390/char/sclp.c 		if (!req->sccb)
req               318 drivers/s390/char/sclp.c 		rc = __sclp_start_request(req);
req               322 drivers/s390/char/sclp.c 		if (req->start_count > 1) {
req               331 drivers/s390/char/sclp.c 		list_del(&req->list);
req               332 drivers/s390/char/sclp.c 		if (req->callback) {
req               334 drivers/s390/char/sclp.c 			req->callback(req, req->callback_data);
req               341 drivers/s390/char/sclp.c static int __sclp_can_add_request(struct sclp_req *req)
req               343 drivers/s390/char/sclp.c 	if (req == &sclp_suspend_req || req == &sclp_init_req)
req               356 drivers/s390/char/sclp.c sclp_add_request(struct sclp_req *req)
req               362 drivers/s390/char/sclp.c 	if (!__sclp_can_add_request(req)) {
req               366 drivers/s390/char/sclp.c 	req->status = SCLP_REQ_QUEUED;
req               367 drivers/s390/char/sclp.c 	req->start_count = 0;
req               368 drivers/s390/char/sclp.c 	list_add_tail(&req->list, &sclp_req_queue);
req               370 drivers/s390/char/sclp.c 	if (req->queue_timeout) {
req               371 drivers/s390/char/sclp.c 		req->queue_expires = jiffies + req->queue_timeout * HZ;
req               373 drivers/s390/char/sclp.c 		    time_after(sclp_queue_timer.expires, req->queue_expires))
req               374 drivers/s390/char/sclp.c 			mod_timer(&sclp_queue_timer, req->queue_expires);
req               376 drivers/s390/char/sclp.c 		req->queue_expires = 0;
req               379 drivers/s390/char/sclp.c 	    req->list.prev == &sclp_req_queue) {
req               380 drivers/s390/char/sclp.c 		if (!req->sccb) {
req               381 drivers/s390/char/sclp.c 			list_del(&req->list);
req               385 drivers/s390/char/sclp.c 		rc = __sclp_start_request(req);
req               387 drivers/s390/char/sclp.c 			list_del(&req->list);
req               438 drivers/s390/char/sclp.c sclp_read_cb(struct sclp_req *req, void *data)
req               443 drivers/s390/char/sclp.c 	sccb = (struct sccb_header *) req->sccb;
req               444 drivers/s390/char/sclp.c 	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
req               476 drivers/s390/char/sclp.c 	struct sclp_req *req;
req               479 drivers/s390/char/sclp.c 		req = list_entry(l, struct sclp_req, list);
req               480 drivers/s390/char/sclp.c 		if (sccb == (u32) (addr_t) req->sccb)
req               481 drivers/s390/char/sclp.c 				return req;
req               492 drivers/s390/char/sclp.c 	struct sclp_req *req;
req               503 drivers/s390/char/sclp.c 		req = __sclp_find_req(finished_sccb);
req               504 drivers/s390/char/sclp.c 		if (req) {
req               506 drivers/s390/char/sclp.c 			list_del(&req->list);
req               507 drivers/s390/char/sclp.c 			req->status = SCLP_REQ_DONE;
req               508 drivers/s390/char/sclp.c 			if (req->callback) {
req               510 drivers/s390/char/sclp.c 				req->callback(req, req->callback_data);
req               299 drivers/s390/char/sclp.h int sclp_add_request(struct sclp_req *req);
req                33 drivers/s390/char/sclp_cmd.c static void sclp_sync_callback(struct sclp_req *req, void *data)
req                72 drivers/s390/char/sclp_cpi_sys.c static void cpi_callback(struct sclp_req *req, void *data)
req                81 drivers/s390/char/sclp_cpi_sys.c 	struct sclp_req *req;
req                85 drivers/s390/char/sclp_cpi_sys.c 	req = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
req                86 drivers/s390/char/sclp_cpi_sys.c 	if (!req)
req                90 drivers/s390/char/sclp_cpi_sys.c 		kfree(req);
req               113 drivers/s390/char/sclp_cpi_sys.c 	req->command = SCLP_CMDW_WRITE_EVENT_DATA;
req               114 drivers/s390/char/sclp_cpi_sys.c 	req->sccb = sccb;
req               115 drivers/s390/char/sclp_cpi_sys.c 	req->status = SCLP_REQ_FILLED;
req               116 drivers/s390/char/sclp_cpi_sys.c 	req->callback = cpi_callback;
req               117 drivers/s390/char/sclp_cpi_sys.c 	return req;
req               120 drivers/s390/char/sclp_cpi_sys.c static void cpi_free_req(struct sclp_req *req)
req               122 drivers/s390/char/sclp_cpi_sys.c 	free_page((unsigned long) req->sccb);
req               123 drivers/s390/char/sclp_cpi_sys.c 	kfree(req);
req               129 drivers/s390/char/sclp_cpi_sys.c 	struct sclp_req *req;
req               141 drivers/s390/char/sclp_cpi_sys.c 	req = cpi_prepare_req();
req               142 drivers/s390/char/sclp_cpi_sys.c 	if (IS_ERR(req)) {
req               143 drivers/s390/char/sclp_cpi_sys.c 		rc = PTR_ERR(req);
req               148 drivers/s390/char/sclp_cpi_sys.c 	req->callback_data = &completion;
req               151 drivers/s390/char/sclp_cpi_sys.c 	rc = sclp_add_request(req);
req               157 drivers/s390/char/sclp_cpi_sys.c 	if (req->status != SCLP_REQ_DONE) {
req               158 drivers/s390/char/sclp_cpi_sys.c 		pr_warn("request failed (status=0x%02x)\n", req->status);
req               163 drivers/s390/char/sclp_cpi_sys.c 	response = ((struct cpi_sccb *) req->sccb)->header.response_code;
req               170 drivers/s390/char/sclp_cpi_sys.c 	cpi_free_req(req);
req                35 drivers/s390/char/sclp_ftp.c static void sclp_ftp_txcb(struct sclp_req *req, void *data)
req                41 drivers/s390/char/sclp_ftp.c 		 req->sccb, 24, req->sccb);
req                89 drivers/s390/char/sclp_ftp.c 	struct sclp_req *req;
req                93 drivers/s390/char/sclp_ftp.c 	req = kzalloc(sizeof(*req), GFP_KERNEL);
req                95 drivers/s390/char/sclp_ftp.c 	if (!req || !sccb) {
req               124 drivers/s390/char/sclp_ftp.c 	req->command = SCLP_CMDW_WRITE_EVENT_DATA;
req               125 drivers/s390/char/sclp_ftp.c 	req->sccb = sccb;
req               126 drivers/s390/char/sclp_ftp.c 	req->status = SCLP_REQ_FILLED;
req               127 drivers/s390/char/sclp_ftp.c 	req->callback = sclp_ftp_txcb;
req               128 drivers/s390/char/sclp_ftp.c 	req->callback_data = &completion;
req               132 drivers/s390/char/sclp_ftp.c 	rc = sclp_add_request(req);
req               149 drivers/s390/char/sclp_ftp.c 	if (req->status != SCLP_REQ_DONE ||
req               157 drivers/s390/char/sclp_ftp.c 	kfree(req);
req               103 drivers/s390/char/sclp_pci.c static void sclp_pci_callback(struct sclp_req *req, void *data)
req               134 drivers/s390/char/sclp_pci.c 	struct sclp_req req;
req               157 drivers/s390/char/sclp_pci.c 	memset(&req, 0, sizeof(req));
req               158 drivers/s390/char/sclp_pci.c 	req.callback_data = &completion;
req               159 drivers/s390/char/sclp_pci.c 	req.callback = sclp_pci_callback;
req               160 drivers/s390/char/sclp_pci.c 	req.command = SCLP_CMDW_WRITE_EVENT_DATA;
req               161 drivers/s390/char/sclp_pci.c 	req.status = SCLP_REQ_FILLED;
req               162 drivers/s390/char/sclp_pci.c 	req.sccb = sccb;
req               175 drivers/s390/char/sclp_pci.c 	ret = sclp_add_request(&req);
req               180 drivers/s390/char/sclp_pci.c 	if (req.status != SCLP_REQ_DONE) {
req               182 drivers/s390/char/sclp_pci.c 			req.status);
req                59 drivers/s390/char/sclp_sdias.c static int sdias_sclp_send(struct sclp_req *req)
req                67 drivers/s390/char/sclp_sdias.c 		rc = sclp_add_request(req);
req                77 drivers/s390/char/sclp_sdias.c 		if (req->status == SCLP_REQ_FAILED) {
req               144 drivers/s390/char/tape_core.c 		struct tape_request *req;
req               146 drivers/s390/char/tape_core.c 		req = list_entry(tdev->req_queue.next, struct tape_request,
req               148 drivers/s390/char/tape_core.c 		rc = scnprintf(buf,PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
req              1155 drivers/s390/char/tape_core.c 		struct tape_request *req;
req              1156 drivers/s390/char/tape_core.c 		req = list_entry(device->req_queue.next,
req              1158 drivers/s390/char/tape_core.c 		if (req->status == TAPE_REQUEST_LONG_BUSY) {
req                43 drivers/s390/cio/ccwreq.c 	struct ccw_request *req = &cdev->private->req;
req                45 drivers/s390/cio/ccwreq.c 	if (!req->singlepath) {
req                46 drivers/s390/cio/ccwreq.c 		req->mask = 0;
req                49 drivers/s390/cio/ccwreq.c 	req->retries	= req->maxretries;
req                50 drivers/s390/cio/ccwreq.c 	req->mask	= lpm_adjust(req->mask >> 1, req->lpm);
req                52 drivers/s390/cio/ccwreq.c 	return req->mask;
req                60 drivers/s390/cio/ccwreq.c 	struct ccw_request *req = &cdev->private->req;
req                62 drivers/s390/cio/ccwreq.c 	if (req->done)
req                64 drivers/s390/cio/ccwreq.c 	req->done = 1;
req                67 drivers/s390/cio/ccwreq.c 	if (rc && rc != -ENODEV && req->drc)
req                68 drivers/s390/cio/ccwreq.c 		rc = req->drc;
req                69 drivers/s390/cio/ccwreq.c 	req->callback(cdev, req->data, rc);
req                77 drivers/s390/cio/ccwreq.c 	struct ccw_request *req = &cdev->private->req;
req                79 drivers/s390/cio/ccwreq.c 	struct ccw1 *cp = req->cp;
req                82 drivers/s390/cio/ccwreq.c 	while (req->mask) {
req                83 drivers/s390/cio/ccwreq.c 		if (req->retries-- == 0) {
req                90 drivers/s390/cio/ccwreq.c 		rc = cio_start(sch, cp, (u8) req->mask);
req                93 drivers/s390/cio/ccwreq.c 			ccw_device_set_timeout(cdev, req->timeout);
req               122 drivers/s390/cio/ccwreq.c 	struct ccw_request *req = &cdev->private->req;
req               124 drivers/s390/cio/ccwreq.c 	if (req->singlepath) {
req               126 drivers/s390/cio/ccwreq.c 		req->mask = 0x8080;
req               128 drivers/s390/cio/ccwreq.c 		req->mask = req->lpm;
req               130 drivers/s390/cio/ccwreq.c 	req->retries	= req->maxretries;
req               131 drivers/s390/cio/ccwreq.c 	req->mask	= lpm_adjust(req->mask, req->lpm);
req               132 drivers/s390/cio/ccwreq.c 	req->drc	= 0;
req               133 drivers/s390/cio/ccwreq.c 	req->done	= 0;
req               134 drivers/s390/cio/ccwreq.c 	req->cancel	= 0;
req               135 drivers/s390/cio/ccwreq.c 	if (!req->mask)
req               154 drivers/s390/cio/ccwreq.c 	struct ccw_request *req = &cdev->private->req;
req               157 drivers/s390/cio/ccwreq.c 	if (req->done)
req               159 drivers/s390/cio/ccwreq.c 	req->cancel = 1;
req               234 drivers/s390/cio/ccwreq.c 	struct ccw_request *req = &cdev->private->req;
req               242 drivers/s390/cio/ccwreq.c 	data.retries	= req->retries;
req               243 drivers/s390/cio/ccwreq.c 	data.lpm	= (u8) req->mask;
req               258 drivers/s390/cio/ccwreq.c 	struct ccw_request *req = &cdev->private->req;
req               264 drivers/s390/cio/ccwreq.c 	if (req->filter)
req               265 drivers/s390/cio/ccwreq.c 		status = req->filter(cdev, req->data, irb, status);
req               283 drivers/s390/cio/ccwreq.c 		if (req->cancel) {
req               290 drivers/s390/cio/ccwreq.c 	if (!req->check)
req               292 drivers/s390/cio/ccwreq.c 	switch (req->check(cdev, req->data)) {
req               330 drivers/s390/cio/ccwreq.c 	struct ccw_request *req = &cdev->private->req;
req               339 drivers/s390/cio/ccwreq.c 				dev_name(&cdev->dev), req->timeout / HZ,
req               348 drivers/s390/cio/ccwreq.c 		req->drc = -ETIME;
req               748 drivers/s390/cio/chsc_sch.c 	sccl_area->fmt = ccl->req.fmt;
req               749 drivers/s390/cio/chsc_sch.c 	sccl_area->ctype = ccl->req.ctype;
req               754 drivers/s390/cio/chsc_sch.c 		chpid_parm->m = ccl->req.chpid.m;
req               755 drivers/s390/cio/chsc_sch.c 		chpid_parm->cssid = ccl->req.chpid.chp.cssid;
req               756 drivers/s390/cio/chsc_sch.c 		chpid_parm->chpid = ccl->req.chpid.chp.id;
req               761 drivers/s390/cio/chsc_sch.c 		cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
req               762 drivers/s390/cio/chsc_sch.c 		cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
req               848 drivers/s390/cio/chsc_sch.c 	sdcal_area->atype = dcal->req.atype;
req               849 drivers/s390/cio/chsc_sch.c 	sdcal_area->fmt = dcal->req.fmt;
req               850 drivers/s390/cio/chsc_sch.c 	memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
req               204 drivers/s390/cio/device_id.c 	struct ccw_request *req = &cdev->private->req;
req               217 drivers/s390/cio/device_id.c 	memset(req, 0, sizeof(*req));
req               218 drivers/s390/cio/device_id.c 	req->cp		= cp;
req               219 drivers/s390/cio/device_id.c 	req->timeout	= SENSE_ID_TIMEOUT;
req               220 drivers/s390/cio/device_id.c 	req->maxretries	= SENSE_ID_RETRIES;
req               221 drivers/s390/cio/device_id.c 	req->lpm	= sch->schib.pmcw.pam & sch->opm;
req               222 drivers/s390/cio/device_id.c 	req->check	= snsid_check;
req               223 drivers/s390/cio/device_id.c 	req->callback	= snsid_callback;
req                59 drivers/s390/cio/device_pgid.c 	struct ccw_request *req = &cdev->private->req;
req                66 drivers/s390/cio/device_pgid.c 	req->cp		= cp;
req                75 drivers/s390/cio/device_pgid.c 	struct ccw_request *req = &cdev->private->req;
req                77 drivers/s390/cio/device_pgid.c 	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
req                79 drivers/s390/cio/device_pgid.c 	if (!req->lpm)
req               107 drivers/s390/cio/device_pgid.c 	struct ccw_request *req = &cdev->private->req;
req               111 drivers/s390/cio/device_pgid.c 		sch->vpm |= req->lpm;
req               114 drivers/s390/cio/device_pgid.c 		cdev->private->path_noirq_mask |= req->lpm;
req               117 drivers/s390/cio/device_pgid.c 		cdev->private->path_notoper_mask |= req->lpm;
req               123 drivers/s390/cio/device_pgid.c 	req->lpm >>= 1;
req               136 drivers/s390/cio/device_pgid.c 	struct ccw_request *req = &cdev->private->req;
req               138 drivers/s390/cio/device_pgid.c 	int i = pathmask_to_pos(req->lpm);
req               146 drivers/s390/cio/device_pgid.c 	req->cp		= cp;
req               171 drivers/s390/cio/device_pgid.c 	struct ccw_request *req = &cdev->private->req;
req               179 drivers/s390/cio/device_pgid.c 	memset(req, 0, sizeof(*req));
req               180 drivers/s390/cio/device_pgid.c 	req->timeout	= PGID_TIMEOUT;
req               181 drivers/s390/cio/device_pgid.c 	req->maxretries	= PGID_RETRIES;
req               182 drivers/s390/cio/device_pgid.c 	req->lpm	= sch->schib.pmcw.pam;
req               183 drivers/s390/cio/device_pgid.c 	req->callback	= pgid_wipeout_callback;
req               197 drivers/s390/cio/device_pgid.c 	struct ccw_request *req = &cdev->private->req;
req               201 drivers/s390/cio/device_pgid.c 	req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
req               202 drivers/s390/cio/device_pgid.c 	if (!req->lpm)
req               205 drivers/s390/cio/device_pgid.c 	if (req->lpm & sch->opm)
req               230 drivers/s390/cio/device_pgid.c 	struct ccw_request *req = &cdev->private->req;
req               234 drivers/s390/cio/device_pgid.c 		sch->vpm |= req->lpm & sch->opm;
req               238 drivers/s390/cio/device_pgid.c 		cdev->private->path_noirq_mask |= req->lpm;
req               241 drivers/s390/cio/device_pgid.c 		cdev->private->path_notoper_mask |= req->lpm;
req               255 drivers/s390/cio/device_pgid.c 	req->lpm >>= 1;
req               268 drivers/s390/cio/device_pgid.c 	struct ccw_request *req = &cdev->private->req;
req               271 drivers/s390/cio/device_pgid.c 	memset(req, 0, sizeof(*req));
req               272 drivers/s390/cio/device_pgid.c 	req->timeout	= PGID_TIMEOUT;
req               273 drivers/s390/cio/device_pgid.c 	req->maxretries	= PGID_RETRIES;
req               274 drivers/s390/cio/device_pgid.c 	req->lpm	= 0x80;
req               275 drivers/s390/cio/device_pgid.c 	req->singlepath	= 1;
req               276 drivers/s390/cio/device_pgid.c 	req->callback	= spid_callback;
req               438 drivers/s390/cio/device_pgid.c 	struct ccw_request *req = &cdev->private->req;
req               440 drivers/s390/cio/device_pgid.c 	int i = pathmask_to_pos(req->lpm);
req               447 drivers/s390/cio/device_pgid.c 	req->cp		= cp;
req               456 drivers/s390/cio/device_pgid.c 	struct ccw_request *req = &cdev->private->req;
req               459 drivers/s390/cio/device_pgid.c 	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
req               461 drivers/s390/cio/device_pgid.c 	if (!req->lpm)
req               482 drivers/s390/cio/device_pgid.c 	struct ccw_request *req = &cdev->private->req;
req               486 drivers/s390/cio/device_pgid.c 		cdev->private->pgid_valid_mask |= req->lpm;
req               490 drivers/s390/cio/device_pgid.c 		cdev->private->path_noirq_mask |= req->lpm;
req               493 drivers/s390/cio/device_pgid.c 		cdev->private->path_notoper_mask |= req->lpm;
req               499 drivers/s390/cio/device_pgid.c 	req->lpm >>= 1;
req               513 drivers/s390/cio/device_pgid.c 	struct ccw_request *req = &cdev->private->req;
req               527 drivers/s390/cio/device_pgid.c 	memset(req, 0, sizeof(*req));
req               528 drivers/s390/cio/device_pgid.c 	req->timeout	= PGID_TIMEOUT;
req               529 drivers/s390/cio/device_pgid.c 	req->maxretries	= PGID_RETRIES;
req               530 drivers/s390/cio/device_pgid.c 	req->lpm	= 0x80;
req               531 drivers/s390/cio/device_pgid.c 	req->singlepath	= 1;
req               535 drivers/s390/cio/device_pgid.c 		req->callback	= snid_callback;
req               540 drivers/s390/cio/device_pgid.c 		req->filter	= nop_filter;
req               541 drivers/s390/cio/device_pgid.c 		req->callback	= nop_callback;
req               604 drivers/s390/cio/device_pgid.c 	struct ccw_request *req = &cdev->private->req;
req               610 drivers/s390/cio/device_pgid.c 	memset(req, 0, sizeof(*req));
req               611 drivers/s390/cio/device_pgid.c 	req->timeout	= PGID_TIMEOUT;
req               612 drivers/s390/cio/device_pgid.c 	req->maxretries	= PGID_RETRIES;
req               613 drivers/s390/cio/device_pgid.c 	req->lpm	= sch->schib.pmcw.pam & sch->opm;
req               614 drivers/s390/cio/device_pgid.c 	req->singlepath	= 1;
req               615 drivers/s390/cio/device_pgid.c 	req->callback	= disband_callback;
req               630 drivers/s390/cio/device_pgid.c 	struct ccw_request *req = &cdev->private->req;
req               641 drivers/s390/cio/device_pgid.c 	req->cp = cp;
req               665 drivers/s390/cio/device_pgid.c 	struct ccw_request *req = &cdev->private->req;
req               670 drivers/s390/cio/device_pgid.c 	memset(req, 0, sizeof(*req));
req               671 drivers/s390/cio/device_pgid.c 	req->timeout	= PGID_TIMEOUT;
req               672 drivers/s390/cio/device_pgid.c 	req->maxretries	= PGID_RETRIES;
req               673 drivers/s390/cio/device_pgid.c 	req->lpm	= sch->schib.pmcw.pam & sch->opm;
req               674 drivers/s390/cio/device_pgid.c 	req->data	= data;
req               675 drivers/s390/cio/device_pgid.c 	req->callback	= stlck_callback;
req               136 drivers/s390/cio/io_sch.h 	struct ccw_request req;		/* internal I/O request */
req               549 drivers/s390/crypto/zcrypt_msgtype6.c 			   (char __force __user *)xcRB->req, xcRB->req_len)) {
req               215 drivers/s390/net/ism.h 	u64 req = ZPCI_CREATE_REQ(zdev->fh, 2, 8);
req               218 drivers/s390/net/ism.h 		__zpci_load(data, req, offset);
req               229 drivers/s390/net/ism.h 	u64 req = ZPCI_CREATE_REQ(zdev->fh, 2, len);
req               232 drivers/s390/net/ism.h 		__zpci_store_block(data, req, offset);
req               239 drivers/s390/net/ism.h 	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, size);
req               241 drivers/s390/net/ism.h 	return __zpci_store_block(data, req, dmb_req);
req                38 drivers/s390/net/ism_drv.c 	struct ism_req_hdr *req = cmd;
req                41 drivers/s390/net/ism_drv.c 	__ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
req                42 drivers/s390/net/ism_drv.c 	__ism_write_cmd(ism, req, 0, sizeof(*req));
req                70 drivers/s390/scsi/zfcp_dbf.c void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
req                72 drivers/s390/scsi/zfcp_dbf.c 	struct zfcp_dbf *dbf = req->adapter->dbf;
req                73 drivers/s390/scsi/zfcp_dbf.c 	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
req                74 drivers/s390/scsi/zfcp_dbf.c 	struct fsf_qtcb_header *q_head = &req->qtcb->header;
req                83 drivers/s390/scsi/zfcp_dbf.c 	rec->fsf_req_id = req->req_id;
req                84 drivers/s390/scsi/zfcp_dbf.c 	rec->fsf_req_status = req->status;
req                87 drivers/s390/scsi/zfcp_dbf.c 	rec->u.res.req_issued = req->issued;
req               100 drivers/s390/scsi/zfcp_dbf.c 			  rec->pl_len, "fsf_res", req->req_id);
req               111 drivers/s390/scsi/zfcp_dbf.c void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
req               113 drivers/s390/scsi/zfcp_dbf.c 	struct zfcp_dbf *dbf = req->adapter->dbf;
req               114 drivers/s390/scsi/zfcp_dbf.c 	struct fsf_status_read_buffer *srb = req->data;
req               127 drivers/s390/scsi/zfcp_dbf.c 	rec->fsf_req_id = req->req_id;
req               128 drivers/s390/scsi/zfcp_dbf.c 	rec->fsf_req_status = req->status;
req               147 drivers/s390/scsi/zfcp_dbf.c 				  "fsf_uss", req->req_id);
req               158 drivers/s390/scsi/zfcp_dbf.c void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
req               160 drivers/s390/scsi/zfcp_dbf.c 	struct zfcp_dbf *dbf = req->adapter->dbf;
req               162 drivers/s390/scsi/zfcp_dbf.c 	struct fsf_status_read_buffer *sr_buf = req->data;
req               174 drivers/s390/scsi/zfcp_dbf.c 	rec->fsf_req_id = req->req_id;
req               175 drivers/s390/scsi/zfcp_dbf.c 	rec->fsf_req_status = req->status;
req               494 drivers/s390/scsi/zfcp_dbf.c 	length = (u16)zfcp_qdio_real_bytes(ct_els->req);
req               495 drivers/s390/scsi/zfcp_dbf.c 	zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
req               504 drivers/s390/scsi/zfcp_dbf.c 	struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
req               304 drivers/s390/scsi/zfcp_dbf.h bool zfcp_dbf_hba_fsf_resp_suppress(struct zfcp_fsf_req *req)
req               306 drivers/s390/scsi/zfcp_dbf.h 	struct fsf_qtcb *qtcb = req->qtcb;
req               322 drivers/s390/scsi/zfcp_dbf.h void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
req               324 drivers/s390/scsi/zfcp_dbf.h 	if (debug_level_enabled(req->adapter->dbf->hba, level))
req               325 drivers/s390/scsi/zfcp_dbf.h 		zfcp_dbf_hba_fsf_res(tag, level, req);
req               333 drivers/s390/scsi/zfcp_dbf.h void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
req               335 drivers/s390/scsi/zfcp_dbf.h 	struct fsf_qtcb *qtcb = req->qtcb;
req               337 drivers/s390/scsi/zfcp_dbf.h 	if (unlikely(req->status & (ZFCP_STATUS_FSFREQ_DISMISSED |
req               339 drivers/s390/scsi/zfcp_dbf.h 		zfcp_dbf_hba_fsf_resp("fs_rerr", 3, req);
req               343 drivers/s390/scsi/zfcp_dbf.h 		zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
req               347 drivers/s390/scsi/zfcp_dbf.h 				      zfcp_dbf_hba_fsf_resp_suppress(req)
req               348 drivers/s390/scsi/zfcp_dbf.h 				      ? 5 : 1, req);
req               352 drivers/s390/scsi/zfcp_dbf.h 		zfcp_dbf_hba_fsf_resp("fs_open", 4, req);
req               355 drivers/s390/scsi/zfcp_dbf.h 		zfcp_dbf_hba_fsf_resp("fs_qtcb", 5, req);
req               358 drivers/s390/scsi/zfcp_dbf.h 		zfcp_dbf_hba_fsf_resp("fs_norm", 6, req);
req               364 drivers/s390/scsi/zfcp_dbf.h 		   struct zfcp_fsf_req *req)
req               370 drivers/s390/scsi/zfcp_dbf.h 		zfcp_dbf_scsi_common(tag, level, scmd->device, scmd, req);
req               379 drivers/s390/scsi/zfcp_dbf.h void zfcp_dbf_scsi_result(struct scsi_cmnd *scmd, struct zfcp_fsf_req *req)
req               382 drivers/s390/scsi/zfcp_dbf.h 		_zfcp_dbf_scsi("rsl_err", 3, scmd, req);
req               384 drivers/s390/scsi/zfcp_dbf.h 		_zfcp_dbf_scsi("rsl_ret", 4, scmd, req);
req               386 drivers/s390/scsi/zfcp_dbf.h 		_zfcp_dbf_scsi("rsl_nor", 6, scmd, req);
req               342 drivers/s390/scsi/zfcp_def.h static inline bool zfcp_fsf_req_is_status_read_buffer(struct zfcp_fsf_req *req)
req               344 drivers/s390/scsi/zfcp_def.h 	return req->qtcb == NULL;
req               567 drivers/s390/scsi/zfcp_erp.c 	struct zfcp_fsf_req *req;
req               573 drivers/s390/scsi/zfcp_erp.c 	req = _zfcp_reqlist_find(adapter->req_list, act->fsf_req_id);
req               574 drivers/s390/scsi/zfcp_erp.c 	if (req && req->erp_action == act) {
req               577 drivers/s390/scsi/zfcp_erp.c 			req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
req               579 drivers/s390/scsi/zfcp_erp.c 			req->erp_action = NULL;
req               583 drivers/s390/scsi/zfcp_erp.c 		if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
req               291 drivers/s390/scsi/zfcp_fc.c static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
req               294 drivers/s390/scsi/zfcp_fc.c 	struct zfcp_adapter *adapter = req->adapter;
req               306 drivers/s390/scsi/zfcp_fc.c static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
req               311 drivers/s390/scsi/zfcp_fc.c 	status_buffer = (struct fsf_status_read_buffer *) req->data;
req               313 drivers/s390/scsi/zfcp_fc.c 	zfcp_fc_incoming_wwpn(req, be64_to_cpu(plogi->fl_wwpn));
req               316 drivers/s390/scsi/zfcp_fc.c static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
req               319 drivers/s390/scsi/zfcp_fc.c 		(struct fsf_status_read_buffer *)req->data;
req               323 drivers/s390/scsi/zfcp_fc.c 	zfcp_fc_incoming_wwpn(req, be64_to_cpu(logo->fl_n_port_wwn));
req               378 drivers/s390/scsi/zfcp_fc.c 	struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req;
req               386 drivers/s390/scsi/zfcp_fc.c 	fc_req->ct_els.req = &fc_req->sg_req;
req               544 drivers/s390/scsi/zfcp_fc.c 	fc_req->ct_els.req = &fc_req->sg_req;
req               546 drivers/s390/scsi/zfcp_fc.c 	sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req,
req               556 drivers/s390/scsi/zfcp_fc.c 	fc_req->u.adisc.req.adisc_wwpn = cpu_to_be64(fc_host_port_name(shost));
req               557 drivers/s390/scsi/zfcp_fc.c 	fc_req->u.adisc.req.adisc_wwnn = cpu_to_be64(fc_host_node_name(shost));
req               558 drivers/s390/scsi/zfcp_fc.c 	fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
req               559 drivers/s390/scsi/zfcp_fc.c 	hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
req               668 drivers/s390/scsi/zfcp_fc.c 	sg_init_one(&fc_req->sg_req, &fc_req->u.gpn_ft.req,
req               678 drivers/s390/scsi/zfcp_fc.c 	struct zfcp_fc_gpn_ft_req *req = &fc_req->u.gpn_ft.req;
req               682 drivers/s390/scsi/zfcp_fc.c 	zfcp_fc_ct_ns_init(&req->ct_hdr, FC_NS_GPN_FT, max_bytes);
req               683 drivers/s390/scsi/zfcp_fc.c 	req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;
req               687 drivers/s390/scsi/zfcp_fc.c 	ct_els->req = &fc_req->sg_req;
req               833 drivers/s390/scsi/zfcp_fc.c 	struct zfcp_fc_gspn_req *gspn_req = &fc_req->u.gspn.req;
req               846 drivers/s390/scsi/zfcp_fc.c 	ct_els->req = &fc_req->sg_req;
req               878 drivers/s390/scsi/zfcp_fc.c 	struct zfcp_fc_rspn_req *rspn_req = &fc_req->u.rspn.req;
req               894 drivers/s390/scsi/zfcp_fc.c 	ct_els->req = &fc_req->sg_req;
req              1060 drivers/s390/scsi/zfcp_fc.c 	ct_els->req = job->request_payload.sg_list;
req               149 drivers/s390/scsi/zfcp_fc.h 			struct fc_els_adisc		req;
req               153 drivers/s390/scsi/zfcp_fc.h 			struct zfcp_fc_gid_pn_req	req;
req               158 drivers/s390/scsi/zfcp_fc.h 			struct zfcp_fc_gpn_ft_req	req;
req               161 drivers/s390/scsi/zfcp_fc.h 			struct zfcp_fc_gspn_req		req;
req               165 drivers/s390/scsi/zfcp_fc.h 			struct zfcp_fc_rspn_req		req;
req                78 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
req                80 drivers/s390/scsi/zfcp_fsf.c 	dev_err(&req->adapter->ccw_device->dev, "FCP device not "
req                82 drivers/s390/scsi/zfcp_fsf.c 	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
req                83 drivers/s390/scsi/zfcp_fsf.c 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req                90 drivers/s390/scsi/zfcp_fsf.c void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
req                92 drivers/s390/scsi/zfcp_fsf.c 	if (likely(req->pool)) {
req                93 drivers/s390/scsi/zfcp_fsf.c 		if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
req                94 drivers/s390/scsi/zfcp_fsf.c 			mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
req                95 drivers/s390/scsi/zfcp_fsf.c 		mempool_free(req, req->pool);
req                99 drivers/s390/scsi/zfcp_fsf.c 	if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
req               100 drivers/s390/scsi/zfcp_fsf.c 		kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
req               101 drivers/s390/scsi/zfcp_fsf.c 	kfree(req);
req               104 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
req               107 drivers/s390/scsi/zfcp_fsf.c 	struct fsf_status_read_buffer *sr_buf = req->data;
req               108 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_adapter *adapter = req->adapter;
req               121 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
req               124 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_adapter *adapter = req->adapter;
req               138 drivers/s390/scsi/zfcp_fsf.c 		dev_warn(&req->adapter->ccw_device->dev,
req               143 drivers/s390/scsi/zfcp_fsf.c 		dev_warn(&req->adapter->ccw_device->dev,
req               148 drivers/s390/scsi/zfcp_fsf.c 		dev_warn(&req->adapter->ccw_device->dev,
req               153 drivers/s390/scsi/zfcp_fsf.c 		dev_warn(&req->adapter->ccw_device->dev,
req               158 drivers/s390/scsi/zfcp_fsf.c 		dev_warn(&req->adapter->ccw_device->dev,
req               163 drivers/s390/scsi/zfcp_fsf.c 		dev_warn(&req->adapter->ccw_device->dev,
req               167 drivers/s390/scsi/zfcp_fsf.c 		dev_warn(&req->adapter->ccw_device->dev,
req               171 drivers/s390/scsi/zfcp_fsf.c 		dev_warn(&req->adapter->ccw_device->dev,
req               176 drivers/s390/scsi/zfcp_fsf.c 		dev_warn(&req->adapter->ccw_device->dev,
req               181 drivers/s390/scsi/zfcp_fsf.c 		dev_warn(&req->adapter->ccw_device->dev,
req               186 drivers/s390/scsi/zfcp_fsf.c 		dev_warn(&req->adapter->ccw_device->dev,
req               191 drivers/s390/scsi/zfcp_fsf.c 		dev_warn(&req->adapter->ccw_device->dev,
req               196 drivers/s390/scsi/zfcp_fsf.c 		dev_warn(&req->adapter->ccw_device->dev,
req               204 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
req               206 drivers/s390/scsi/zfcp_fsf.c 	struct fsf_status_read_buffer *sr_buf = req->data;
req               213 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_link_down_info_eval(req, ldi);
req               216 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_link_down_info_eval(req, NULL);
req               220 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
req               222 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_adapter *adapter = req->adapter;
req               223 drivers/s390/scsi/zfcp_fsf.c 	struct fsf_status_read_buffer *sr_buf = req->data;
req               225 drivers/s390/scsi/zfcp_fsf.c 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
req               226 drivers/s390/scsi/zfcp_fsf.c 		zfcp_dbf_hba_fsf_uss("fssrh_1", req);
req               228 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_req_free(req);
req               232 drivers/s390/scsi/zfcp_fsf.c 	zfcp_dbf_hba_fsf_uss("fssrh_4", req);
req               236 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_status_read_port_closed(req);
req               239 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fc_incoming_els(req);
req               244 drivers/s390/scsi/zfcp_fsf.c 		zfcp_dbf_hba_bit_err("fssrh_3", req);
req               255 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_status_read_link_down(req);
req               281 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_req_free(req);
req               287 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
req               289 drivers/s390/scsi/zfcp_fsf.c 	switch (req->qtcb->header.fsf_status_qual.word[0]) {
req               298 drivers/s390/scsi/zfcp_fsf.c 		dev_err(&req->adapter->ccw_device->dev,
req               301 drivers/s390/scsi/zfcp_fsf.c 		zfcp_qdio_siosl(req->adapter);
req               302 drivers/s390/scsi/zfcp_fsf.c 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
req               306 drivers/s390/scsi/zfcp_fsf.c 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req               309 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
req               311 drivers/s390/scsi/zfcp_fsf.c 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
req               314 drivers/s390/scsi/zfcp_fsf.c 	switch (req->qtcb->header.fsf_status) {
req               316 drivers/s390/scsi/zfcp_fsf.c 		dev_err(&req->adapter->ccw_device->dev,
req               318 drivers/s390/scsi/zfcp_fsf.c 			req->qtcb->header.fsf_command);
req               319 drivers/s390/scsi/zfcp_fsf.c 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
req               320 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req               323 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_fsfstatus_qual_eval(req);
req               328 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
req               330 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_adapter *adapter = req->adapter;
req               331 drivers/s390/scsi/zfcp_fsf.c 	struct fsf_qtcb *qtcb = req->qtcb;
req               334 drivers/s390/scsi/zfcp_fsf.c 	zfcp_dbf_hba_fsf_response(req);
req               336 drivers/s390/scsi/zfcp_fsf.c 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
req               337 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req               355 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req               373 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
req               393 drivers/s390/scsi/zfcp_fsf.c 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req               405 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
req               407 drivers/s390/scsi/zfcp_fsf.c 	if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) {
req               408 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_status_read_handler(req);
req               412 drivers/s390/scsi/zfcp_fsf.c 	del_timer(&req->timer);
req               413 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_protstatus_eval(req);
req               414 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_fsfstatus_eval(req);
req               415 drivers/s390/scsi/zfcp_fsf.c 	req->handler(req);
req               417 drivers/s390/scsi/zfcp_fsf.c 	if (req->erp_action)
req               418 drivers/s390/scsi/zfcp_fsf.c 		zfcp_erp_notify(req->erp_action, 0);
req               420 drivers/s390/scsi/zfcp_fsf.c 	if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
req               421 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_req_free(req);
req               423 drivers/s390/scsi/zfcp_fsf.c 		complete(&req->completion);
req               437 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *req, *tmp;
req               443 drivers/s390/scsi/zfcp_fsf.c 	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
req               444 drivers/s390/scsi/zfcp_fsf.c 		list_del(&req->list);
req               445 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
req               446 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_req_complete(req);
req               487 drivers/s390/scsi/zfcp_fsf.c static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
req               489 drivers/s390/scsi/zfcp_fsf.c 	struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
req               490 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_adapter *adapter = req->adapter;
req               500 drivers/s390/scsi/zfcp_fsf.c 	if (req->data)
req               501 drivers/s390/scsi/zfcp_fsf.c 		memcpy(req->data, bottom, sizeof(*bottom));
req               518 drivers/s390/scsi/zfcp_fsf.c 	if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
req               554 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
req               556 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_adapter *adapter = req->adapter;
req               557 drivers/s390/scsi/zfcp_fsf.c 	struct fsf_qtcb *qtcb = req->qtcb;
req               561 drivers/s390/scsi/zfcp_fsf.c 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
req               573 drivers/s390/scsi/zfcp_fsf.c 		if (zfcp_fsf_exchange_config_evaluate(req))
req               599 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_link_down_info_eval(req,
req               601 drivers/s390/scsi/zfcp_fsf.c 		if (zfcp_fsf_exchange_config_evaluate(req))
req               632 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
req               634 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_adapter *adapter = req->adapter;
req               635 drivers/s390/scsi/zfcp_fsf.c 	struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
req               638 drivers/s390/scsi/zfcp_fsf.c 	if (req->data)
req               639 drivers/s390/scsi/zfcp_fsf.c 		memcpy(req->data, bottom, sizeof(*bottom));
req               654 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
req               656 drivers/s390/scsi/zfcp_fsf.c 	struct fsf_qtcb *qtcb = req->qtcb;
req               658 drivers/s390/scsi/zfcp_fsf.c 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
req               663 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_exchange_port_evaluate(req);
req               666 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_exchange_port_evaluate(req);
req               667 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_link_down_info_eval(req,
req               675 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *req;
req               678 drivers/s390/scsi/zfcp_fsf.c 		req = mempool_alloc(pool, GFP_ATOMIC);
req               680 drivers/s390/scsi/zfcp_fsf.c 		req = kmalloc(sizeof(*req), GFP_ATOMIC);
req               682 drivers/s390/scsi/zfcp_fsf.c 	if (unlikely(!req))
req               685 drivers/s390/scsi/zfcp_fsf.c 	memset(req, 0, sizeof(*req));
req               686 drivers/s390/scsi/zfcp_fsf.c 	req->pool = pool;
req               687 drivers/s390/scsi/zfcp_fsf.c 	return req;
req               711 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
req               713 drivers/s390/scsi/zfcp_fsf.c 	if (unlikely(!req))
req               719 drivers/s390/scsi/zfcp_fsf.c 	INIT_LIST_HEAD(&req->list);
req               720 drivers/s390/scsi/zfcp_fsf.c 	timer_setup(&req->timer, NULL, 0);
req               721 drivers/s390/scsi/zfcp_fsf.c 	init_completion(&req->completion);
req               723 drivers/s390/scsi/zfcp_fsf.c 	req->adapter = adapter;
req               724 drivers/s390/scsi/zfcp_fsf.c 	req->req_id = adapter->req_no;
req               728 drivers/s390/scsi/zfcp_fsf.c 			req->qtcb = zfcp_fsf_qtcb_alloc(
req               731 drivers/s390/scsi/zfcp_fsf.c 			req->qtcb = zfcp_fsf_qtcb_alloc(NULL);
req               733 drivers/s390/scsi/zfcp_fsf.c 		if (unlikely(!req->qtcb)) {
req               734 drivers/s390/scsi/zfcp_fsf.c 			zfcp_fsf_req_free(req);
req               738 drivers/s390/scsi/zfcp_fsf.c 		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
req               739 drivers/s390/scsi/zfcp_fsf.c 		req->qtcb->prefix.req_id = req->req_id;
req               740 drivers/s390/scsi/zfcp_fsf.c 		req->qtcb->prefix.ulp_info = 26;
req               741 drivers/s390/scsi/zfcp_fsf.c 		req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd];
req               742 drivers/s390/scsi/zfcp_fsf.c 		req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
req               743 drivers/s390/scsi/zfcp_fsf.c 		req->qtcb->header.req_handle = req->req_id;
req               744 drivers/s390/scsi/zfcp_fsf.c 		req->qtcb->header.fsf_command = fsf_cmd;
req               747 drivers/s390/scsi/zfcp_fsf.c 	zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
req               748 drivers/s390/scsi/zfcp_fsf.c 			   req->qtcb, sizeof(struct fsf_qtcb));
req               750 drivers/s390/scsi/zfcp_fsf.c 	return req;
req               753 drivers/s390/scsi/zfcp_fsf.c static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
req               755 drivers/s390/scsi/zfcp_fsf.c 	const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
req               756 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_adapter *adapter = req->adapter;
req               758 drivers/s390/scsi/zfcp_fsf.c 	int req_id = req->req_id;
req               760 drivers/s390/scsi/zfcp_fsf.c 	zfcp_reqlist_add(adapter->req_list, req);
req               762 drivers/s390/scsi/zfcp_fsf.c 	req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
req               763 drivers/s390/scsi/zfcp_fsf.c 	req->issued = get_tod_clock();
req               764 drivers/s390/scsi/zfcp_fsf.c 	if (zfcp_qdio_send(qdio, &req->qdio_req)) {
req               765 drivers/s390/scsi/zfcp_fsf.c 		del_timer(&req->timer);
req               800 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *req;
req               809 drivers/s390/scsi/zfcp_fsf.c 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
req               812 drivers/s390/scsi/zfcp_fsf.c 	if (IS_ERR(req)) {
req               813 drivers/s390/scsi/zfcp_fsf.c 		retval = PTR_ERR(req);
req               824 drivers/s390/scsi/zfcp_fsf.c 	req->data = sr_buf;
req               826 drivers/s390/scsi/zfcp_fsf.c 	zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
req               827 drivers/s390/scsi/zfcp_fsf.c 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req               829 drivers/s390/scsi/zfcp_fsf.c 	retval = zfcp_fsf_req_send(req);
req               837 drivers/s390/scsi/zfcp_fsf.c 	req->data = NULL;
req               840 drivers/s390/scsi/zfcp_fsf.c 	zfcp_dbf_hba_fsf_uss("fssr__1", req);
req               841 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_req_free(req);
req               847 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
req               849 drivers/s390/scsi/zfcp_fsf.c 	struct scsi_device *sdev = req->data;
req               851 drivers/s390/scsi/zfcp_fsf.c 	union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
req               853 drivers/s390/scsi/zfcp_fsf.c 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
req               858 drivers/s390/scsi/zfcp_fsf.c 	switch (req->qtcb->header.fsf_status) {
req               863 drivers/s390/scsi/zfcp_fsf.c 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req               869 drivers/s390/scsi/zfcp_fsf.c 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req               873 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
req               880 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req               886 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req               894 drivers/s390/scsi/zfcp_fsf.c 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req               899 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
req               912 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *req = NULL;
req               921 drivers/s390/scsi/zfcp_fsf.c 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
req               924 drivers/s390/scsi/zfcp_fsf.c 	if (IS_ERR(req)) {
req               925 drivers/s390/scsi/zfcp_fsf.c 		req = NULL;
req               933 drivers/s390/scsi/zfcp_fsf.c 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req               935 drivers/s390/scsi/zfcp_fsf.c 	req->data = sdev;
req               936 drivers/s390/scsi/zfcp_fsf.c 	req->handler = zfcp_fsf_abort_fcp_command_handler;
req               937 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
req               938 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
req               939 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->bottom.support.req_handle = (u64) old_req_id;
req               941 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
req               942 drivers/s390/scsi/zfcp_fsf.c 	if (!zfcp_fsf_req_send(req)) {
req               948 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_req_free(req);
req               949 drivers/s390/scsi/zfcp_fsf.c 	req = NULL;
req               952 drivers/s390/scsi/zfcp_fsf.c 	return req;
req               955 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
req               957 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_adapter *adapter = req->adapter;
req               958 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_ct_els *ct = req->data;
req               959 drivers/s390/scsi/zfcp_fsf.c 	struct fsf_qtcb_header *header = &req->qtcb->header;
req               963 drivers/s390/scsi/zfcp_fsf.c 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
req               969 drivers/s390/scsi/zfcp_fsf.c 		zfcp_dbf_san_res("fsscth2", req);
req               972 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_class_not_supp(req);
req               978 drivers/s390/scsi/zfcp_fsf.c 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req               983 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req               993 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              1012 drivers/s390/scsi/zfcp_fsf.c static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
req              1016 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_adapter *adapter = req->adapter;
req              1018 drivers/s390/scsi/zfcp_fsf.c 	struct fsf_qtcb *qtcb = req->qtcb;
req              1022 drivers/s390/scsi/zfcp_fsf.c 		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
req              1026 drivers/s390/scsi/zfcp_fsf.c 		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
req              1031 drivers/s390/scsi/zfcp_fsf.c 		zfcp_qdio_set_data_div(qdio, &req->qdio_req, sg_nents(sg_req));
req              1032 drivers/s390/scsi/zfcp_fsf.c 		zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req              1033 drivers/s390/scsi/zfcp_fsf.c 		zfcp_qdio_set_scount(qdio, &req->qdio_req);
req              1039 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
req              1047 drivers/s390/scsi/zfcp_fsf.c 	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
req              1052 drivers/s390/scsi/zfcp_fsf.c 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req              1053 drivers/s390/scsi/zfcp_fsf.c 	zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);
req              1055 drivers/s390/scsi/zfcp_fsf.c 	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
req              1060 drivers/s390/scsi/zfcp_fsf.c 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req              1065 drivers/s390/scsi/zfcp_fsf.c static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
req              1072 drivers/s390/scsi/zfcp_fsf.c 	ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
req              1079 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
req              1080 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->bottom.support.timeout = timeout;
req              1081 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
req              1098 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *req;
req              1105 drivers/s390/scsi/zfcp_fsf.c 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
req              1108 drivers/s390/scsi/zfcp_fsf.c 	if (IS_ERR(req)) {
req              1109 drivers/s390/scsi/zfcp_fsf.c 		ret = PTR_ERR(req);
req              1113 drivers/s390/scsi/zfcp_fsf.c 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
req              1114 drivers/s390/scsi/zfcp_fsf.c 	ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
req              1118 drivers/s390/scsi/zfcp_fsf.c 	req->handler = zfcp_fsf_send_ct_handler;
req              1119 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->header.port_handle = wka_port->handle;
req              1121 drivers/s390/scsi/zfcp_fsf.c 	req->data = ct;
req              1123 drivers/s390/scsi/zfcp_fsf.c 	zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
req              1125 drivers/s390/scsi/zfcp_fsf.c 	ret = zfcp_fsf_req_send(req);
req              1133 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_req_free(req);
req              1139 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
req              1141 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_ct_els *send_els = req->data;
req              1142 drivers/s390/scsi/zfcp_fsf.c 	struct fsf_qtcb_header *header = &req->qtcb->header;
req              1146 drivers/s390/scsi/zfcp_fsf.c 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
req              1152 drivers/s390/scsi/zfcp_fsf.c 		zfcp_dbf_san_res("fsselh1", req);
req              1155 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_class_not_supp(req);
req              1162 drivers/s390/scsi/zfcp_fsf.c 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              1175 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              1193 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *req;
req              1201 drivers/s390/scsi/zfcp_fsf.c 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
req              1204 drivers/s390/scsi/zfcp_fsf.c 	if (IS_ERR(req)) {
req              1205 drivers/s390/scsi/zfcp_fsf.c 		ret = PTR_ERR(req);
req              1209 drivers/s390/scsi/zfcp_fsf.c 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
req              1212 drivers/s390/scsi/zfcp_fsf.c 		zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
req              1214 drivers/s390/scsi/zfcp_fsf.c 	ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
req              1219 drivers/s390/scsi/zfcp_fsf.c 	hton24(req->qtcb->bottom.support.d_id, d_id);
req              1220 drivers/s390/scsi/zfcp_fsf.c 	req->handler = zfcp_fsf_send_els_handler;
req              1222 drivers/s390/scsi/zfcp_fsf.c 	req->data = els;
req              1224 drivers/s390/scsi/zfcp_fsf.c 	zfcp_dbf_san_req("fssels1", req, d_id);
req              1226 drivers/s390/scsi/zfcp_fsf.c 	ret = zfcp_fsf_req_send(req);
req              1234 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_req_free(req);
req              1242 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *req;
req              1250 drivers/s390/scsi/zfcp_fsf.c 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
req              1254 drivers/s390/scsi/zfcp_fsf.c 	if (IS_ERR(req)) {
req              1255 drivers/s390/scsi/zfcp_fsf.c 		retval = PTR_ERR(req);
req              1259 drivers/s390/scsi/zfcp_fsf.c 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
req              1260 drivers/s390/scsi/zfcp_fsf.c 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req              1262 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->bottom.config.feature_selection =
req              1265 drivers/s390/scsi/zfcp_fsf.c 	req->erp_action = erp_action;
req              1266 drivers/s390/scsi/zfcp_fsf.c 	req->handler = zfcp_fsf_exchange_config_data_handler;
req              1267 drivers/s390/scsi/zfcp_fsf.c 	erp_action->fsf_req_id = req->req_id;
req              1269 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_start_erp_timer(req);
req              1270 drivers/s390/scsi/zfcp_fsf.c 	retval = zfcp_fsf_req_send(req);
req              1272 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_req_free(req);
req              1284 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *req = NULL;
req              1291 drivers/s390/scsi/zfcp_fsf.c 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
req              1294 drivers/s390/scsi/zfcp_fsf.c 	if (IS_ERR(req)) {
req              1295 drivers/s390/scsi/zfcp_fsf.c 		retval = PTR_ERR(req);
req              1299 drivers/s390/scsi/zfcp_fsf.c 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req              1300 drivers/s390/scsi/zfcp_fsf.c 	req->handler = zfcp_fsf_exchange_config_data_handler;
req              1302 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->bottom.config.feature_selection =
req              1307 drivers/s390/scsi/zfcp_fsf.c 		req->data = data;
req              1309 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
req              1310 drivers/s390/scsi/zfcp_fsf.c 	retval = zfcp_fsf_req_send(req);
req              1314 drivers/s390/scsi/zfcp_fsf.c 		wait_for_completion(&req->completion);
req              1317 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_req_free(req);
req              1333 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *req;
req              1343 drivers/s390/scsi/zfcp_fsf.c 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
req              1347 drivers/s390/scsi/zfcp_fsf.c 	if (IS_ERR(req)) {
req              1348 drivers/s390/scsi/zfcp_fsf.c 		retval = PTR_ERR(req);
req              1352 drivers/s390/scsi/zfcp_fsf.c 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
req              1353 drivers/s390/scsi/zfcp_fsf.c 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req              1355 drivers/s390/scsi/zfcp_fsf.c 	req->handler = zfcp_fsf_exchange_port_data_handler;
req              1356 drivers/s390/scsi/zfcp_fsf.c 	req->erp_action = erp_action;
req              1357 drivers/s390/scsi/zfcp_fsf.c 	erp_action->fsf_req_id = req->req_id;
req              1359 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_start_erp_timer(req);
req              1360 drivers/s390/scsi/zfcp_fsf.c 	retval = zfcp_fsf_req_send(req);
req              1362 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_req_free(req);
req              1380 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *req = NULL;
req              1390 drivers/s390/scsi/zfcp_fsf.c 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
req              1393 drivers/s390/scsi/zfcp_fsf.c 	if (IS_ERR(req)) {
req              1394 drivers/s390/scsi/zfcp_fsf.c 		retval = PTR_ERR(req);
req              1399 drivers/s390/scsi/zfcp_fsf.c 		req->data = data;
req              1401 drivers/s390/scsi/zfcp_fsf.c 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req              1403 drivers/s390/scsi/zfcp_fsf.c 	req->handler = zfcp_fsf_exchange_port_data_handler;
req              1404 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
req              1405 drivers/s390/scsi/zfcp_fsf.c 	retval = zfcp_fsf_req_send(req);
req              1410 drivers/s390/scsi/zfcp_fsf.c 		wait_for_completion(&req->completion);
req              1413 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_req_free(req);
req              1422 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
req              1424 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_port *port = req->data;
req              1425 drivers/s390/scsi/zfcp_fsf.c 	struct fsf_qtcb_header *header = &req->qtcb->header;
req              1428 drivers/s390/scsi/zfcp_fsf.c 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
req              1435 drivers/s390/scsi/zfcp_fsf.c 		dev_warn(&req->adapter->ccw_device->dev,
req              1441 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              1450 drivers/s390/scsi/zfcp_fsf.c 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              1475 drivers/s390/scsi/zfcp_fsf.c 		plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
req              1476 drivers/s390/scsi/zfcp_fsf.c 		if (req->qtcb->bottom.support.els1_length >=
req              1481 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              1498 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *req;
req              1505 drivers/s390/scsi/zfcp_fsf.c 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
req              1509 drivers/s390/scsi/zfcp_fsf.c 	if (IS_ERR(req)) {
req              1510 drivers/s390/scsi/zfcp_fsf.c 		retval = PTR_ERR(req);
req              1514 drivers/s390/scsi/zfcp_fsf.c 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
req              1515 drivers/s390/scsi/zfcp_fsf.c 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req              1517 drivers/s390/scsi/zfcp_fsf.c 	req->handler = zfcp_fsf_open_port_handler;
req              1518 drivers/s390/scsi/zfcp_fsf.c 	hton24(req->qtcb->bottom.support.d_id, port->d_id);
req              1519 drivers/s390/scsi/zfcp_fsf.c 	req->data = port;
req              1520 drivers/s390/scsi/zfcp_fsf.c 	req->erp_action = erp_action;
req              1521 drivers/s390/scsi/zfcp_fsf.c 	erp_action->fsf_req_id = req->req_id;
req              1524 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_start_erp_timer(req);
req              1525 drivers/s390/scsi/zfcp_fsf.c 	retval = zfcp_fsf_req_send(req);
req              1527 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_req_free(req);
req              1537 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
req              1539 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_port *port = req->data;
req              1541 drivers/s390/scsi/zfcp_fsf.c 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
req              1544 drivers/s390/scsi/zfcp_fsf.c 	switch (req->qtcb->header.fsf_status) {
req              1547 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              1565 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *req;
req              1572 drivers/s390/scsi/zfcp_fsf.c 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
req              1576 drivers/s390/scsi/zfcp_fsf.c 	if (IS_ERR(req)) {
req              1577 drivers/s390/scsi/zfcp_fsf.c 		retval = PTR_ERR(req);
req              1581 drivers/s390/scsi/zfcp_fsf.c 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
req              1582 drivers/s390/scsi/zfcp_fsf.c 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req              1584 drivers/s390/scsi/zfcp_fsf.c 	req->handler = zfcp_fsf_close_port_handler;
req              1585 drivers/s390/scsi/zfcp_fsf.c 	req->data = erp_action->port;
req              1586 drivers/s390/scsi/zfcp_fsf.c 	req->erp_action = erp_action;
req              1587 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->header.port_handle = erp_action->port->handle;
req              1588 drivers/s390/scsi/zfcp_fsf.c 	erp_action->fsf_req_id = req->req_id;
req              1590 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_start_erp_timer(req);
req              1591 drivers/s390/scsi/zfcp_fsf.c 	retval = zfcp_fsf_req_send(req);
req              1593 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_req_free(req);
req              1602 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
req              1604 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fc_wka_port *wka_port = req->data;
req              1605 drivers/s390/scsi/zfcp_fsf.c 	struct fsf_qtcb_header *header = &req->qtcb->header;
req              1607 drivers/s390/scsi/zfcp_fsf.c 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
req              1614 drivers/s390/scsi/zfcp_fsf.c 		dev_warn(&req->adapter->ccw_device->dev,
req              1618 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              1639 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *req;
req              1647 drivers/s390/scsi/zfcp_fsf.c 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
req              1651 drivers/s390/scsi/zfcp_fsf.c 	if (IS_ERR(req)) {
req              1652 drivers/s390/scsi/zfcp_fsf.c 		retval = PTR_ERR(req);
req              1656 drivers/s390/scsi/zfcp_fsf.c 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
req              1657 drivers/s390/scsi/zfcp_fsf.c 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req              1659 drivers/s390/scsi/zfcp_fsf.c 	req->handler = zfcp_fsf_open_wka_port_handler;
req              1660 drivers/s390/scsi/zfcp_fsf.c 	hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
req              1661 drivers/s390/scsi/zfcp_fsf.c 	req->data = wka_port;
req              1663 drivers/s390/scsi/zfcp_fsf.c 	req_id = req->req_id;
req              1665 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
req              1666 drivers/s390/scsi/zfcp_fsf.c 	retval = zfcp_fsf_req_send(req);
req              1668 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_req_free(req);
req              1677 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
req              1679 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fc_wka_port *wka_port = req->data;
req              1681 drivers/s390/scsi/zfcp_fsf.c 	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
req              1682 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              1698 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *req;
req              1706 drivers/s390/scsi/zfcp_fsf.c 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
req              1710 drivers/s390/scsi/zfcp_fsf.c 	if (IS_ERR(req)) {
req              1711 drivers/s390/scsi/zfcp_fsf.c 		retval = PTR_ERR(req);
req              1715 drivers/s390/scsi/zfcp_fsf.c 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
req              1716 drivers/s390/scsi/zfcp_fsf.c 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req              1718 drivers/s390/scsi/zfcp_fsf.c 	req->handler = zfcp_fsf_close_wka_port_handler;
req              1719 drivers/s390/scsi/zfcp_fsf.c 	req->data = wka_port;
req              1720 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->header.port_handle = wka_port->handle;
req              1722 drivers/s390/scsi/zfcp_fsf.c 	req_id = req->req_id;
req              1724 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
req              1725 drivers/s390/scsi/zfcp_fsf.c 	retval = zfcp_fsf_req_send(req);
req              1727 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_req_free(req);
req              1736 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
req              1738 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_port *port = req->data;
req              1739 drivers/s390/scsi/zfcp_fsf.c 	struct fsf_qtcb_header *header = &req->qtcb->header;
req              1742 drivers/s390/scsi/zfcp_fsf.c 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
req              1748 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              1761 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              1768 drivers/s390/scsi/zfcp_fsf.c 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              1793 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *req;
req              1800 drivers/s390/scsi/zfcp_fsf.c 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
req              1804 drivers/s390/scsi/zfcp_fsf.c 	if (IS_ERR(req)) {
req              1805 drivers/s390/scsi/zfcp_fsf.c 		retval = PTR_ERR(req);
req              1809 drivers/s390/scsi/zfcp_fsf.c 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
req              1810 drivers/s390/scsi/zfcp_fsf.c 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req              1812 drivers/s390/scsi/zfcp_fsf.c 	req->data = erp_action->port;
req              1813 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->header.port_handle = erp_action->port->handle;
req              1814 drivers/s390/scsi/zfcp_fsf.c 	req->erp_action = erp_action;
req              1815 drivers/s390/scsi/zfcp_fsf.c 	req->handler = zfcp_fsf_close_physical_port_handler;
req              1816 drivers/s390/scsi/zfcp_fsf.c 	erp_action->fsf_req_id = req->req_id;
req              1818 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_start_erp_timer(req);
req              1819 drivers/s390/scsi/zfcp_fsf.c 	retval = zfcp_fsf_req_send(req);
req              1821 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_req_free(req);
req              1830 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
req              1832 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_adapter *adapter = req->adapter;
req              1833 drivers/s390/scsi/zfcp_fsf.c 	struct scsi_device *sdev = req->data;
req              1835 drivers/s390/scsi/zfcp_fsf.c 	struct fsf_qtcb_header *header = &req->qtcb->header;
req              1838 drivers/s390/scsi/zfcp_fsf.c 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
req              1859 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              1873 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              1884 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              1892 drivers/s390/scsi/zfcp_fsf.c 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              1913 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *req;
req              1920 drivers/s390/scsi/zfcp_fsf.c 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
req              1924 drivers/s390/scsi/zfcp_fsf.c 	if (IS_ERR(req)) {
req              1925 drivers/s390/scsi/zfcp_fsf.c 		retval = PTR_ERR(req);
req              1929 drivers/s390/scsi/zfcp_fsf.c 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
req              1930 drivers/s390/scsi/zfcp_fsf.c 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req              1932 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->header.port_handle = erp_action->port->handle;
req              1933 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
req              1934 drivers/s390/scsi/zfcp_fsf.c 	req->handler = zfcp_fsf_open_lun_handler;
req              1935 drivers/s390/scsi/zfcp_fsf.c 	req->data = erp_action->sdev;
req              1936 drivers/s390/scsi/zfcp_fsf.c 	req->erp_action = erp_action;
req              1937 drivers/s390/scsi/zfcp_fsf.c 	erp_action->fsf_req_id = req->req_id;
req              1940 drivers/s390/scsi/zfcp_fsf.c 		req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
req              1942 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_start_erp_timer(req);
req              1943 drivers/s390/scsi/zfcp_fsf.c 	retval = zfcp_fsf_req_send(req);
req              1945 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_req_free(req);
req              1954 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
req              1956 drivers/s390/scsi/zfcp_fsf.c 	struct scsi_device *sdev = req->data;
req              1959 drivers/s390/scsi/zfcp_fsf.c 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
req              1964 drivers/s390/scsi/zfcp_fsf.c 	switch (req->qtcb->header.fsf_status) {
req              1967 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              1971 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              1978 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              1981 drivers/s390/scsi/zfcp_fsf.c 		switch (req->qtcb->header.fsf_status_qual.word[0]) {
req              1986 drivers/s390/scsi/zfcp_fsf.c 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              2005 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *req;
req              2012 drivers/s390/scsi/zfcp_fsf.c 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
req              2016 drivers/s390/scsi/zfcp_fsf.c 	if (IS_ERR(req)) {
req              2017 drivers/s390/scsi/zfcp_fsf.c 		retval = PTR_ERR(req);
req              2021 drivers/s390/scsi/zfcp_fsf.c 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
req              2022 drivers/s390/scsi/zfcp_fsf.c 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req              2024 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->header.port_handle = erp_action->port->handle;
req              2025 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
req              2026 drivers/s390/scsi/zfcp_fsf.c 	req->handler = zfcp_fsf_close_lun_handler;
req              2027 drivers/s390/scsi/zfcp_fsf.c 	req->data = erp_action->sdev;
req              2028 drivers/s390/scsi/zfcp_fsf.c 	req->erp_action = erp_action;
req              2029 drivers/s390/scsi/zfcp_fsf.c 	erp_action->fsf_req_id = req->req_id;
req              2031 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_start_erp_timer(req);
req              2032 drivers/s390/scsi/zfcp_fsf.c 	retval = zfcp_fsf_req_send(req);
req              2034 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_req_free(req);
req              2050 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
req              2056 drivers/s390/scsi/zfcp_fsf.c 	int ticks = req->adapter->timer_ticks;
req              2058 drivers/s390/scsi/zfcp_fsf.c 	lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
req              2062 drivers/s390/scsi/zfcp_fsf.c 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
req              2065 drivers/s390/scsi/zfcp_fsf.c 	blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
req              2067 drivers/s390/scsi/zfcp_fsf.c 	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
req              2068 drivers/s390/scsi/zfcp_fsf.c 	    !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
req              2074 drivers/s390/scsi/zfcp_fsf.c 		switch (req->qtcb->bottom.io.data_direction) {
req              2108 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
req              2112 drivers/s390/scsi/zfcp_fsf.c 	struct fsf_qtcb_header *header = &req->qtcb->header;
req              2114 drivers/s390/scsi/zfcp_fsf.c 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
req              2122 drivers/s390/scsi/zfcp_fsf.c 		zfcp_erp_adapter_reopen(req->adapter, 0, "fssfch1");
req              2123 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              2128 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              2131 drivers/s390/scsi/zfcp_fsf.c 		zfcp_fsf_class_not_supp(req);
req              2134 drivers/s390/scsi/zfcp_fsf.c 		dev_err(&req->adapter->ccw_device->dev,
req              2137 drivers/s390/scsi/zfcp_fsf.c 			req->qtcb->bottom.io.data_direction,
req              2140 drivers/s390/scsi/zfcp_fsf.c 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch3");
req              2141 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              2144 drivers/s390/scsi/zfcp_fsf.c 		dev_err(&req->adapter->ccw_device->dev,
req              2146 drivers/s390/scsi/zfcp_fsf.c 			req->qtcb->bottom.io.fcp_cmnd_length);
req              2147 drivers/s390/scsi/zfcp_fsf.c 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4");
req              2148 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              2155 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              2161 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              2167 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
req              2172 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
req              2178 drivers/s390/scsi/zfcp_fsf.c 	read_lock_irqsave(&req->adapter->abort_lock, flags);
req              2180 drivers/s390/scsi/zfcp_fsf.c 	scpnt = req->data;
req              2182 drivers/s390/scsi/zfcp_fsf.c 		read_unlock_irqrestore(&req->adapter->abort_lock, flags);
req              2186 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_fcp_handler_common(req, scpnt->device);
req              2188 drivers/s390/scsi/zfcp_fsf.c 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
req              2193 drivers/s390/scsi/zfcp_fsf.c 	switch (req->qtcb->header.fsf_status) {
req              2209 drivers/s390/scsi/zfcp_fsf.c 	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
req              2213 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_req_trace(req, scpnt);
req              2214 drivers/s390/scsi/zfcp_fsf.c 	zfcp_dbf_scsi_result(scpnt, req);
req              2224 drivers/s390/scsi/zfcp_fsf.c 	read_unlock_irqrestore(&req->adapter->abort_lock, flags);
req              2271 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *req;
req              2295 drivers/s390/scsi/zfcp_fsf.c 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
req              2298 drivers/s390/scsi/zfcp_fsf.c 	if (IS_ERR(req)) {
req              2299 drivers/s390/scsi/zfcp_fsf.c 		retval = PTR_ERR(req);
req              2303 drivers/s390/scsi/zfcp_fsf.c 	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
req              2305 drivers/s390/scsi/zfcp_fsf.c 	io = &req->qtcb->bottom.io;
req              2306 drivers/s390/scsi/zfcp_fsf.c 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
req              2307 drivers/s390/scsi/zfcp_fsf.c 	req->data = scsi_cmnd;
req              2308 drivers/s390/scsi/zfcp_fsf.c 	req->handler = zfcp_fsf_fcp_cmnd_handler;
req              2309 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
req              2310 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
req              2323 drivers/s390/scsi/zfcp_fsf.c 	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
req              2328 drivers/s390/scsi/zfcp_fsf.c 		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
req              2330 drivers/s390/scsi/zfcp_fsf.c 		retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
req              2338 drivers/s390/scsi/zfcp_fsf.c 	retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
req              2343 drivers/s390/scsi/zfcp_fsf.c 	zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
req              2345 drivers/s390/scsi/zfcp_fsf.c 		zfcp_qdio_set_scount(qdio, &req->qdio_req);
req              2347 drivers/s390/scsi/zfcp_fsf.c 	retval = zfcp_fsf_req_send(req);
req              2355 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_req_free(req);
req              2362 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
req              2364 drivers/s390/scsi/zfcp_fsf.c 	struct scsi_device *sdev = req->data;
req              2368 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_fcp_handler_common(req, sdev);
req              2370 drivers/s390/scsi/zfcp_fsf.c 	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
req              2374 drivers/s390/scsi/zfcp_fsf.c 	     (req->status & ZFCP_STATUS_FSFREQ_ERROR))
req              2375 drivers/s390/scsi/zfcp_fsf.c 		req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
req              2388 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *req = NULL;
req              2401 drivers/s390/scsi/zfcp_fsf.c 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
req              2405 drivers/s390/scsi/zfcp_fsf.c 	if (IS_ERR(req)) {
req              2406 drivers/s390/scsi/zfcp_fsf.c 		req = NULL;
req              2410 drivers/s390/scsi/zfcp_fsf.c 	req->data = sdev;
req              2412 drivers/s390/scsi/zfcp_fsf.c 	req->handler = zfcp_fsf_fcp_task_mgmt_handler;
req              2413 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
req              2414 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
req              2415 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
req              2416 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
req              2417 drivers/s390/scsi/zfcp_fsf.c 	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
req              2419 drivers/s390/scsi/zfcp_fsf.c 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req              2421 drivers/s390/scsi/zfcp_fsf.c 	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
req              2424 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
req              2425 drivers/s390/scsi/zfcp_fsf.c 	if (!zfcp_fsf_req_send(req)) {
req              2430 drivers/s390/scsi/zfcp_fsf.c 	zfcp_fsf_req_free(req);
req              2431 drivers/s390/scsi/zfcp_fsf.c 	req = NULL;
req              2434 drivers/s390/scsi/zfcp_fsf.c 	return req;
req               450 drivers/s390/scsi/zfcp_fsf.h 	struct scatterlist *req;
req                86 drivers/s390/scsi/zfcp_reqlist.h 	struct zfcp_fsf_req *req;
req                90 drivers/s390/scsi/zfcp_reqlist.h 	list_for_each_entry(req, &rl->buckets[i], list)
req                91 drivers/s390/scsi/zfcp_reqlist.h 		if (req->req_id == req_id)
req                92 drivers/s390/scsi/zfcp_reqlist.h 			return req;
req               108 drivers/s390/scsi/zfcp_reqlist.h 	struct zfcp_fsf_req *req;
req               111 drivers/s390/scsi/zfcp_reqlist.h 	req = _zfcp_reqlist_find(rl, req_id);
req               114 drivers/s390/scsi/zfcp_reqlist.h 	return req;
req               133 drivers/s390/scsi/zfcp_reqlist.h 	struct zfcp_fsf_req *req;
req               136 drivers/s390/scsi/zfcp_reqlist.h 	req = _zfcp_reqlist_find(rl, req_id);
req               137 drivers/s390/scsi/zfcp_reqlist.h 	if (req)
req               138 drivers/s390/scsi/zfcp_reqlist.h 		list_del(&req->list);
req               141 drivers/s390/scsi/zfcp_reqlist.h 	return req;
req               155 drivers/s390/scsi/zfcp_reqlist.h 				    struct zfcp_fsf_req *req)
req               160 drivers/s390/scsi/zfcp_reqlist.h 	i = zfcp_reqlist_hash(req->req_id);
req               163 drivers/s390/scsi/zfcp_reqlist.h 	list_add_tail(&req->list, &rl->buckets[i]);
req               201 drivers/s390/scsi/zfcp_reqlist.h 	struct zfcp_fsf_req *req;
req               207 drivers/s390/scsi/zfcp_reqlist.h 		list_for_each_entry(req, &rl->buckets[i], list)
req               208 drivers/s390/scsi/zfcp_reqlist.h 			f(req, data);
req               739 drivers/scsi/be2iscsi/be_cmds.c 	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
req               747 drivers/scsi/be2iscsi/be_cmds.c 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
req               749 drivers/scsi/be2iscsi/be_cmds.c 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req               750 drivers/scsi/be2iscsi/be_cmds.c 			OPCODE_COMMON_EQ_CREATE, sizeof(*req));
req               752 drivers/scsi/be2iscsi/be_cmds.c 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
req               754 drivers/scsi/be2iscsi/be_cmds.c 	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
req               756 drivers/scsi/be2iscsi/be_cmds.c 	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
req               757 drivers/scsi/be2iscsi/be_cmds.c 	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
req               758 drivers/scsi/be2iscsi/be_cmds.c 	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
req               760 drivers/scsi/be2iscsi/be_cmds.c 	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
req               762 drivers/scsi/be2iscsi/be_cmds.c 	be_dws_cpu_to_le(req->context, sizeof(req->context));
req               764 drivers/scsi/be2iscsi/be_cmds.c 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
req               780 drivers/scsi/be2iscsi/be_cmds.c 	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
req               784 drivers/scsi/be2iscsi/be_cmds.c 	void *ctxt = &req->context;
req               790 drivers/scsi/be2iscsi/be_cmds.c 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
req               792 drivers/scsi/be2iscsi/be_cmds.c 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req               793 drivers/scsi/be2iscsi/be_cmds.c 			OPCODE_COMMON_CQ_CREATE, sizeof(*req));
req               795 drivers/scsi/be2iscsi/be_cmds.c 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
req               810 drivers/scsi/be2iscsi/be_cmds.c 		req->hdr.version = MBX_CMD_VER2;
req               811 drivers/scsi/be2iscsi/be_cmds.c 		req->page_size = 1;
req               824 drivers/scsi/be2iscsi/be_cmds.c 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
req               826 drivers/scsi/be2iscsi/be_cmds.c 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
req               855 drivers/scsi/be2iscsi/be_cmds.c 	struct be_cmd_req_mcc_create_ext *req;
req               865 drivers/scsi/be2iscsi/be_cmds.c 	req = embedded_payload(wrb);
req               866 drivers/scsi/be2iscsi/be_cmds.c 	ctxt = &req->context;
req               868 drivers/scsi/be2iscsi/be_cmds.c 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
req               870 drivers/scsi/be2iscsi/be_cmds.c 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req               871 drivers/scsi/be2iscsi/be_cmds.c 			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
req               873 drivers/scsi/be2iscsi/be_cmds.c 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
req               874 drivers/scsi/be2iscsi/be_cmds.c 	req->async_evt_bitmap = 1 << ASYNC_EVENT_CODE_LINK_STATE;
req               875 drivers/scsi/be2iscsi/be_cmds.c 	req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_ISCSI;
req               876 drivers/scsi/be2iscsi/be_cmds.c 	req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_SLI;
req               885 drivers/scsi/be2iscsi/be_cmds.c 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
req               887 drivers/scsi/be2iscsi/be_cmds.c 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
req               904 drivers/scsi/be2iscsi/be_cmds.c 	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
req               915 drivers/scsi/be2iscsi/be_cmds.c 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
req               946 drivers/scsi/be2iscsi/be_cmds.c 	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
req               948 drivers/scsi/be2iscsi/be_cmds.c 		req->id = cpu_to_le16(q->id);
req               981 drivers/scsi/be2iscsi/be_cmds.c 	struct be_defq_create_req *req = embedded_payload(wrb);
req               984 drivers/scsi/be2iscsi/be_cmds.c 	void *ctxt = &req->context;
req               990 drivers/scsi/be2iscsi/be_cmds.c 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
req               992 drivers/scsi/be2iscsi/be_cmds.c 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
req               993 drivers/scsi/be2iscsi/be_cmds.c 			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
req               995 drivers/scsi/be2iscsi/be_cmds.c 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
req               997 drivers/scsi/be2iscsi/be_cmds.c 		req->ulp_num = ulp_num;
req               998 drivers/scsi/be2iscsi/be_cmds.c 		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
req               999 drivers/scsi/be2iscsi/be_cmds.c 		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
req              1032 drivers/scsi/be2iscsi/be_cmds.c 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
req              1034 drivers/scsi/be2iscsi/be_cmds.c 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
req              1082 drivers/scsi/be2iscsi/be_cmds.c 	struct be_wrbq_create_req *req = embedded_payload(wrb);
req              1090 drivers/scsi/be2iscsi/be_cmds.c 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
req              1092 drivers/scsi/be2iscsi/be_cmds.c 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
req              1093 drivers/scsi/be2iscsi/be_cmds.c 		OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
req              1094 drivers/scsi/be2iscsi/be_cmds.c 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
req              1097 drivers/scsi/be2iscsi/be_cmds.c 		req->ulp_num = ulp_num;
req              1098 drivers/scsi/be2iscsi/be_cmds.c 		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
req              1099 drivers/scsi/be2iscsi/be_cmds.c 		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
req              1102 drivers/scsi/be2iscsi/be_cmds.c 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
req              1126 drivers/scsi/be2iscsi/be_cmds.c 	struct be_post_template_pages_req *req = embedded_payload(wrb);
req              1132 drivers/scsi/be2iscsi/be_cmds.c 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
req              1133 drivers/scsi/be2iscsi/be_cmds.c 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              1135 drivers/scsi/be2iscsi/be_cmds.c 			   sizeof(*req));
req              1137 drivers/scsi/be2iscsi/be_cmds.c 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
req              1138 drivers/scsi/be2iscsi/be_cmds.c 	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
req              1139 drivers/scsi/be2iscsi/be_cmds.c 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
req              1149 drivers/scsi/be2iscsi/be_cmds.c 	struct be_remove_template_pages_req *req = embedded_payload(wrb);
req              1155 drivers/scsi/be2iscsi/be_cmds.c 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
req              1156 drivers/scsi/be2iscsi/be_cmds.c 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              1158 drivers/scsi/be2iscsi/be_cmds.c 			   sizeof(*req));
req              1160 drivers/scsi/be2iscsi/be_cmds.c 	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
req              1172 drivers/scsi/be2iscsi/be_cmds.c 	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
req              1185 drivers/scsi/be2iscsi/be_cmds.c 		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
req              1186 drivers/scsi/be2iscsi/be_cmds.c 		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
req              1188 drivers/scsi/be2iscsi/be_cmds.c 				   sizeof(*req));
req              1191 drivers/scsi/be2iscsi/be_cmds.c 		req->num_pages = min(num_pages, curr_pages);
req              1192 drivers/scsi/be2iscsi/be_cmds.c 		req->page_offset = page_offset;
req              1193 drivers/scsi/be2iscsi/be_cmds.c 		be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
req              1194 drivers/scsi/be2iscsi/be_cmds.c 		q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
req              1195 drivers/scsi/be2iscsi/be_cmds.c 		internal_page_offset += req->num_pages;
req              1196 drivers/scsi/be2iscsi/be_cmds.c 		page_offset += req->num_pages;
req              1197 drivers/scsi/be2iscsi/be_cmds.c 		num_pages -= req->num_pages;
req              1200 drivers/scsi/be2iscsi/be_cmds.c 			req->num_pages = temp_num_pages;
req              1232 drivers/scsi/be2iscsi/be_cmds.c 	struct be_cmd_set_vlan_req *req;
req              1243 drivers/scsi/be2iscsi/be_cmds.c 	req = embedded_payload(wrb);
req              1245 drivers/scsi/be2iscsi/be_cmds.c 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
req              1247 drivers/scsi/be2iscsi/be_cmds.c 			   sizeof(*req));
req              1249 drivers/scsi/be2iscsi/be_cmds.c 	req->interface_hndl = phba->interface_handle;
req              1250 drivers/scsi/be2iscsi/be_cmds.c 	req->vlan_priority = vlan_tag;
req              1263 drivers/scsi/be2iscsi/be_cmds.c 	struct be_mgmt_controller_attributes *req;
req              1277 drivers/scsi/be2iscsi/be_cmds.c 	req = nonemb_cmd.va;
req              1278 drivers/scsi/be2iscsi/be_cmds.c 	memset(req, 0, sizeof(*req));
req              1281 drivers/scsi/be2iscsi/be_cmds.c 	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
req              1282 drivers/scsi/be2iscsi/be_cmds.c 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              1283 drivers/scsi/be2iscsi/be_cmds.c 			   OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
req              1538 drivers/scsi/be2iscsi/be_cmds.c 	ioctl->param.req.param_id = BE_CMD_SET_HOST_PARAM_ID;
req              1539 drivers/scsi/be2iscsi/be_cmds.c 	ioctl->param.req.param_len =
req              1540 drivers/scsi/be2iscsi/be_cmds.c 		snprintf((char *)ioctl->param.req.param_data,
req              1541 drivers/scsi/be2iscsi/be_cmds.c 			 sizeof(ioctl->param.req.param_data),
req              1543 drivers/scsi/be2iscsi/be_cmds.c 	ioctl->param.req.param_len = ALIGN(ioctl->param.req.param_len + 1, 4);
req              1544 drivers/scsi/be2iscsi/be_cmds.c 	if (ioctl->param.req.param_len > BE_CMD_MAX_DRV_VERSION)
req              1545 drivers/scsi/be2iscsi/be_cmds.c 		ioctl->param.req.param_len = BE_CMD_MAX_DRV_VERSION;
req              1582 drivers/scsi/be2iscsi/be_cmds.c 	ioctl->param_len = sizeof(ioctl->param.req);
req              1583 drivers/scsi/be2iscsi/be_cmds.c 	ioctl->param.req.uer = BE_CMD_UER_SUPP_BIT;
req              1645 drivers/scsi/be2iscsi/be_cmds.c 	struct be_post_sgl_pages_req *req;
req              1650 drivers/scsi/be2iscsi/be_cmds.c 	req = embedded_payload(wrb);
req              1651 drivers/scsi/be2iscsi/be_cmds.c 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
req              1652 drivers/scsi/be2iscsi/be_cmds.c 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
req              1653 drivers/scsi/be2iscsi/be_cmds.c 			   OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
req              1745 drivers/scsi/be2iscsi/be_cmds.c 	struct iscsi_cleanup_req *req;
req              1756 drivers/scsi/be2iscsi/be_cmds.c 		req = embedded_payload(wrb);
req              1757 drivers/scsi/be2iscsi/be_cmds.c 		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
req              1758 drivers/scsi/be2iscsi/be_cmds.c 		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
req              1759 drivers/scsi/be2iscsi/be_cmds.c 				   OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
req              1760 drivers/scsi/be2iscsi/be_cmds.c 		req->chute = (1 << ulp);
req              1762 drivers/scsi/be2iscsi/be_cmds.c 		req->hdr_ring_id = hdr_ring_id;
req              1763 drivers/scsi/be2iscsi/be_cmds.c 		req->data_ring_id = data_ring_id;
req               755 drivers/scsi/be2iscsi/be_cmds.h 		struct be_sethost_req req;
req               782 drivers/scsi/be2iscsi/be_cmds.h 		struct be_uer_req req;
req              1182 drivers/scsi/be2iscsi/be_cmds.h 	struct be_invalidate_connection_params_in req;
req              1262 drivers/scsi/be2iscsi/be_cmds.h 		} req;
req                42 drivers/scsi/be2iscsi/be_mgmt.c 	struct be_bsg_vendor_cmd *req = nonemb_cmd->va;
req                51 drivers/scsi/be2iscsi/be_mgmt.c 	req->region = region;
req                52 drivers/scsi/be2iscsi/be_mgmt.c 	req->sector = sector;
req                53 drivers/scsi/be2iscsi/be_mgmt.c 	req->offset = offset;
req                60 drivers/scsi/be2iscsi/be_mgmt.c 		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
req                61 drivers/scsi/be2iscsi/be_mgmt.c 				   OPCODE_COMMON_WRITE_FLASH, sizeof(*req));
req                67 drivers/scsi/be2iscsi/be_mgmt.c 		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
req                68 drivers/scsi/be2iscsi/be_mgmt.c 			   OPCODE_COMMON_READ_FLASH, sizeof(*req));
req               119 drivers/scsi/be2iscsi/be_mgmt.c 	struct tcp_connect_and_offload_in_v1 *req;
req               155 drivers/scsi/be2iscsi/be_mgmt.c 	req = nonemb_cmd->va;
req               156 drivers/scsi/be2iscsi/be_mgmt.c 	memset(req, 0, sizeof(*req));
req               159 drivers/scsi/be2iscsi/be_mgmt.c 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
req               164 drivers/scsi/be2iscsi/be_mgmt.c 		req->ip_address.ip_type = BEISCSI_IP_TYPE_V4;
req               165 drivers/scsi/be2iscsi/be_mgmt.c 		req->ip_address.addr[0] = s_addr & 0x000000ff;
req               166 drivers/scsi/be2iscsi/be_mgmt.c 		req->ip_address.addr[1] = (s_addr & 0x0000ff00) >> 8;
req               167 drivers/scsi/be2iscsi/be_mgmt.c 		req->ip_address.addr[2] = (s_addr & 0x00ff0000) >> 16;
req               168 drivers/scsi/be2iscsi/be_mgmt.c 		req->ip_address.addr[3] = (s_addr & 0xff000000) >> 24;
req               169 drivers/scsi/be2iscsi/be_mgmt.c 		req->tcp_port = ntohs(daddr_in->sin_port);
req               175 drivers/scsi/be2iscsi/be_mgmt.c 		req->ip_address.ip_type = BEISCSI_IP_TYPE_V6;
req               176 drivers/scsi/be2iscsi/be_mgmt.c 		memcpy(&req->ip_address.addr,
req               178 drivers/scsi/be2iscsi/be_mgmt.c 		req->tcp_port = ntohs(daddr_in6->sin6_port);
req               184 drivers/scsi/be2iscsi/be_mgmt.c 	req->cid = cid;
req               188 drivers/scsi/be2iscsi/be_mgmt.c 	req->cq_id = phwi_context->be_cq[i].id;
req               190 drivers/scsi/be2iscsi/be_mgmt.c 		    "BG_%d : i=%d cq_id=%d\n", i, req->cq_id);
req               191 drivers/scsi/be2iscsi/be_mgmt.c 	req->defq_id = def_hdr_id;
req               192 drivers/scsi/be2iscsi/be_mgmt.c 	req->hdr_ring_id = def_hdr_id;
req               193 drivers/scsi/be2iscsi/be_mgmt.c 	req->data_ring_id = def_data_id;
req               194 drivers/scsi/be2iscsi/be_mgmt.c 	req->do_offload = 1;
req               195 drivers/scsi/be2iscsi/be_mgmt.c 	req->dataout_template_pa.lo = ptemplate_address->lo;
req               196 drivers/scsi/be2iscsi/be_mgmt.c 	req->dataout_template_pa.hi = ptemplate_address->hi;
req               202 drivers/scsi/be2iscsi/be_mgmt.c 		req->hdr.version = MBX_CMD_VER1;
req               203 drivers/scsi/be2iscsi/be_mgmt.c 		req->tcp_window_size = 0x8000;
req               204 drivers/scsi/be2iscsi/be_mgmt.c 		req->tcp_window_scale_count = 2;
req               328 drivers/scsi/be2iscsi/be_mgmt.c 	struct be_cmd_req_modify_eq_delay *req;
req               333 drivers/scsi/be2iscsi/be_mgmt.c 			OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
req               337 drivers/scsi/be2iscsi/be_mgmt.c 	req = nonemb_cmd.va;
req               338 drivers/scsi/be2iscsi/be_mgmt.c 	req->num_eq = cpu_to_le32(num);
req               340 drivers/scsi/be2iscsi/be_mgmt.c 		req->delay[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
req               341 drivers/scsi/be2iscsi/be_mgmt.c 		req->delay[i].phase = 0;
req               342 drivers/scsi/be2iscsi/be_mgmt.c 		req->delay[i].delay_multiplier =
req               361 drivers/scsi/be2iscsi/be_mgmt.c 	struct be_cmd_hba_name *req;
req               369 drivers/scsi/be2iscsi/be_mgmt.c 	req = nonemb_cmd.va;
req               371 drivers/scsi/be2iscsi/be_mgmt.c 		req->hdr.version = 1;
req               388 drivers/scsi/be2iscsi/be_mgmt.c 	struct be_cmd_get_all_if_id_req *req;
req               401 drivers/scsi/be2iscsi/be_mgmt.c 	req = embedded_payload(wrb);
req               402 drivers/scsi/be2iscsi/be_mgmt.c 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
req               403 drivers/scsi/be2iscsi/be_mgmt.c 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
req               405 drivers/scsi/be2iscsi/be_mgmt.c 			   sizeof(*req));
req               436 drivers/scsi/be2iscsi/be_mgmt.c 	struct be_cmd_set_def_gateway_req *req;
req               442 drivers/scsi/be2iscsi/be_mgmt.c 			sizeof(*req));
req               446 drivers/scsi/be2iscsi/be_mgmt.c 	req = nonemb_cmd.va;
req               447 drivers/scsi/be2iscsi/be_mgmt.c 	req->action = action;
req               448 drivers/scsi/be2iscsi/be_mgmt.c 	req->ip_addr.ip_type = ip_type;
req               449 drivers/scsi/be2iscsi/be_mgmt.c 	memcpy(req->ip_addr.addr, gw,
req               488 drivers/scsi/be2iscsi/be_mgmt.c 	struct be_cmd_get_def_gateway_req *req;
req               498 drivers/scsi/be2iscsi/be_mgmt.c 	req = nonemb_cmd.va;
req               499 drivers/scsi/be2iscsi/be_mgmt.c 	req->ip_type = ip_type;
req               509 drivers/scsi/be2iscsi/be_mgmt.c 	struct be_cmd_set_ip_addr_req *req;
req               515 drivers/scsi/be2iscsi/be_mgmt.c 			sizeof(*req));
req               519 drivers/scsi/be2iscsi/be_mgmt.c 	req = nonemb_cmd.va;
req               520 drivers/scsi/be2iscsi/be_mgmt.c 	req->ip_params.record_entry_count = 1;
req               521 drivers/scsi/be2iscsi/be_mgmt.c 	req->ip_params.ip_record.action = IP_ACTION_DEL;
req               522 drivers/scsi/be2iscsi/be_mgmt.c 	req->ip_params.ip_record.interface_hndl =
req               524 drivers/scsi/be2iscsi/be_mgmt.c 	req->ip_params.ip_record.ip_addr.size_of_structure =
req               526 drivers/scsi/be2iscsi/be_mgmt.c 	req->ip_params.ip_record.ip_addr.ip_type = if_info->ip_addr.ip_type;
req               527 drivers/scsi/be2iscsi/be_mgmt.c 	memcpy(req->ip_params.ip_record.ip_addr.addr,
req               530 drivers/scsi/be2iscsi/be_mgmt.c 	memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
req               534 drivers/scsi/be2iscsi/be_mgmt.c 	if (rc < 0 || req->ip_params.ip_record.status) {
req               537 drivers/scsi/be2iscsi/be_mgmt.c 			    rc, req->ip_params.ip_record.status);
req               546 drivers/scsi/be2iscsi/be_mgmt.c 	struct be_cmd_set_ip_addr_req *req;
req               553 drivers/scsi/be2iscsi/be_mgmt.c 			sizeof(*req));
req               557 drivers/scsi/be2iscsi/be_mgmt.c 	req = nonemb_cmd.va;
req               558 drivers/scsi/be2iscsi/be_mgmt.c 	req->ip_params.record_entry_count = 1;
req               559 drivers/scsi/be2iscsi/be_mgmt.c 	req->ip_params.ip_record.action = IP_ACTION_ADD;
req               560 drivers/scsi/be2iscsi/be_mgmt.c 	req->ip_params.ip_record.interface_hndl =
req               562 drivers/scsi/be2iscsi/be_mgmt.c 	req->ip_params.ip_record.ip_addr.size_of_structure =
req               564 drivers/scsi/be2iscsi/be_mgmt.c 	req->ip_params.ip_record.ip_addr.ip_type = ip_type;
req               566 drivers/scsi/be2iscsi/be_mgmt.c 	memcpy(req->ip_params.ip_record.ip_addr.addr, ip, ip_len);
req               568 drivers/scsi/be2iscsi/be_mgmt.c 		memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
req               576 drivers/scsi/be2iscsi/be_mgmt.c 	if (rc < 0 || req->ip_params.ip_record.status) {
req               579 drivers/scsi/be2iscsi/be_mgmt.c 			    rc, req->ip_params.ip_record.status);
req               580 drivers/scsi/be2iscsi/be_mgmt.c 		if (req->ip_params.ip_record.status)
req               736 drivers/scsi/be2iscsi/be_mgmt.c 	struct be_cmd_get_if_info_req *req;
req               753 drivers/scsi/be2iscsi/be_mgmt.c 		req = nonemb_cmd.va;
req               754 drivers/scsi/be2iscsi/be_mgmt.c 		req->interface_hndl = phba->interface_handle;
req               755 drivers/scsi/be2iscsi/be_mgmt.c 		req->ip_type = ip_type;
req               922 drivers/scsi/be2iscsi/be_mgmt.c 	struct be_cmd_req_logout_fw_sess *req;
req               932 drivers/scsi/be2iscsi/be_mgmt.c 	req = embedded_payload(wrb);
req               933 drivers/scsi/be2iscsi/be_mgmt.c 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
req               934 drivers/scsi/be2iscsi/be_mgmt.c 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
req               938 drivers/scsi/be2iscsi/be_mgmt.c 	req->session_handle = phba->boot_struct.boot_sess.session_handle;
req               961 drivers/scsi/be2iscsi/be_mgmt.c 	struct be_cmd_reopen_session_req *req;
req               971 drivers/scsi/be2iscsi/be_mgmt.c 	req = embedded_payload(wrb);
req               972 drivers/scsi/be2iscsi/be_mgmt.c 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
req               973 drivers/scsi/be2iscsi/be_mgmt.c 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
req               976 drivers/scsi/be2iscsi/be_mgmt.c 	req->reopen_type = BE_REOPEN_BOOT_SESSIONS;
req               977 drivers/scsi/be2iscsi/be_mgmt.c 	req->session_handle = BE_BOOT_INVALID_SHANDLE;
req              1001 drivers/scsi/be2iscsi/be_mgmt.c 	struct be_cmd_get_session_req *req;
req              1025 drivers/scsi/be2iscsi/be_mgmt.c 	req = nonemb_cmd->va;
req              1026 drivers/scsi/be2iscsi/be_mgmt.c 	memset(req, 0, sizeof(*req));
req              1028 drivers/scsi/be2iscsi/be_mgmt.c 	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
req              1029 drivers/scsi/be2iscsi/be_mgmt.c 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
req              1032 drivers/scsi/be2iscsi/be_mgmt.c 	req->session_handle = phba->boot_struct.s_handle;
req              1050 drivers/scsi/be2iscsi/be_mgmt.c 	struct be_cmd_get_boot_target_req *req;
req              1060 drivers/scsi/be2iscsi/be_mgmt.c 	req = embedded_payload(wrb);
req              1061 drivers/scsi/be2iscsi/be_mgmt.c 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
req              1062 drivers/scsi/be2iscsi/be_mgmt.c 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
req              1434 drivers/scsi/be2iscsi/be_mgmt.c 	struct be_invalidate_connection_params_in *req;
req              1446 drivers/scsi/be2iscsi/be_mgmt.c 	req = embedded_payload(wrb);
req              1449 drivers/scsi/be2iscsi/be_mgmt.c 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
req              1451 drivers/scsi/be2iscsi/be_mgmt.c 			   sizeof(*req));
req              1452 drivers/scsi/be2iscsi/be_mgmt.c 	req->session_handle = beiscsi_ep->fw_handle;
req              1453 drivers/scsi/be2iscsi/be_mgmt.c 	req->cid = beiscsi_ep->ep_cid;
req              1455 drivers/scsi/be2iscsi/be_mgmt.c 		req->cleanup_type = BE_CLEANUP_TYPE_INVALIDATE;
req              1457 drivers/scsi/be2iscsi/be_mgmt.c 		req->cleanup_type = BE_CLEANUP_TYPE_ISSUE_TCP_RST;
req              1462 drivers/scsi/be2iscsi/be_mgmt.c 	req->save_cfg = 0;
req              1473 drivers/scsi/be2iscsi/be_mgmt.c 	struct be_tcp_upload_params_in *req;
req              1483 drivers/scsi/be2iscsi/be_mgmt.c 	req = embedded_payload(wrb);
req              1485 drivers/scsi/be2iscsi/be_mgmt.c 	be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD,
req              1486 drivers/scsi/be2iscsi/be_mgmt.c 			   OPCODE_COMMON_TCP_UPLOAD, sizeof(*req));
req              1487 drivers/scsi/be2iscsi/be_mgmt.c 	req->id = beiscsi_ep->ep_cid;
req              1489 drivers/scsi/be2iscsi/be_mgmt.c 		req->upload_type = BE_UPLOAD_TYPE_GRACEFUL;
req              1491 drivers/scsi/be2iscsi/be_mgmt.c 		req->upload_type = BE_UPLOAD_TYPE_ABORT;
req              1502 drivers/scsi/be2iscsi/be_mgmt.c 	struct invldt_cmds_params_in *req;
req              1531 drivers/scsi/be2iscsi/be_mgmt.c 	req = nonemb_cmd.va;
req              1533 drivers/scsi/be2iscsi/be_mgmt.c 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
req              1535 drivers/scsi/be2iscsi/be_mgmt.c 			sizeof(*req));
req              1536 drivers/scsi/be2iscsi/be_mgmt.c 	req->ref_handle = 0;
req              1537 drivers/scsi/be2iscsi/be_mgmt.c 	req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE;
req              1539 drivers/scsi/be2iscsi/be_mgmt.c 		req->table[i].icd = inv_tbl[i].icd;
req              1540 drivers/scsi/be2iscsi/be_mgmt.c 		req->table[i].cid = inv_tbl[i].cid;
req              1541 drivers/scsi/be2iscsi/be_mgmt.c 		req->icd_count++;
req              2990 drivers/scsi/bfa/bfa_ioc.c 	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
req              2992 drivers/scsi/bfa/bfa_ioc.c 	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
req              2994 drivers/scsi/bfa/bfa_ioc.c 	req->clscode = cpu_to_be16(ioc->clscode);
req              3759 drivers/scsi/bfa/bfa_ioc.c 	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
req              3761 drivers/scsi/bfa/bfa_ioc.c 	bfa_trc(sfp, req->memtype);
req              3764 drivers/scsi/bfa/bfa_ioc.c 	bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
req              3777 drivers/scsi/bfa/bfa_ioc.c 	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
req              3784 drivers/scsi/bfa/bfa_ioc.c 	req->memtype = memtype;
req              3787 drivers/scsi/bfa/bfa_ioc.c 	bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
req              3896 drivers/scsi/bfa/bfa_ioc.c 	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
req              3904 drivers/scsi/bfa/bfa_ioc.c 	req->memtype = 0;
req               537 drivers/scsi/bfa/bfa_svc.c bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
req               541 drivers/scsi/bfa/bfa_svc.c 	if (req)
req               899 drivers/scsi/bfa/bfa_svc.c 		bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
req               905 drivers/scsi/bfa/bfa_svc.c 	fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
req              1068 drivers/scsi/bfa/bfa_svc.c 	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
req              1072 drivers/scsi/bfa/bfa_svc.c 	if (req)
req              1088 drivers/scsi/bfa/bfa_svc.c 	if (req)
req              5774 drivers/scsi/bfa/bfa_svc.c 	struct bfi_diag_qtest_req_s *req;
req              5776 drivers/scsi/bfa/bfa_svc.c 	req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
req              5777 drivers/scsi/bfa/bfa_svc.c 	if (!req)
req              5781 drivers/scsi/bfa/bfa_svc.c 	bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
req              5785 drivers/scsi/bfa/bfa_svc.c 		req->data[i] = QTEST_PAT_DEFAULT;
req              5789 drivers/scsi/bfa/bfa_svc.c 	bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
req              6134 drivers/scsi/bfa/bfa_svc.c 					enum bfi_dport_req req);
req              6479 drivers/scsi/bfa/bfa_svc.c bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
req              6494 drivers/scsi/bfa/bfa_svc.c 	m->req  = req;
req              6495 drivers/scsi/bfa/bfa_svc.c 	if ((req == BFI_DPORT_ENABLE) || (req == BFI_DPORT_START)) {
req               608 drivers/scsi/bfa/bfa_svc.h 				  bfa_boolean_t req);
req               617 drivers/scsi/bfa/bfa_svc.h 				bfa_boolean_t req);
req              1123 drivers/scsi/bfa/bfi.h 	u8			req;	/* request 1: enable 0: disable	*/
req              1168 drivers/scsi/bfa/bfi.h 	struct bfi_diag_dport_req_s	req;
req               264 drivers/scsi/bnx2i/bnx2i.h 	struct bnx2i_cmd_request req;
req               511 drivers/scsi/bnx2i/bnx2i_hwi.c 	memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request));
req               735 drivers/scsi/bnx2i/bnx2i_hwi.c 	cmd_cleanup->itt = cmd->req.itt;
req              1356 drivers/scsi/bnx2i/bnx2i_hwi.c 	if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
req              1360 drivers/scsi/bnx2i/bnx2i_hwi.c 			bnx2i_cmd->req.total_data_transfer_length;
req              1364 drivers/scsi/bnx2i/bnx2i_hwi.c 			     bnx2i_cmd->req.total_data_transfer_length);
req              1371 drivers/scsi/bnx2i/bnx2i_hwi.c 			bnx2i_cmd->req.total_data_transfer_length;
req              1375 drivers/scsi/bnx2i/bnx2i_hwi.c 			     bnx2i_cmd->req.total_data_transfer_length);
req                90 drivers/scsi/bnx2i/bnx2i_iscsi.c 	u32 cmd_len = cmd->req.total_data_transfer_length;
req               106 drivers/scsi/bnx2i/bnx2i_iscsi.c 		cmd->req.ud_buffer_offset = start_bd_offset;
req               107 drivers/scsi/bnx2i/bnx2i_iscsi.c 		cmd->req.ud_start_bd_index = start_bd_idx;
req               129 drivers/scsi/bnx2i/bnx2i_iscsi.c 		cmd->req.sd_buffer_offset = start_bd_offset;
req               130 drivers/scsi/bnx2i/bnx2i_iscsi.c 		cmd->req.sd_start_bd_index = start_bd_idx;
req               219 drivers/scsi/bnx2i/bnx2i_iscsi.c 	memset(&cmd->req, 0x00, sizeof(cmd->req));
req               220 drivers/scsi/bnx2i/bnx2i_iscsi.c 	cmd->req.op_code = 0xFF;
req               221 drivers/scsi/bnx2i/bnx2i_iscsi.c 	cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
req               222 drivers/scsi/bnx2i/bnx2i_iscsi.c 	cmd->req.bd_list_addr_hi =
req              1140 drivers/scsi/bnx2i/bnx2i_iscsi.c 	cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
req              1141 drivers/scsi/bnx2i/bnx2i_iscsi.c 	cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);
req              1145 drivers/scsi/bnx2i/bnx2i_iscsi.c 	dstp = (u32 *) cmd->req.cdb;
req              1245 drivers/scsi/bnx2i/bnx2i_iscsi.c 	cmd->req.op_code = ISCSI_OP_SCSI_CMD;
req              1248 drivers/scsi/bnx2i/bnx2i_iscsi.c 	cmd->req.total_data_transfer_length = scsi_bufflen(sc);
req              1249 drivers/scsi/bnx2i/bnx2i_iscsi.c 	cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);
req              1254 drivers/scsi/bnx2i/bnx2i_iscsi.c 	cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
req              1256 drivers/scsi/bnx2i/bnx2i_iscsi.c 		cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
req              1257 drivers/scsi/bnx2i/bnx2i_iscsi.c 		cmd->req.itt = task->itt |
req              1262 drivers/scsi/bnx2i/bnx2i_iscsi.c 			cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
req              1263 drivers/scsi/bnx2i/bnx2i_iscsi.c 		cmd->req.itt = task->itt |
req              1267 drivers/scsi/bnx2i/bnx2i_iscsi.c 	cmd->req.num_bds = cmd->io_tbl.bd_valid;
req              1269 drivers/scsi/bnx2i/bnx2i_iscsi.c 		cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
req              1270 drivers/scsi/bnx2i/bnx2i_iscsi.c 		cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
req              1271 drivers/scsi/bnx2i/bnx2i_iscsi.c 		cmd->req.num_bds = 1;
req               163 drivers/scsi/csiostor/csio_scsi.c csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
req               166 drivers/scsi/csiostor/csio_scsi.c 	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
req               178 drivers/scsi/csiostor/csio_scsi.c 		if (req->nsge)
req               179 drivers/scsi/csiostor/csio_scsi.c 			if (req->datadir == DMA_TO_DEVICE)
req               201 drivers/scsi/csiostor/csio_scsi.c csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size)
req               203 drivers/scsi/csiostor/csio_scsi.c 	struct csio_hw *hw = req->lnode->hwp;
req               204 drivers/scsi/csiostor/csio_scsi.c 	struct csio_rnode *rn = req->rnode;
req               215 drivers/scsi/csiostor/csio_scsi.c 	wr->cookie = (uintptr_t) req;
req               216 drivers/scsi/csiostor/csio_scsi.c 	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
req               217 drivers/scsi/csiostor/csio_scsi.c 	wr->tmo_val = (uint8_t) req->tmo;
req               222 drivers/scsi/csiostor/csio_scsi.c 	dma_buf = &req->dma_buf;
req               236 drivers/scsi/csiostor/csio_scsi.c 	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)addr +
req               255 drivers/scsi/csiostor/csio_scsi.c csio_scsi_cmd(struct csio_ioreq *req)
req               258 drivers/scsi/csiostor/csio_scsi.c 	struct csio_hw *hw = req->lnode->hwp;
req               262 drivers/scsi/csiostor/csio_scsi.c 	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
req               263 drivers/scsi/csiostor/csio_scsi.c 	if (unlikely(req->drv_status != 0))
req               268 drivers/scsi/csiostor/csio_scsi.c 		csio_scsi_init_cmd_wr(req, wrp.addr1, size);
req               270 drivers/scsi/csiostor/csio_scsi.c 		uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
req               276 drivers/scsi/csiostor/csio_scsi.c 		csio_scsi_init_cmd_wr(req, (void *)tmpwr, size);
req               290 drivers/scsi/csiostor/csio_scsi.c csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
req               299 drivers/scsi/csiostor/csio_scsi.c 	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
req               302 drivers/scsi/csiostor/csio_scsi.c 				     ULPTX_NSGE_V(req->nsge));
req               304 drivers/scsi/csiostor/csio_scsi.c 	if (likely(!req->dcopy)) {
req               305 drivers/scsi/csiostor/csio_scsi.c 		scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
req               328 drivers/scsi/csiostor/csio_scsi.c 		list_for_each(tmp, &req->gen_list) {
req               360 drivers/scsi/csiostor/csio_scsi.c csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
req               362 drivers/scsi/csiostor/csio_scsi.c 	struct csio_hw *hw = req->lnode->hwp;
req               363 drivers/scsi/csiostor/csio_scsi.c 	struct csio_rnode *rn = req->rnode;
req               368 drivers/scsi/csiostor/csio_scsi.c 	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
req               374 drivers/scsi/csiostor/csio_scsi.c 	wr->cookie = (uintptr_t)req;
req               375 drivers/scsi/csiostor/csio_scsi.c 	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
req               376 drivers/scsi/csiostor/csio_scsi.c 	wr->tmo_val = (uint8_t)(req->tmo);
req               381 drivers/scsi/csiostor/csio_scsi.c 	dma_buf = &req->dma_buf;
req               393 drivers/scsi/csiostor/csio_scsi.c 	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
req               401 drivers/scsi/csiostor/csio_scsi.c 	csio_scsi_init_ultptx_dsgl(hw, req, sgl);
req               413 drivers/scsi/csiostor/csio_scsi.c csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
req               415 drivers/scsi/csiostor/csio_scsi.c 	struct csio_hw *hw = req->lnode->hwp;
req               416 drivers/scsi/csiostor/csio_scsi.c 	struct csio_rnode *rn = req->rnode;
req               421 drivers/scsi/csiostor/csio_scsi.c 	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
req               427 drivers/scsi/csiostor/csio_scsi.c 	wr->cookie = (uintptr_t)req;
req               428 drivers/scsi/csiostor/csio_scsi.c 	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
req               429 drivers/scsi/csiostor/csio_scsi.c 	wr->tmo_val = (uint8_t)(req->tmo);
req               434 drivers/scsi/csiostor/csio_scsi.c 	dma_buf = &req->dma_buf;
req               446 drivers/scsi/csiostor/csio_scsi.c 	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
req               454 drivers/scsi/csiostor/csio_scsi.c 	csio_scsi_init_ultptx_dsgl(hw, req, sgl);
req               458 drivers/scsi/csiostor/csio_scsi.c #define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm)				       \
req               464 drivers/scsi/csiostor/csio_scsi.c 	if (unlikely((req)->nsge > 1))				               \
req               466 drivers/scsi/csiostor/csio_scsi.c 				(ALIGN(((req)->nsge - 1), 2) / 2));            \
req               479 drivers/scsi/csiostor/csio_scsi.c csio_scsi_read(struct csio_ioreq *req)
req               483 drivers/scsi/csiostor/csio_scsi.c 	struct csio_hw *hw = req->lnode->hwp;
req               486 drivers/scsi/csiostor/csio_scsi.c 	CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len);
req               489 drivers/scsi/csiostor/csio_scsi.c 	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
req               490 drivers/scsi/csiostor/csio_scsi.c 	if (likely(req->drv_status == 0)) {
req               493 drivers/scsi/csiostor/csio_scsi.c 			csio_scsi_init_read_wr(req, wrp.addr1, size);
req               495 drivers/scsi/csiostor/csio_scsi.c 			uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
req               500 drivers/scsi/csiostor/csio_scsi.c 			csio_scsi_init_read_wr(req, (void *)tmpwr, size);
req               516 drivers/scsi/csiostor/csio_scsi.c csio_scsi_write(struct csio_ioreq *req)
req               520 drivers/scsi/csiostor/csio_scsi.c 	struct csio_hw *hw = req->lnode->hwp;
req               523 drivers/scsi/csiostor/csio_scsi.c 	CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len);
req               526 drivers/scsi/csiostor/csio_scsi.c 	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
req               527 drivers/scsi/csiostor/csio_scsi.c 	if (likely(req->drv_status == 0)) {
req               530 drivers/scsi/csiostor/csio_scsi.c 			csio_scsi_init_write_wr(req, wrp.addr1, size);
req               532 drivers/scsi/csiostor/csio_scsi.c 			uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
req               537 drivers/scsi/csiostor/csio_scsi.c 			csio_scsi_init_write_wr(req, (void *)tmpwr, size);
req               553 drivers/scsi/csiostor/csio_scsi.c csio_setup_ddp(struct csio_scsim *scsim, struct csio_ioreq *req)
req               556 drivers/scsi/csiostor/csio_scsi.c 	struct csio_hw *hw = req->lnode->hwp;
req               559 drivers/scsi/csiostor/csio_scsi.c 	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
req               569 drivers/scsi/csiostor/csio_scsi.c 	scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
req               583 drivers/scsi/csiostor/csio_scsi.c 		if ((i != (req->nsge - 1)) &&
req               593 drivers/scsi/csiostor/csio_scsi.c 	req->dcopy = 0;
req               594 drivers/scsi/csiostor/csio_scsi.c 	csio_scsi_read(req);
req               604 drivers/scsi/csiostor/csio_scsi.c 	req->dcopy = 1;
req               607 drivers/scsi/csiostor/csio_scsi.c 	INIT_LIST_HEAD(&req->gen_list);
req               615 drivers/scsi/csiostor/csio_scsi.c 			req->drv_status = -EBUSY;
req               620 drivers/scsi/csiostor/csio_scsi.c 		list_add_tail(&dma_buf->list, &req->gen_list);
req               624 drivers/scsi/csiostor/csio_scsi.c 	if (!req->drv_status) {
req               626 drivers/scsi/csiostor/csio_scsi.c 		req->nsge = i;
req               627 drivers/scsi/csiostor/csio_scsi.c 		csio_scsi_read(req);
req               633 drivers/scsi/csiostor/csio_scsi.c 		csio_put_scsi_ddp_list(scsim, &req->gen_list, i);
req               646 drivers/scsi/csiostor/csio_scsi.c csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size,
req               649 drivers/scsi/csiostor/csio_scsi.c 	struct csio_hw *hw = req->lnode->hwp;
req               650 drivers/scsi/csiostor/csio_scsi.c 	struct csio_rnode *rn = req->rnode;
req               658 drivers/scsi/csiostor/csio_scsi.c 	wr->cookie = (uintptr_t) req;
req               659 drivers/scsi/csiostor/csio_scsi.c 	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
req               660 drivers/scsi/csiostor/csio_scsi.c 	wr->tmo_val = (uint8_t) req->tmo;
req               670 drivers/scsi/csiostor/csio_scsi.c 	wr->t_cookie = (uintptr_t) req;
req               674 drivers/scsi/csiostor/csio_scsi.c csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort)
req               677 drivers/scsi/csiostor/csio_scsi.c 	struct csio_hw *hw = req->lnode->hwp;
req               680 drivers/scsi/csiostor/csio_scsi.c 	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
req               681 drivers/scsi/csiostor/csio_scsi.c 	if (req->drv_status != 0)
req               686 drivers/scsi/csiostor/csio_scsi.c 		csio_scsi_init_abrt_cls_wr(req, wrp.addr1, size, abort);
req               688 drivers/scsi/csiostor/csio_scsi.c 		uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
req               693 drivers/scsi/csiostor/csio_scsi.c 		csio_scsi_init_abrt_cls_wr(req, (void *)tmpwr, size, abort);
req               703 drivers/scsi/csiostor/csio_scsi.c csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt)
req               705 drivers/scsi/csiostor/csio_scsi.c 	struct csio_hw *hw = req->lnode->hwp;
req               711 drivers/scsi/csiostor/csio_scsi.c 		if (req->nsge) {
req               712 drivers/scsi/csiostor/csio_scsi.c 			if (req->datadir == DMA_TO_DEVICE) {
req               713 drivers/scsi/csiostor/csio_scsi.c 				req->dcopy = 0;
req               714 drivers/scsi/csiostor/csio_scsi.c 				csio_scsi_write(req);
req               716 drivers/scsi/csiostor/csio_scsi.c 				csio_setup_ddp(scsim, req);
req               718 drivers/scsi/csiostor/csio_scsi.c 			csio_scsi_cmd(req);
req               721 drivers/scsi/csiostor/csio_scsi.c 		if (likely(req->drv_status == 0)) {
req               723 drivers/scsi/csiostor/csio_scsi.c 			csio_set_state(&req->sm, csio_scsis_io_active);
req               724 drivers/scsi/csiostor/csio_scsi.c 			list_add_tail(&req->sm.sm_list, &scsim->active_q);
req               725 drivers/scsi/csiostor/csio_scsi.c 			csio_wr_issue(hw, req->eq_idx, false);
req               733 drivers/scsi/csiostor/csio_scsi.c 		csio_scsi_cmd(req);
req               734 drivers/scsi/csiostor/csio_scsi.c 		if (req->drv_status == 0) {
req               743 drivers/scsi/csiostor/csio_scsi.c 			csio_set_state(&req->sm, csio_scsis_tm_active);
req               744 drivers/scsi/csiostor/csio_scsi.c 			list_add_tail(&req->sm.sm_list, &scsim->active_q);
req               745 drivers/scsi/csiostor/csio_scsi.c 			csio_wr_issue(hw, req->eq_idx, false);
req               762 drivers/scsi/csiostor/csio_scsi.c 		req->drv_status = -EINVAL;
req               763 drivers/scsi/csiostor/csio_scsi.c 		csio_warn(hw, "Trying to abort/close completed IO:%p!\n", req);
req               767 drivers/scsi/csiostor/csio_scsi.c 		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
req               773 drivers/scsi/csiostor/csio_scsi.c csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
req               775 drivers/scsi/csiostor/csio_scsi.c 	struct csio_hw *hw = req->lnode->hwp;
req               782 drivers/scsi/csiostor/csio_scsi.c 		list_del_init(&req->sm.sm_list);
req               783 drivers/scsi/csiostor/csio_scsi.c 		csio_set_state(&req->sm, csio_scsis_uninit);
req               799 drivers/scsi/csiostor/csio_scsi.c 		if (unlikely(req->wr_status != FW_SUCCESS)) {
req               800 drivers/scsi/csiostor/csio_scsi.c 			rn = req->rnode;
req               805 drivers/scsi/csiostor/csio_scsi.c 			if (csio_scsi_itnexus_loss_error(req->wr_status) &&
req               807 drivers/scsi/csiostor/csio_scsi.c 				csio_set_state(&req->sm,
req               809 drivers/scsi/csiostor/csio_scsi.c 				list_add_tail(&req->sm.sm_list,
req               817 drivers/scsi/csiostor/csio_scsi.c 		csio_scsi_abrt_cls(req, SCSI_ABORT);
req               818 drivers/scsi/csiostor/csio_scsi.c 		if (req->drv_status == 0) {
req               819 drivers/scsi/csiostor/csio_scsi.c 			csio_wr_issue(hw, req->eq_idx, false);
req               820 drivers/scsi/csiostor/csio_scsi.c 			csio_set_state(&req->sm, csio_scsis_aborting);
req               825 drivers/scsi/csiostor/csio_scsi.c 		csio_scsi_abrt_cls(req, SCSI_CLOSE);
req               826 drivers/scsi/csiostor/csio_scsi.c 		if (req->drv_status == 0) {
req               827 drivers/scsi/csiostor/csio_scsi.c 			csio_wr_issue(hw, req->eq_idx, false);
req               828 drivers/scsi/csiostor/csio_scsi.c 			csio_set_state(&req->sm, csio_scsis_closing);
req               833 drivers/scsi/csiostor/csio_scsi.c 		req->wr_status = FW_HOSTERROR;
req               835 drivers/scsi/csiostor/csio_scsi.c 		csio_set_state(&req->sm, csio_scsis_uninit);
req               839 drivers/scsi/csiostor/csio_scsi.c 		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
req               845 drivers/scsi/csiostor/csio_scsi.c csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
req               847 drivers/scsi/csiostor/csio_scsi.c 	struct csio_hw *hw = req->lnode->hwp;
req               853 drivers/scsi/csiostor/csio_scsi.c 		list_del_init(&req->sm.sm_list);
req               854 drivers/scsi/csiostor/csio_scsi.c 		csio_set_state(&req->sm, csio_scsis_uninit);
req               859 drivers/scsi/csiostor/csio_scsi.c 		csio_scsi_abrt_cls(req, SCSI_ABORT);
req               860 drivers/scsi/csiostor/csio_scsi.c 		if (req->drv_status == 0) {
req               861 drivers/scsi/csiostor/csio_scsi.c 			csio_wr_issue(hw, req->eq_idx, false);
req               862 drivers/scsi/csiostor/csio_scsi.c 			csio_set_state(&req->sm, csio_scsis_aborting);
req               868 drivers/scsi/csiostor/csio_scsi.c 		csio_scsi_abrt_cls(req, SCSI_CLOSE);
req               869 drivers/scsi/csiostor/csio_scsi.c 		if (req->drv_status == 0) {
req               870 drivers/scsi/csiostor/csio_scsi.c 			csio_wr_issue(hw, req->eq_idx, false);
req               871 drivers/scsi/csiostor/csio_scsi.c 			csio_set_state(&req->sm, csio_scsis_closing);
req               876 drivers/scsi/csiostor/csio_scsi.c 		req->wr_status = FW_HOSTERROR;
req               878 drivers/scsi/csiostor/csio_scsi.c 		csio_set_state(&req->sm, csio_scsis_uninit);
req               882 drivers/scsi/csiostor/csio_scsi.c 		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
req               888 drivers/scsi/csiostor/csio_scsi.c csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)
req               890 drivers/scsi/csiostor/csio_scsi.c 	struct csio_hw *hw = req->lnode->hwp;
req               897 drivers/scsi/csiostor/csio_scsi.c 			 "in aborting st\n", req, req->wr_status);
req               907 drivers/scsi/csiostor/csio_scsi.c 		req->drv_status = -ECANCELED;
req               917 drivers/scsi/csiostor/csio_scsi.c 			 req, req->wr_status, req->drv_status);
req               922 drivers/scsi/csiostor/csio_scsi.c 		if (req->drv_status != -ECANCELED) {
req               925 drivers/scsi/csiostor/csio_scsi.c 				   " req:%p\n", req);
req               952 drivers/scsi/csiostor/csio_scsi.c 		if ((req->wr_status == FW_SUCCESS) ||
req               953 drivers/scsi/csiostor/csio_scsi.c 		    (req->wr_status == FW_EINVAL) ||
req               954 drivers/scsi/csiostor/csio_scsi.c 		    csio_scsi_itnexus_loss_error(req->wr_status))
req               955 drivers/scsi/csiostor/csio_scsi.c 			req->wr_status = FW_SCSI_ABORT_REQUESTED;
req               958 drivers/scsi/csiostor/csio_scsi.c 		list_del_init(&req->sm.sm_list);
req               959 drivers/scsi/csiostor/csio_scsi.c 		csio_set_state(&req->sm, csio_scsis_uninit);
req               963 drivers/scsi/csiostor/csio_scsi.c 		req->wr_status = FW_HOSTERROR;
req               965 drivers/scsi/csiostor/csio_scsi.c 		csio_set_state(&req->sm, csio_scsis_uninit);
req               979 drivers/scsi/csiostor/csio_scsi.c 		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
req               985 drivers/scsi/csiostor/csio_scsi.c csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt)
req               987 drivers/scsi/csiostor/csio_scsi.c 	struct csio_hw *hw = req->lnode->hwp;
req               994 drivers/scsi/csiostor/csio_scsi.c 			 "in closing st\n", req, req->wr_status);
req              1004 drivers/scsi/csiostor/csio_scsi.c 		req->drv_status = -ECANCELED;
req              1012 drivers/scsi/csiostor/csio_scsi.c 		if (req->drv_status != -ECANCELED) {
req              1015 drivers/scsi/csiostor/csio_scsi.c 				   " req:%p\n", req);
req              1024 drivers/scsi/csiostor/csio_scsi.c 		CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) ||
req              1025 drivers/scsi/csiostor/csio_scsi.c 					(req->wr_status == FW_EINVAL));
req              1026 drivers/scsi/csiostor/csio_scsi.c 		req->wr_status = FW_SCSI_CLOSE_REQUESTED;
req              1029 drivers/scsi/csiostor/csio_scsi.c 		list_del_init(&req->sm.sm_list);
req              1030 drivers/scsi/csiostor/csio_scsi.c 		csio_set_state(&req->sm, csio_scsis_uninit);
req              1037 drivers/scsi/csiostor/csio_scsi.c 		req->wr_status = FW_HOSTERROR;
req              1039 drivers/scsi/csiostor/csio_scsi.c 		csio_set_state(&req->sm, csio_scsis_uninit);
req              1043 drivers/scsi/csiostor/csio_scsi.c 		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
req              1049 drivers/scsi/csiostor/csio_scsi.c csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt)
req              1070 drivers/scsi/csiostor/csio_scsi.c 		req->drv_status = 0;
req              1073 drivers/scsi/csiostor/csio_scsi.c 		csio_set_state(&req->sm, csio_scsis_uninit);
req              1076 drivers/scsi/csiostor/csio_scsi.c 		csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n",
req              1077 drivers/scsi/csiostor/csio_scsi.c 			 evt, req);
req              1489 drivers/scsi/csiostor/csio_scsi.c csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req)
req              1491 drivers/scsi/csiostor/csio_scsi.c 	struct scsi_cmnd *scmnd  = (struct scsi_cmnd *)csio_scsi_cmnd(req);
req              1504 drivers/scsi/csiostor/csio_scsi.c 	dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list);
req              1531 drivers/scsi/csiostor/csio_scsi.c 				sg, req);
req              1558 drivers/scsi/csiostor/csio_scsi.c csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
req              1560 drivers/scsi/csiostor/csio_scsi.c 	struct scsi_cmnd *cmnd  = (struct scsi_cmnd *)csio_scsi_cmnd(req);
req              1571 drivers/scsi/csiostor/csio_scsi.c 	switch (req->wr_status) {
req              1581 drivers/scsi/csiostor/csio_scsi.c 		dma_buf = &req->dma_buf;
req              1647 drivers/scsi/csiostor/csio_scsi.c 		csio_dbg(hw, "Req %p cmd:%p op:%x %s\n", req, cmnd,
req              1649 drivers/scsi/csiostor/csio_scsi.c 			    (req->wr_status == FW_SCSI_CLOSE_REQUESTED) ?
req              1656 drivers/scsi/csiostor/csio_scsi.c 		if (req->wr_status == FW_SCSI_CLOSE_REQUESTED)
req              1665 drivers/scsi/csiostor/csio_scsi.c 			 req, cmnd, req->wr_status);
req              1707 drivers/scsi/csiostor/csio_scsi.c 			    req->wr_status, req, cmnd);
req              1716 drivers/scsi/csiostor/csio_scsi.c 	if (req->nsge > 0) {
req              1718 drivers/scsi/csiostor/csio_scsi.c 		if (req->dcopy && (host_status == DID_OK))
req              1719 drivers/scsi/csiostor/csio_scsi.c 			host_status = csio_scsi_copy_to_sgl(hw, req);
req              1726 drivers/scsi/csiostor/csio_scsi.c 	csio_scsi_cmnd(req) = NULL;
req              1727 drivers/scsi/csiostor/csio_scsi.c 	complete(&req->cmplobj);
req              1737 drivers/scsi/csiostor/csio_scsi.c csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
req              1739 drivers/scsi/csiostor/csio_scsi.c 	struct scsi_cmnd *cmnd  = (struct scsi_cmnd *)csio_scsi_cmnd(req);
req              1743 drivers/scsi/csiostor/csio_scsi.c 	if (likely(req->wr_status == FW_SUCCESS)) {
req              1744 drivers/scsi/csiostor/csio_scsi.c 		if (req->nsge > 0) {
req              1746 drivers/scsi/csiostor/csio_scsi.c 			if (req->dcopy)
req              1747 drivers/scsi/csiostor/csio_scsi.c 				host_status = csio_scsi_copy_to_sgl(hw, req);
req              1752 drivers/scsi/csiostor/csio_scsi.c 		csio_scsi_cmnd(req) = NULL;
req              1756 drivers/scsi/csiostor/csio_scsi.c 		csio_scsi_err_handler(hw, req);
req              2013 drivers/scsi/csiostor/csio_scsi.c csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
req              2015 drivers/scsi/csiostor/csio_scsi.c 	struct scsi_cmnd *cmnd  = (struct scsi_cmnd *)csio_scsi_cmnd(req);
req              2022 drivers/scsi/csiostor/csio_scsi.c 		      req, req->wr_status);
req              2025 drivers/scsi/csiostor/csio_scsi.c 	cmnd->SCp.Status = req->wr_status;
req              2038 drivers/scsi/csiostor/csio_scsi.c 	if (req->wr_status == FW_SCSI_RSP_ERR) {
req              2039 drivers/scsi/csiostor/csio_scsi.c 		dma_buf = &req->dma_buf;
req              2054 drivers/scsi/csiostor/csio_scsi.c 	csio_scsi_cmnd(req) = NULL;
req                92 drivers/scsi/csiostor/csio_scsi.h #define csio_scsi_cmnd(req)		((req)->scratch1)
req               194 drivers/scsi/csiostor/csio_scsi.h 	struct csio_sm *req;
req               197 drivers/scsi/csiostor/csio_scsi.h 		req = list_first_entry(&scm->ioreq_freelist,
req               199 drivers/scsi/csiostor/csio_scsi.h 		list_del_init(&req->sm_list);
req               201 drivers/scsi/csiostor/csio_scsi.h 		return (struct csio_ioreq *)req;
req               161 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head;
req               165 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req               166 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid));
req               167 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->local_port = csk->saddr.sin_port;
req               168 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->peer_port = csk->daddr.sin_port;
req               169 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->local_ip = csk->saddr.sin_addr.s_addr;
req               170 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->peer_ip = csk->daddr.sin_addr.s_addr;
req               172 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->opt0h = htonl(V_KEEP_ALIVE(1) | F_TCAM_BYPASS |
req               175 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->opt0l = htonl(V_ULP_MODE(ULP2_MODE_ISCSI) |
req               181 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		&req->local_ip, ntohs(req->local_port),
req               182 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		&req->peer_ip, ntohs(req->peer_port),
req               202 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
req               210 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
req               211 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->wr.wr_lo = htonl(V_WR_TID(tid));
req               212 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
req               213 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->rsvd = htonl(csk->write_seq);
req               229 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct cpl_abort_req *req = cplhdr(skb);
req               233 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		tdev, GET_TID(req), skb);
req               234 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->cmd = CPL_ABORT_NO_RST;
req               241 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct cpl_abort_req *req;
req               251 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req = (struct cpl_abort_req *)skb->head;
req               254 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
req               255 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->wr.wr_lo = htonl(V_WR_TID(csk->tid));
req               256 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
req               257 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->rsvd0 = htonl(csk->snd_nxt);
req               258 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
req               259 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->cmd = CPL_ABORT_SEND_RST;
req               264 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		req->rsvd1);
req               300 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct cpl_rx_data_ack *req;
req               307 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
req               312 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req = (struct cpl_rx_data_ack *)skb->head;
req               313 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req               314 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid));
req               315 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->credit_dack = htonl(F_RX_DACK_CHANGE | V_RX_DACK_MODE(1) |
req               353 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct tx_data_wr *req;
req               357 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req = __skb_push(skb, sizeof(*req));
req               358 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
req               360 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->wr_lo = htonl(V_WR_TID(csk->tid));
req               362 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->len = htonl(len);
req               364 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_ulp_mode(skb)) |
req               366 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->sndseq = htonl(csk->snd_nxt);
req               367 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->param = htonl(V_TX_PORT(l2t->smt_idx));
req               370 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
req               373 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		req->param |= htonl(V_TX_SNDBUF(csk->snd_win >> 15));
req               478 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct cpl_act_establish *req = cplhdr(skb);
req               479 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	unsigned int tid = GET_TID(req);
req               480 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
req               481 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	u32 rcv_isn = ntohl(req->rcv_isn);	/* real RCV_ISN + 1 */
req               510 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
req               663 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	const struct cpl_abort_req_rss *req = cplhdr(skb);
req               671 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
req               672 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	    req->status == CPL_ERR_PERSIST_NEG_ADVICE) {
req               689 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
req              1071 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;
req              1073 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	memset(req, 0, sizeof(*req));
req              1075 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
req              1076 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
req              1078 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->len = htonl(V_ULP_MEMIO_DATA_LEN(IPPOD_SIZE >> 5) |
req              1094 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct ulp_mem_io *req;
req              1106 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		req = (struct ulp_mem_io *)skb->head;
req              1107 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		ppod = (struct cxgbi_pagepod *)(req + 1);
req              1149 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct cpl_set_tcb_field *req;
req              1158 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req = (struct cpl_set_tcb_field *)skb->head;
req              1159 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req              1160 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
req              1161 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->reply = V_NO_REPLY(1);
req              1162 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->cpu_idx = 0;
req              1163 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->word = htons(31);
req              1164 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->mask = cpu_to_be64(0xF0000000);
req              1165 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->val = cpu_to_be64(val << 28);
req              1185 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct cpl_set_tcb_field *req;
req              1194 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req = (struct cpl_set_tcb_field *)skb->head;
req              1195 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req              1196 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
req              1197 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->reply = V_NO_REPLY(1);
req              1198 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->cpu_idx = 0;
req              1199 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->word = htons(31);
req              1200 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->mask = cpu_to_be64(0x0F000000);
req              1201 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->val = cpu_to_be64(val << 24);
req               227 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		struct cpl_act_open_req *req =
req               230 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		INIT_TP_WR(req, 0);
req               231 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
req               233 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->local_port = csk->saddr.sin_port;
req               234 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->peer_port = csk->daddr.sin_port;
req               235 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->local_ip = csk->saddr.sin_addr.s_addr;
req               236 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->peer_ip = csk->daddr.sin_addr.s_addr;
req               237 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->opt0 = cpu_to_be64(opt0);
req               238 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->params = cpu_to_be32(cxgb4_select_ntuple(
req               242 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->opt2 = cpu_to_be32(opt2);
req               246 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			csk, &req->local_ip, ntohs(req->local_port),
req               247 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			&req->peer_ip, ntohs(req->peer_port),
req               250 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		struct cpl_t5_act_open_req *req =
req               254 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		INIT_TP_WR(req, 0);
req               255 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
req               257 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->local_port = csk->saddr.sin_port;
req               258 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->peer_port = csk->daddr.sin_port;
req               259 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->local_ip = csk->saddr.sin_addr.s_addr;
req               260 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->peer_ip = csk->daddr.sin_addr.s_addr;
req               261 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->opt0 = cpu_to_be64(opt0);
req               262 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->params = cpu_to_be64(FILTER_TUPLE_V(
req               266 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->rsvd = cpu_to_be32(isn);
req               270 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->opt2 = cpu_to_be32(opt2);
req               274 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			csk, &req->local_ip, ntohs(req->local_port),
req               275 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			&req->peer_ip, ntohs(req->peer_port),
req               278 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		struct cpl_t6_act_open_req *req =
req               282 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		INIT_TP_WR(req, 0);
req               283 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
req               285 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->local_port = csk->saddr.sin_port;
req               286 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->peer_port = csk->daddr.sin_port;
req               287 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->local_ip = csk->saddr.sin_addr.s_addr;
req               288 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->peer_ip = csk->daddr.sin_addr.s_addr;
req               289 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->opt0 = cpu_to_be64(opt0);
req               290 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->params = cpu_to_be64(FILTER_TUPLE_V(
req               294 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->rsvd = cpu_to_be32(isn);
req               300 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->opt2 = cpu_to_be32(opt2);
req               301 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->rsvd2 = cpu_to_be32(0);
req               302 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->opt3 = cpu_to_be32(0);
req               306 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			  csk, &req->local_ip, ntohs(req->local_port),
req               307 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			  &req->peer_ip, ntohs(req->peer_port),
req               346 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		struct cpl_act_open_req6 *req =
req               349 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		INIT_TP_WR(req, 0);
req               350 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
req               352 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->local_port = csk->saddr6.sin6_port;
req               353 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->peer_port = csk->daddr6.sin6_port;
req               355 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
req               356 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
req               358 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
req               359 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
req               362 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->opt0 = cpu_to_be64(opt0);
req               365 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->opt2 = cpu_to_be32(opt2);
req               367 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->params = cpu_to_be32(cxgb4_select_ntuple(
req               371 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		struct cpl_t5_act_open_req6 *req =
req               374 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		INIT_TP_WR(req, 0);
req               375 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
req               377 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->local_port = csk->saddr6.sin6_port;
req               378 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->peer_port = csk->daddr6.sin6_port;
req               379 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
req               380 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
req               382 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
req               383 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
req               385 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->opt0 = cpu_to_be64(opt0);
req               388 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->opt2 = cpu_to_be32(opt2);
req               390 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
req               394 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		struct cpl_t6_act_open_req6 *req =
req               397 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		INIT_TP_WR(req, 0);
req               398 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
req               400 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->local_port = csk->saddr6.sin6_port;
req               401 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->peer_port = csk->daddr6.sin6_port;
req               402 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
req               403 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
req               405 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
req               406 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
req               408 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->opt0 = cpu_to_be64(opt0);
req               413 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->opt2 = cpu_to_be32(opt2);
req               415 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
req               419 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->rsvd2 = cpu_to_be32(0);
req               420 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->opt3 = cpu_to_be32(0);
req               439 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
req               447 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	INIT_TP_WR(req, tid);
req               448 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
req               449 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req->rsvd = 0;
req               459 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_abort_req *req;
req               464 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req = (struct cpl_abort_req *)skb->data;
req               465 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req->cmd = CPL_ABORT_NO_RST;
req               471 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_abort_req *req;
req               487 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req = (struct cpl_abort_req *)skb->head;
req               489 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req->cmd = CPL_ABORT_SEND_RST;
req               491 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	INIT_TP_WR(req, csk->tid);
req               492 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
req               493 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req->rsvd0 = htonl(csk->snd_nxt);
req               494 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
req               499 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->rsvd1);
req               529 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_rx_data_ack *req;
req               535 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
req               540 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req = (struct cpl_rx_data_ack *)skb->head;
req               543 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	INIT_TP_WR(req, csk->tid);
req               544 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
req               546 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
req               673 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct fw_ofld_tx_data_wr *req;
req               678 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req = __skb_push(skb, sizeof(*req));
req               681 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
req               684 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
req               687 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->op_to_immdlen =
req               691 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		req->flowid_len16 =
req               699 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req->tunnel_to_proxy = htonl(wr_ulp_mode |
req               701 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req->plen = htonl(len);
req               782 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			struct cpl_close_con_req *req =
req               784 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
req               812 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
req               813 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	unsigned short tcp_opt = ntohs(req->tcp_opt);
req               814 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	unsigned int tid = GET_TID(req);
req               815 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
req               818 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	u32 rcv_isn = be32_to_cpu(req->rcv_isn);
req               873 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
req              1013 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
req              1014 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	unsigned int tid = GET_TID(req);
req              1073 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
req              1074 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	unsigned int tid = GET_TID(req);
req              1087 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		       csk, csk->state, csk->flags, csk->tid, req->status);
req              1089 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	if (is_neg_adv(req->status))
req              1108 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
req              1889 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		   struct ulp_mem_io *req,
req              1895 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);
req              1897 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	INIT_ULPTX_WR(req, wr_len, 0, tid);
req              1898 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
req              1900 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
req              1903 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
req              1904 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
req              1905 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
req              1944 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct ulp_mem_io *req;
req              1952 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req = (struct ulp_mem_io *)skb->head;
req              1953 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	idata = (struct ulptx_idata *)(req + 1);
req              2000 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_set_tcb_field *req;
req              2005 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
req              2010 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req = (struct cpl_set_tcb_field *)skb->head;
req              2011 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	INIT_TP_WR(req, csk->tid);
req              2012 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
req              2013 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
req              2014 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req->word_cookie = htons(0);
req              2015 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req->mask = cpu_to_be64(0x3 << 8);
req              2016 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req->val = cpu_to_be64(pg_idx << 8);
req              2033 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_set_tcb_field *req;
req              2038 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
req              2045 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req = (struct cpl_set_tcb_field *)skb->head;
req              2046 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	INIT_TP_WR(req, tid);
req              2047 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
req              2048 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
req              2049 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req->word_cookie = htons(0);
req              2050 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req->mask = cpu_to_be64(0x3 << 4);
req              2051 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
req              1076 drivers/scsi/device_handler/scsi_dh_alua.c static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req)
req              1096 drivers/scsi/device_handler/scsi_dh_alua.c 		req->rq_flags |= RQF_QUIET;
req               332 drivers/scsi/device_handler/scsi_dh_emc.c 		struct request *req)
req               337 drivers/scsi/device_handler/scsi_dh_emc.c 		req->rq_flags |= RQF_QUIET;
req               162 drivers/scsi/device_handler/scsi_dh_hp_sw.c static blk_status_t hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
req               167 drivers/scsi/device_handler/scsi_dh_hp_sw.c 		req->rq_flags |= RQF_QUIET;
req               647 drivers/scsi/device_handler/scsi_dh_rdac.c static blk_status_t rdac_prep_fn(struct scsi_device *sdev, struct request *req)
req               652 drivers/scsi/device_handler/scsi_dh_rdac.c 		req->rq_flags |= RQF_QUIET;
req                41 drivers/scsi/hptiop.c 				struct hpt_iop_request_scsi_command *req);
req                48 drivers/scsi/hptiop.c 	u32 req = 0;
req                52 drivers/scsi/hptiop.c 		req = readl(&hba->u.itl.iop->inbound_queue);
req                53 drivers/scsi/hptiop.c 		if (req != IOPMU_QUEUE_EMPTY)
req                58 drivers/scsi/hptiop.c 	if (req != IOPMU_QUEUE_EMPTY) {
req                59 drivers/scsi/hptiop.c 		writel(req, &hba->u.itl.iop->outbound_queue);
req                88 drivers/scsi/hptiop.c 	u32 req;
req                90 drivers/scsi/hptiop.c 	while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
req                93 drivers/scsi/hptiop.c 		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
req                94 drivers/scsi/hptiop.c 			hptiop_request_callback_itl(hba, req);
req                99 drivers/scsi/hptiop.c 				((char __iomem *)hba->u.itl.iop + req);
req               103 drivers/scsi/hptiop.c 					hptiop_request_callback_itl(hba, req);
req               108 drivers/scsi/hptiop.c 				hptiop_request_callback_itl(hba, req);
req               178 drivers/scsi/hptiop.c 	struct hpt_iop_request_scsi_command *req;
req               191 drivers/scsi/hptiop.c 		req = hba->reqs[tag >> 8].req_virt;
req               193 drivers/scsi/hptiop.c 			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
req               195 drivers/scsi/hptiop.c 		hptiop_finish_scsi_req(hba, tag>>8, req);
req               233 drivers/scsi/hptiop.c 	struct hpt_iop_request_scsi_command *req;
req               242 drivers/scsi/hptiop.c 		req = hba->reqs[(_tag >> 4) & 0xff].req_virt;
req               244 drivers/scsi/hptiop.c 			req->header.result = IOP_RESULT_SUCCESS;
req               245 drivers/scsi/hptiop.c 		hptiop_finish_scsi_req(hba, (_tag >> 4) & 0xff, req);
req               301 drivers/scsi/hptiop.c 	struct hpt_iop_request_header __iomem *req = _req;
req               304 drivers/scsi/hptiop.c 	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
req               305 drivers/scsi/hptiop.c 	writel(0, &req->context);
req               306 drivers/scsi/hptiop.c 	writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
req               312 drivers/scsi/hptiop.c 		if (readl(&req->context))
req               404 drivers/scsi/hptiop.c 	struct hpt_iop_request_get_config __iomem *req;
req               410 drivers/scsi/hptiop.c 	req = (struct hpt_iop_request_get_config __iomem *)
req               413 drivers/scsi/hptiop.c 	writel(0, &req->header.flags);
req               414 drivers/scsi/hptiop.c 	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
req               415 drivers/scsi/hptiop.c 	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
req               416 drivers/scsi/hptiop.c 	writel(IOP_RESULT_PENDING, &req->header.result);
req               418 drivers/scsi/hptiop.c 	if (iop_send_sync_request_itl(hba, req, 20000)) {
req               423 drivers/scsi/hptiop.c 	memcpy_fromio(config, req, sizeof(*config));
req               431 drivers/scsi/hptiop.c 	struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;
req               433 drivers/scsi/hptiop.c 	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
req               434 drivers/scsi/hptiop.c 	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
req               435 drivers/scsi/hptiop.c 	req->header.size =
req               437 drivers/scsi/hptiop.c 	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
req               438 drivers/scsi/hptiop.c 	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5);
req               439 drivers/scsi/hptiop.c 	req->header.context_hi32 = 0;
req               446 drivers/scsi/hptiop.c 	memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
req               476 drivers/scsi/hptiop.c 	struct hpt_iop_request_set_config __iomem *req;
req               482 drivers/scsi/hptiop.c 	req = (struct hpt_iop_request_set_config __iomem *)
req               485 drivers/scsi/hptiop.c 	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
req               490 drivers/scsi/hptiop.c 	writel(0, &req->header.flags);
req               491 drivers/scsi/hptiop.c 	writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
req               492 drivers/scsi/hptiop.c 	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
req               493 drivers/scsi/hptiop.c 	writel(IOP_RESULT_PENDING, &req->header.result);
req               495 drivers/scsi/hptiop.c 	if (iop_send_sync_request_itl(hba, req, 20000)) {
req               507 drivers/scsi/hptiop.c 	struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;
req               509 drivers/scsi/hptiop.c 	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
req               510 drivers/scsi/hptiop.c 	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
req               511 drivers/scsi/hptiop.c 	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
req               512 drivers/scsi/hptiop.c 	req->header.size =
req               514 drivers/scsi/hptiop.c 	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
req               515 drivers/scsi/hptiop.c 	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
req               516 drivers/scsi/hptiop.c 	req->header.context_hi32 = 0;
req               529 drivers/scsi/hptiop.c 	struct hpt_iop_request_set_config *req =
req               532 drivers/scsi/hptiop.c 	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
req               533 drivers/scsi/hptiop.c 	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
req               534 drivers/scsi/hptiop.c 	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
req               535 drivers/scsi/hptiop.c 	req->header.size =
req               537 drivers/scsi/hptiop.c 	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
req               538 drivers/scsi/hptiop.c 	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
req               539 drivers/scsi/hptiop.c 	req->header.context_hi32 = 0;
req               709 drivers/scsi/hptiop.c static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
req               711 drivers/scsi/hptiop.c 	dprintk("free_req(%d, %p)\n", req->index, req);
req               712 drivers/scsi/hptiop.c 	req->next = hba->req_list;
req               713 drivers/scsi/hptiop.c 	hba->req_list = req;
req               717 drivers/scsi/hptiop.c 				struct hpt_iop_request_scsi_command *req)
req               723 drivers/scsi/hptiop.c 			req, req->header.type, req->header.result,
req               724 drivers/scsi/hptiop.c 			req->header.context, tag);
req               726 drivers/scsi/hptiop.c 	BUG_ON(!req->header.result);
req               727 drivers/scsi/hptiop.c 	BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));
req               734 drivers/scsi/hptiop.c 	switch (le32_to_cpu(req->header.result)) {
req               737 drivers/scsi/hptiop.c 			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
req               757 drivers/scsi/hptiop.c 			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
req               759 drivers/scsi/hptiop.c 		memcpy(scp->sense_buffer, &req->sg_list, SCSI_SENSE_BUFFERSIZE);
req               769 drivers/scsi/hptiop.c 		scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
req               779 drivers/scsi/hptiop.c 	struct hpt_iop_request_scsi_command *req;
req               784 drivers/scsi/hptiop.c 		req = hba->reqs[tag].req_virt;
req               786 drivers/scsi/hptiop.c 			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
req               789 drivers/scsi/hptiop.c 		req = hba->reqs[tag].req_virt;
req               792 drivers/scsi/hptiop.c 	hptiop_finish_scsi_req(hba, tag, req);
req               797 drivers/scsi/hptiop.c 	struct hpt_iop_request_header __iomem *req;
req               801 drivers/scsi/hptiop.c 	req = (struct hpt_iop_request_header __iomem *)
req               805 drivers/scsi/hptiop.c 			req, readl(&req->type), readl(&req->result),
req               806 drivers/scsi/hptiop.c 			readl(&req->context), tag);
req               808 drivers/scsi/hptiop.c 	BUG_ON(!readl(&req->result));
req               809 drivers/scsi/hptiop.c 	BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);
req               811 drivers/scsi/hptiop.c 	p = (struct hpt_iop_request_ioctl_command __iomem *)req;
req               813 drivers/scsi/hptiop.c 		(readl(&req->context) |
req               814 drivers/scsi/hptiop.c 			((u64)readl(&req->context_hi32)<<32));
req               816 drivers/scsi/hptiop.c 	if (readl(&req->result) == IOP_RESULT_SUCCESS) {
req              1002 drivers/scsi/hptiop.c 	struct hpt_iop_request_scsi_command *req;
req              1038 drivers/scsi/hptiop.c 	req = _req->req_virt;
req              1041 drivers/scsi/hptiop.c 	sg_count = hptiop_buildsgl(scp, req->sg_list);
req              1045 drivers/scsi/hptiop.c 	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
req              1046 drivers/scsi/hptiop.c 	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
req              1047 drivers/scsi/hptiop.c 	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
req              1048 drivers/scsi/hptiop.c 	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
req              1049 drivers/scsi/hptiop.c 	req->channel = scp->device->channel;
req              1050 drivers/scsi/hptiop.c 	req->target = scp->device->id;
req              1051 drivers/scsi/hptiop.c 	req->lun = scp->device->lun;
req              1052 drivers/scsi/hptiop.c 	req->header.size = cpu_to_le32(
req              1057 drivers/scsi/hptiop.c 	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
req              1246 drivers/scsi/ibmvscsi/ibmvscsi.c 	struct viosrp_capabilities *req;
req              1258 drivers/scsi/ibmvscsi/ibmvscsi.c 	req = &evt_struct->iu.mad.capabilities;
req              1259 drivers/scsi/ibmvscsi/ibmvscsi.c 	memset(req, 0, sizeof(*req));
req              1272 drivers/scsi/ibmvscsi/ibmvscsi.c 	req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE);
req              1273 drivers/scsi/ibmvscsi/ibmvscsi.c 	req->buffer = cpu_to_be64(hostdata->caps_addr);
req              1292 drivers/scsi/ibmvscsi/ibmvscsi.c 		req->common.length =
req              1295 drivers/scsi/ibmvscsi/ibmvscsi.c 		req->common.length = cpu_to_be16(sizeof(hostdata->caps) -
req              1416 drivers/scsi/ibmvscsi/ibmvscsi.c 	struct viosrp_adapter_info *req;
req              1428 drivers/scsi/ibmvscsi/ibmvscsi.c 	req = &evt_struct->iu.mad.adapter_info;
req              1429 drivers/scsi/ibmvscsi/ibmvscsi.c 	memset(req, 0x00, sizeof(*req));
req              1431 drivers/scsi/ibmvscsi/ibmvscsi.c 	req->common.type = cpu_to_be32(VIOSRP_ADAPTER_INFO_TYPE);
req              1432 drivers/scsi/ibmvscsi/ibmvscsi.c 	req->common.length = cpu_to_be16(sizeof(hostdata->madapter_info));
req              1433 drivers/scsi/ibmvscsi/ibmvscsi.c 	req->buffer = cpu_to_be64(hostdata->adapter_info_addr);
req              2282 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 	struct srp_login_req *req = &vio_iu(iue)->srp.login_req;
req              2291 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 	iport = (struct port_id *)req->initiator_port_id;
req              2292 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 	tport = (struct port_id *)req->target_port_id;
req              2293 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 	fmt = (struct format_code *)&req->req_buf_fmt;
req              2294 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 	if (be32_to_cpu(req->req_it_iu_len) > SRP_MAX_IU_LEN)
req              2296 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 	else if (be32_to_cpu(req->req_it_iu_len) < 64)
req              2301 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 	else if (req->req_flags & SRP_MULTICHAN_MULTI)
req              2316 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 	cmd->rsp.tag = req->tag;
req               577 drivers/scsi/isci/request.c 	struct isci_stp_request *stp_req = &ireq->stp.req;
req              1326 drivers/scsi/isci/request.c 	struct isci_stp_request *stp_req = &ireq->stp.req;
req              1352 drivers/scsi/isci/request.c 	struct isci_stp_request *stp_req = &ireq->stp.req;
req              1512 drivers/scsi/isci/request.c 	struct isci_stp_request *stp_req = &ireq->stp.req;
req              1710 drivers/scsi/isci/request.c 	struct isci_stp_request *stp_req = &ireq->stp.req;
req               143 drivers/scsi/isci/request.h 			struct isci_stp_request req;
req               154 drivers/scsi/isci/request.h 	ireq = container_of(stp_req, typeof(*ireq), stp.req);
req               286 drivers/scsi/isci/request.h #define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)
req               288 drivers/scsi/isci/request.h #define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)
req               449 drivers/scsi/libfc/fc_lport.c 	struct fc_els_rnid *req;
req               464 drivers/scsi/libfc/fc_lport.c 	req = fc_frame_payload_get(in_fp, sizeof(*req));
req               465 drivers/scsi/libfc/fc_lport.c 	if (!req) {
req               470 drivers/scsi/libfc/fc_lport.c 		fmt = req->rnid_fmt;
req                55 drivers/scsi/libsas/sas_expander.c 		struct scatterlist *req, struct scatterlist *resp)
req                76 drivers/scsi/libsas/sas_expander.c 		task->smp_task.smp_req = *req;
req               140 drivers/scsi/libsas/sas_expander.c static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
req               146 drivers/scsi/libsas/sas_expander.c 	sg_init_one(&req_sg, req, req_size);
req               662 drivers/scsi/libsas/sas_expander.c 	u8 *req;
req               667 drivers/scsi/libsas/sas_expander.c 	req = alloc_smp_req(RPEL_REQ_SIZE);
req               668 drivers/scsi/libsas/sas_expander.c 	if (!req)
req               673 drivers/scsi/libsas/sas_expander.c 		kfree(req);
req               677 drivers/scsi/libsas/sas_expander.c 	req[1] = SMP_REPORT_PHY_ERR_LOG;
req               678 drivers/scsi/libsas/sas_expander.c 	req[9] = phy->number;
req               680 drivers/scsi/libsas/sas_expander.c 	res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
req               692 drivers/scsi/libsas/sas_expander.c 	kfree(req);
req              1940 drivers/scsi/lpfc/lpfc_bsg.c 	bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
req              1942 drivers/scsi/lpfc/lpfc_bsg.c 	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
req              1944 drivers/scsi/lpfc/lpfc_bsg.c 	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
req              1948 drivers/scsi/lpfc/lpfc_bsg.c 		       &link_diag_state->u.req, 1);
req              1951 drivers/scsi/lpfc/lpfc_bsg.c 		       &link_diag_state->u.req, 0);
req              1999 drivers/scsi/lpfc/lpfc_bsg.c 	       &link_diag_loopback->u.req, link_no);
req              2003 drivers/scsi/lpfc/lpfc_bsg.c 		       &link_diag_loopback->u.req, LPFC_LNK_FC_TRUNKED);
req              2006 drivers/scsi/lpfc/lpfc_bsg.c 		       &link_diag_loopback->u.req,
req              2010 drivers/scsi/lpfc/lpfc_bsg.c 	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
req              2472 drivers/scsi/lpfc/lpfc_bsg.c 	bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
req              2474 drivers/scsi/lpfc/lpfc_bsg.c 	bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
req              2476 drivers/scsi/lpfc/lpfc_bsg.c 	bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
req              2478 drivers/scsi/lpfc/lpfc_bsg.c 	bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
req              2480 drivers/scsi/lpfc/lpfc_bsg.c 	bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
req              2482 drivers/scsi/lpfc/lpfc_bsg.c 	bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
req              3094 drivers/scsi/lpfc/lpfc_hw.h 	uint32_t req:4;
req              3098 drivers/scsi/lpfc/lpfc_hw.h 	uint32_t req:4;
req              1793 drivers/scsi/lpfc/lpfc_hw4.h 		} req;
req              1885 drivers/scsi/lpfc/lpfc_hw4.h 		} req;
req              1910 drivers/scsi/lpfc/lpfc_hw4.h 		} req;
req              1942 drivers/scsi/lpfc/lpfc_hw4.h 		} req;
req              1977 drivers/scsi/lpfc/lpfc_hw4.h 		} req;
req              2008 drivers/scsi/lpfc/lpfc_hw4.h 	} req;
req              1908 drivers/scsi/lpfc/lpfc_mbox.c 		       &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
req              1925 drivers/scsi/lpfc/lpfc_mbox.c 			       &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
req              1065 drivers/scsi/lpfc/lpfc_nvmet.c 			 struct nvmefc_tgt_fcp_req *req)
req              1069 drivers/scsi/lpfc/lpfc_nvmet.c 		container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
req              1582 drivers/scsi/lpfc/lpfc_nvmet.c 	struct nvmefc_tgt_fcp_req *req = NULL;
req              1666 drivers/scsi/lpfc/lpfc_nvmet.c 		req = &ctxp->ctx.fcp_req;
req              1667 drivers/scsi/lpfc/lpfc_nvmet.c 		if (req)
req              1668 drivers/scsi/lpfc/lpfc_nvmet.c 			nvmet_fc_rcv_fcp_abort(phba->targetport, req);
req              2784 drivers/scsi/qla2xxx/qla_attr.c 	struct req_que *req = ha->req_q_map[0];
req              2897 drivers/scsi/qla2xxx/qla_attr.c 			req = qpair->req;
req              2903 drivers/scsi/qla2xxx/qla_attr.c 	vha->req = req;
req              2575 drivers/scsi/qla2xxx/qla_bsg.c 	struct req_que *req;
req              2580 drivers/scsi/qla2xxx/qla_bsg.c 		req = ha->req_q_map[que];
req              2581 drivers/scsi/qla2xxx/qla_bsg.c 		if (!req)
req              2584 drivers/scsi/qla2xxx/qla_bsg.c 		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
req              2585 drivers/scsi/qla2xxx/qla_bsg.c 			sp = req->outstanding_cmds[cnt];
req              2591 drivers/scsi/qla2xxx/qla_bsg.c 					req->outstanding_cmds[cnt] = NULL;
req                96 drivers/scsi/qla2xxx/qla_dbg.c 	struct req_que *req = ha->req_q_map[0];
req                99 drivers/scsi/qla2xxx/qla_dbg.c 	memcpy(ptr, req->ring, req->length *
req               103 drivers/scsi/qla2xxx/qla_dbg.c 	ptr += req->length * sizeof(request_t);
req               596 drivers/scsi/qla2xxx/qla_dbg.c 	struct req_que *req;
req               605 drivers/scsi/qla2xxx/qla_dbg.c 		req = ha->req_q_map[que];
req               606 drivers/scsi/qla2xxx/qla_dbg.c 		if (!req)
req               616 drivers/scsi/qla2xxx/qla_dbg.c 		    (req->length * sizeof(request_t)));
req               623 drivers/scsi/qla2xxx/qla_dbg.c 		qh->size = htonl(req->length * sizeof(request_t));
req               627 drivers/scsi/qla2xxx/qla_dbg.c 		memcpy(ptr, req->ring, req->length * sizeof(request_t));
req               628 drivers/scsi/qla2xxx/qla_dbg.c 		ptr += req->length * sizeof(request_t);
req               429 drivers/scsi/qla2xxx/qla_def.h 	void		*req;
req              2916 drivers/scsi/qla2xxx/qla_def.h 	} req;
req              3026 drivers/scsi/qla2xxx/qla_def.h 		struct ct_sns_req req;
req              3033 drivers/scsi/qla2xxx/qla_def.h 		struct ct_sns_req req;
req              3451 drivers/scsi/qla2xxx/qla_def.h 	struct req_que *req;
req              3525 drivers/scsi/qla2xxx/qla_def.h 	struct req_que *req;
req              4498 drivers/scsi/qla2xxx/qla_def.h 	struct req_que *req;
req               281 drivers/scsi/qla2xxx/qla_gbl.h extern uint32_t qla2xxx_get_next_handle(struct req_que *req);
req               109 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.header.revision = 0x01;
req               110 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.header.gs_type = 0xFC;
req               111 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.header.gs_subtype = 0x02;
req               112 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.command = cpu_to_be16(cmd);
req               113 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
req               115 drivers/scsi/qla2xxx/qla_gs.c 	return &p->p.req;
req               229 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
req               320 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
req               400 drivers/scsi/qla2xxx/qla_gs.c 		ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
req               467 drivers/scsi/qla2xxx/qla_gs.c 		ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
req               542 drivers/scsi/qla2xxx/qla_gs.c 		if (sp->u.iocb_cmd.u.ctarg.req) {
req               545 drivers/scsi/qla2xxx/qla_gs.c 			    sp->u.iocb_cmd.u.ctarg.req,
req               547 drivers/scsi/qla2xxx/qla_gs.c 			sp->u.iocb_cmd.u.ctarg.req = NULL;
req               602 drivers/scsi/qla2xxx/qla_gs.c 	sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
req               606 drivers/scsi/qla2xxx/qla_gs.c 	if (!sp->u.iocb_cmd.u.ctarg.req) {
req               625 drivers/scsi/qla2xxx/qla_gs.c 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
req               631 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id);
req               632 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.rft_id.fc4_types[2] = 0x01;		/* FCP-3 */
req               635 drivers/scsi/qla2xxx/qla_gs.c 		ct_req->req.rft_id.fc4_types[6] = 1;    /* NVMe type 28h */
req               698 drivers/scsi/qla2xxx/qla_gs.c 	sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
req               702 drivers/scsi/qla2xxx/qla_gs.c 	if (!sp->u.iocb_cmd.u.ctarg.req) {
req               721 drivers/scsi/qla2xxx/qla_gs.c 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
req               727 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id);
req               728 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.rff_id.fc4_feature = fc4feature;
req               729 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.rff_id.fc4_type = fc4type;		/* SCSI - FCP */
req               789 drivers/scsi/qla2xxx/qla_gs.c 	sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
req               793 drivers/scsi/qla2xxx/qla_gs.c 	if (!sp->u.iocb_cmd.u.ctarg.req) {
req               812 drivers/scsi/qla2xxx/qla_gs.c 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
req               818 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.rnn_id.port_id = port_id_to_be_id(vha->d_id);
req               819 drivers/scsi/qla2xxx/qla_gs.c 	memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
req               897 drivers/scsi/qla2xxx/qla_gs.c 	sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
req               901 drivers/scsi/qla2xxx/qla_gs.c 	if (!sp->u.iocb_cmd.u.ctarg.req) {
req               920 drivers/scsi/qla2xxx/qla_gs.c 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
req               926 drivers/scsi/qla2xxx/qla_gs.c 	memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
req               929 drivers/scsi/qla2xxx/qla_gs.c 	qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
req               930 drivers/scsi/qla2xxx/qla_gs.c 	    sizeof(ct_req->req.rsnn_nn.sym_node_name));
req               931 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.rsnn_nn.name_len =
req               932 drivers/scsi/qla2xxx/qla_gs.c 	    (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
req               935 drivers/scsi/qla2xxx/qla_gs.c 	sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
req              1495 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.header.revision = 0x01;
req              1496 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.header.gs_type = 0xFA;
req              1497 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.header.gs_subtype = 0x10;
req              1498 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.command = cpu_to_be16(cmd);
req              1499 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
req              1501 drivers/scsi/qla2xxx/qla_gs.c 	return &p->p.req;
req              1533 drivers/scsi/qla2xxx/qla_gs.c 	memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
req              1534 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.rhba.entry_count = cpu_to_be32(1);
req              1535 drivers/scsi/qla2xxx/qla_gs.c 	memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
req              1539 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.rhba.attrs.count =
req              1541 drivers/scsi/qla2xxx/qla_gs.c 	entries = &ct_req->req;
req              1680 drivers/scsi/qla2xxx/qla_gs.c 	    ct_req->req.rhba.hba_identifier, size);
req              1745 drivers/scsi/qla2xxx/qla_gs.c 	memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
req              1749 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
req              1750 drivers/scsi/qla2xxx/qla_gs.c 	entries = &ct_req->req;
req              1896 drivers/scsi/qla2xxx/qla_gs.c 	    wwn_to_u64(ct_req->req.rpa.port_name), size);
req              1956 drivers/scsi/qla2xxx/qla_gs.c 	memcpy(ct_req->req.rhba2.hba_identifier, vha->port_name, WWN_SIZE);
req              1957 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.rhba2.entry_count = cpu_to_be32(1);
req              1958 drivers/scsi/qla2xxx/qla_gs.c 	memcpy(ct_req->req.rhba2.port_name, vha->port_name, WWN_SIZE);
req              1962 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
req              1963 drivers/scsi/qla2xxx/qla_gs.c 	entries = &ct_req->req;
req              2204 drivers/scsi/qla2xxx/qla_gs.c 	    wwn_to_u64(ct_req->req.rhba2.hba_identifier));
req              2264 drivers/scsi/qla2xxx/qla_gs.c 	memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
req              2267 drivers/scsi/qla2xxx/qla_gs.c 	    "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
req              2317 drivers/scsi/qla2xxx/qla_gs.c 	memcpy(ct_req->req.rpa2.port_name, vha->port_name, WWN_SIZE);
req              2321 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
req              2322 drivers/scsi/qla2xxx/qla_gs.c 	entries = &ct_req->req;
req              2583 drivers/scsi/qla2xxx/qla_gs.c 	    "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
req              2714 drivers/scsi/qla2xxx/qla_gs.c 		ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
req              2749 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.header.revision = 0x01;
req              2750 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.header.gs_type = 0xFA;
req              2751 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.header.gs_subtype = 0x01;
req              2752 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.command = cpu_to_be16(cmd);
req              2753 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
req              2755 drivers/scsi/qla2xxx/qla_gs.c 	return &p->p.req;
req              2828 drivers/scsi/qla2xxx/qla_gs.c 		memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
req              2918 drivers/scsi/qla2xxx/qla_gs.c 		ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
req              3068 drivers/scsi/qla2xxx/qla_gs.c 	memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name,
req              3071 drivers/scsi/qla2xxx/qla_gs.c 	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
req              3127 drivers/scsi/qla2xxx/qla_gs.c 		if (sp->u.iocb_cmd.u.ctarg.req) {
req              3130 drivers/scsi/qla2xxx/qla_gs.c 			    sp->u.iocb_cmd.u.ctarg.req,
req              3132 drivers/scsi/qla2xxx/qla_gs.c 			sp->u.iocb_cmd.u.ctarg.req = NULL;
req              3253 drivers/scsi/qla2xxx/qla_gs.c 	    (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
req              3263 drivers/scsi/qla2xxx/qla_gs.c 		    sp->name, res, sp->gen1, &ct_req->req.port_id.port_id,
req              3268 drivers/scsi/qla2xxx/qla_gs.c 		    sp->name, sp->gen1, &ct_req->req.port_id.port_id,
req              3274 drivers/scsi/qla2xxx/qla_gs.c 	ea.id = be_to_port_id(ct_req->req.port_id.port_id);
req              3301 drivers/scsi/qla2xxx/qla_gs.c 				  sp->u.iocb_cmd.u.ctarg.req,
req              3303 drivers/scsi/qla2xxx/qla_gs.c 		sp->u.iocb_cmd.u.ctarg.req = NULL;
req              3353 drivers/scsi/qla2xxx/qla_gs.c 	sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
req              3357 drivers/scsi/qla2xxx/qla_gs.c 	if (!sp->u.iocb_cmd.u.ctarg.req) {
req              3376 drivers/scsi/qla2xxx/qla_gs.c 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
req              3381 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.port_id.port_id = port_id_to_be_id(*id);
req              3392 drivers/scsi/qla2xxx/qla_gs.c 	    sp->handle, &ct_req->req.port_id.port_id);
req              3405 drivers/scsi/qla2xxx/qla_gs.c 	if (sp->u.iocb_cmd.u.ctarg.req) {
req              3408 drivers/scsi/qla2xxx/qla_gs.c 			sp->u.iocb_cmd.u.ctarg.req,
req              3410 drivers/scsi/qla2xxx/qla_gs.c 		sp->u.iocb_cmd.u.ctarg.req = NULL;
req              3502 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
req              3503 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
req              3504 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
req              3506 drivers/scsi/qla2xxx/qla_gs.c 	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
req              3767 drivers/scsi/qla2xxx/qla_gs.c 		(struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
req              3861 drivers/scsi/qla2xxx/qla_gs.c 		(struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
req              3962 drivers/scsi/qla2xxx/qla_gs.c 	if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
req              3965 drivers/scsi/qla2xxx/qla_gs.c 		    __func__, sp->u.iocb_cmd.u.ctarg.req,
req              3990 drivers/scsi/qla2xxx/qla_gs.c 	memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
req              3992 drivers/scsi/qla2xxx/qla_gs.c 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
req              3998 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.gpn_ft.port_type = fc4_type;
req              4007 drivers/scsi/qla2xxx/qla_gs.c 	    sp->handle, ct_req->req.gpn_ft.port_type);
req              4017 drivers/scsi/qla2xxx/qla_gs.c 	if (sp->u.iocb_cmd.u.ctarg.req) {
req              4020 drivers/scsi/qla2xxx/qla_gs.c 		    sp->u.iocb_cmd.u.ctarg.req,
req              4022 drivers/scsi/qla2xxx/qla_gs.c 		sp->u.iocb_cmd.u.ctarg.req = NULL;
req              4094 drivers/scsi/qla2xxx/qla_gs.c 		sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
req              4099 drivers/scsi/qla2xxx/qla_gs.c 		if (!sp->u.iocb_cmd.u.ctarg.req) {
req              4127 drivers/scsi/qla2xxx/qla_gs.c 			    sp->u.iocb_cmd.u.ctarg.req,
req              4129 drivers/scsi/qla2xxx/qla_gs.c 			sp->u.iocb_cmd.u.ctarg.req = NULL;
req              4155 drivers/scsi/qla2xxx/qla_gs.c 	memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
req              4157 drivers/scsi/qla2xxx/qla_gs.c 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
req              4162 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.gpn_ft.port_type = fc4_type;
req              4170 drivers/scsi/qla2xxx/qla_gs.c 	    sp->handle, ct_req->req.gpn_ft.port_type);
req              4180 drivers/scsi/qla2xxx/qla_gs.c 	if (sp->u.iocb_cmd.u.ctarg.req) {
req              4183 drivers/scsi/qla2xxx/qla_gs.c 		    sp->u.iocb_cmd.u.ctarg.req,
req              4185 drivers/scsi/qla2xxx/qla_gs.c 		sp->u.iocb_cmd.u.ctarg.req = NULL;
req              4290 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
req              4294 drivers/scsi/qla2xxx/qla_gs.c 	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
req              4418 drivers/scsi/qla2xxx/qla_gs.c 	ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
req              4422 drivers/scsi/qla2xxx/qla_gs.c 	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
req                53 drivers/scsi/qla2xxx/qla_init.c 	struct req_que *req;
req                59 drivers/scsi/qla2xxx/qla_init.c 	req = sp->qpair->req;
req                60 drivers/scsi/qla2xxx/qla_init.c 	req->outstanding_cmds[sp->handle] = NULL;
req               115 drivers/scsi/qla2xxx/qla_init.c 	for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
req               116 drivers/scsi/qla2xxx/qla_init.c 		if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] ==
req               118 drivers/scsi/qla2xxx/qla_init.c 			qpair->req->outstanding_cmds[handle] = NULL;
req               121 drivers/scsi/qla2xxx/qla_init.c 		if (qpair->req->outstanding_cmds[handle] == sp) {
req               122 drivers/scsi/qla2xxx/qla_init.c 			qpair->req->outstanding_cmds[handle] = NULL;
req               172 drivers/scsi/qla2xxx/qla_init.c 	abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);
req               226 drivers/scsi/qla2xxx/qla_init.c 			for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
req               228 drivers/scsi/qla2xxx/qla_init.c 				if (sp->qpair->req->outstanding_cmds[h] ==
req               230 drivers/scsi/qla2xxx/qla_init.c 					sp->qpair->req->outstanding_cmds[h] =
req               249 drivers/scsi/qla2xxx/qla_init.c 			for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
req               251 drivers/scsi/qla2xxx/qla_init.c 				if (sp->qpair->req->outstanding_cmds[h] ==
req               253 drivers/scsi/qla2xxx/qla_init.c 					sp->qpair->req->outstanding_cmds[h] =
req              1849 drivers/scsi/qla2xxx/qla_init.c 	struct req_que *req = qpair->req;
req              1852 drivers/scsi/qla2xxx/qla_init.c 	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
req              1853 drivers/scsi/qla2xxx/qla_init.c 		if (req->outstanding_cmds[handle] == sp)
req              1858 drivers/scsi/qla2xxx/qla_init.c 	if (handle == req->num_outstanding_cmds) {
req              2178 drivers/scsi/qla2xxx/qla_init.c 	struct req_que *req = ha->req_q_map[0];
req              2241 drivers/scsi/qla2xxx/qla_init.c 	ha->isp_ops->get_flash_version(vha, req->ring);
req              2959 drivers/scsi/qla2xxx/qla_init.c 	struct req_que *req = ha->req_q_map[0];
req              3029 drivers/scsi/qla2xxx/qla_init.c 	if (req->length > 1024)
req              3033 drivers/scsi/qla2xxx/qla_init.c 		    req->length;
req              3078 drivers/scsi/qla2xxx/qla_init.c 	struct req_que *req = ha->req_q_map[0];
req              3083 drivers/scsi/qla2xxx/qla_init.c 	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
req              3202 drivers/scsi/qla2xxx/qla_init.c 	struct req_que *req = ha->req_q_map[0];
req              3236 drivers/scsi/qla2xxx/qla_init.c 			    (req->length * sizeof(request_t));
req              3271 drivers/scsi/qla2xxx/qla_init.c 		req_q_size = req->length * sizeof(request_t);
req              3398 drivers/scsi/qla2xxx/qla_init.c qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
req              3401 drivers/scsi/qla2xxx/qla_init.c 	if (req->outstanding_cmds)
req              3405 drivers/scsi/qla2xxx/qla_init.c 		req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
req              3408 drivers/scsi/qla2xxx/qla_init.c 			req->num_outstanding_cmds = ha->cur_fw_xcb_count;
req              3410 drivers/scsi/qla2xxx/qla_init.c 			req->num_outstanding_cmds = ha->cur_fw_iocb_count;
req              3413 drivers/scsi/qla2xxx/qla_init.c 	req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
req              3417 drivers/scsi/qla2xxx/qla_init.c 	if (!req->outstanding_cmds) {
req              3422 drivers/scsi/qla2xxx/qla_init.c 		req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
req              3423 drivers/scsi/qla2xxx/qla_init.c 		req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
req              3427 drivers/scsi/qla2xxx/qla_init.c 		if (!req->outstanding_cmds) {
req              3430 drivers/scsi/qla2xxx/qla_init.c 			    "outstanding_cmds for req_que %p.\n", req);
req              3431 drivers/scsi/qla2xxx/qla_init.c 			req->num_outstanding_cmds = 0;
req              3677 drivers/scsi/qla2xxx/qla_init.c 				    vha->req);
req              3933 drivers/scsi/qla2xxx/qla_init.c 	struct req_que *req = ha->req_q_map[0];
req              3939 drivers/scsi/qla2xxx/qla_init.c 	ha->init_cb->request_q_length = cpu_to_le16(req->length);
req              3941 drivers/scsi/qla2xxx/qla_init.c 	put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
req              3960 drivers/scsi/qla2xxx/qla_init.c 	struct req_que *req = ha->req_q_map[0];
req              3967 drivers/scsi/qla2xxx/qla_init.c 	icb->request_q_length = cpu_to_le16(req->length);
req              3969 drivers/scsi/qla2xxx/qla_init.c 	put_unaligned_le64(req->dma, &icb->request_q_address);
req              4051 drivers/scsi/qla2xxx/qla_init.c 	struct req_que *req;
req              4060 drivers/scsi/qla2xxx/qla_init.c 		req = ha->req_q_map[que];
req              4061 drivers/scsi/qla2xxx/qla_init.c 		if (!req || !test_bit(que, ha->req_qid_map))
req              4063 drivers/scsi/qla2xxx/qla_init.c 		req->out_ptr = (void *)(req->ring + req->length);
req              4064 drivers/scsi/qla2xxx/qla_init.c 		*req->out_ptr = 0;
req              4065 drivers/scsi/qla2xxx/qla_init.c 		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
req              4066 drivers/scsi/qla2xxx/qla_init.c 			req->outstanding_cmds[cnt] = NULL;
req              4068 drivers/scsi/qla2xxx/qla_init.c 		req->current_outstanding_cmd = 1;
req              4071 drivers/scsi/qla2xxx/qla_init.c 		req->ring_ptr  = req->ring;
req              4072 drivers/scsi/qla2xxx/qla_init.c 		req->ring_index    = 0;
req              4073 drivers/scsi/qla2xxx/qla_init.c 		req->cnt      = req->length;
req              6752 drivers/scsi/qla2xxx/qla_init.c 	struct req_que *req = ha->req_q_map[0];
req              6795 drivers/scsi/qla2xxx/qla_init.c 		ha->isp_ops->get_flash_version(vha, req->ring);
req              6960 drivers/scsi/qla2xxx/qla_init.c 	struct req_que *req = NULL;
req              6981 drivers/scsi/qla2xxx/qla_init.c 		req = ha->req_q_map[i];
req              6982 drivers/scsi/qla2xxx/qla_init.c 		if (req && test_bit(i, ha->req_qid_map)) {
req              6984 drivers/scsi/qla2xxx/qla_init.c 			req->options &= ~BIT_0;
req              6985 drivers/scsi/qla2xxx/qla_init.c 			ret = qla25xx_init_req_que(base_vha, req);
req              6989 drivers/scsi/qla2xxx/qla_init.c 				    __func__, req->id);
req              6993 drivers/scsi/qla2xxx/qla_init.c 				    __func__, req->id);
req              7636 drivers/scsi/qla2xxx/qla_init.c 	struct req_que *req = ha->req_q_map[0];
req              7642 drivers/scsi/qla2xxx/qla_init.c 	dcode = (void *)req->ring;
req              7655 drivers/scsi/qla2xxx/qla_init.c 	dcode = (void *)req->ring;
req              7681 drivers/scsi/qla2xxx/qla_init.c 			rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
req              7706 drivers/scsi/qla2xxx/qla_init.c 		dcode = (void *)req->ring;
req              7781 drivers/scsi/qla2xxx/qla_init.c 	struct req_que *req = ha->req_q_map[0];
req              7795 drivers/scsi/qla2xxx/qla_init.c 	wcode = (uint16_t *)req->ring;
req              7847 drivers/scsi/qla2xxx/qla_init.c 			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
req              7884 drivers/scsi/qla2xxx/qla_init.c 	struct req_que *req = ha->req_q_map[0];
req              7910 drivers/scsi/qla2xxx/qla_init.c 	dcode = (void *)req->ring;
req              7938 drivers/scsi/qla2xxx/qla_init.c 			rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
req              8977 drivers/scsi/qla2xxx/qla_init.c 		qpair->req = ha->req_q_map[req_id];
req              8978 drivers/scsi/qla2xxx/qla_init.c 		qpair->rsp->req = qpair->req;
req              9037 drivers/scsi/qla2xxx/qla_init.c 	ret = qla25xx_delete_req_que(vha, qpair->req);
req               299 drivers/scsi/qla2xxx/qla_inline.h 	struct req_que *req = qpair->req;
req               301 drivers/scsi/qla2xxx/qla_inline.h 	req->ring_index++;
req               302 drivers/scsi/qla2xxx/qla_inline.h 	if (req->ring_index == req->length) {
req               303 drivers/scsi/qla2xxx/qla_inline.h 		req->ring_index = 0;
req               304 drivers/scsi/qla2xxx/qla_inline.h 		req->ring_ptr = req->ring;
req               306 drivers/scsi/qla2xxx/qla_inline.h 		req->ring_ptr++;
req               308 drivers/scsi/qla2xxx/qla_inline.h 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
req                97 drivers/scsi/qla2xxx/qla_iocb.c 	struct req_que *req = vha->req;
req                99 drivers/scsi/qla2xxx/qla_iocb.c 	req->ring_index++;
req               100 drivers/scsi/qla2xxx/qla_iocb.c 	if (req->ring_index == req->length) {
req               101 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_index = 0;
req               102 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr = req->ring;
req               104 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr++;
req               107 drivers/scsi/qla2xxx/qla_iocb.c 	cont_pkt = (cont_entry_t *)req->ring_ptr;
req               123 drivers/scsi/qla2xxx/qla_iocb.c qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
req               128 drivers/scsi/qla2xxx/qla_iocb.c 	req->ring_index++;
req               129 drivers/scsi/qla2xxx/qla_iocb.c 	if (req->ring_index == req->length) {
req               130 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_index = 0;
req               131 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr = req->ring;
req               133 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr++;
req               136 drivers/scsi/qla2xxx/qla_iocb.c 	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
req               285 drivers/scsi/qla2xxx/qla_iocb.c 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
req               300 drivers/scsi/qla2xxx/qla_iocb.c uint32_t qla2xxx_get_next_handle(struct req_que *req)
req               302 drivers/scsi/qla2xxx/qla_iocb.c 	uint32_t index, handle = req->current_outstanding_cmd;
req               304 drivers/scsi/qla2xxx/qla_iocb.c 	for (index = 1; index < req->num_outstanding_cmds; index++) {
req               306 drivers/scsi/qla2xxx/qla_iocb.c 		if (handle == req->num_outstanding_cmds)
req               308 drivers/scsi/qla2xxx/qla_iocb.c 		if (!req->outstanding_cmds[handle])
req               336 drivers/scsi/qla2xxx/qla_iocb.c 	struct req_que *req;
req               344 drivers/scsi/qla2xxx/qla_iocb.c 	req = ha->req_q_map[0];
req               361 drivers/scsi/qla2xxx/qla_iocb.c 	handle = qla2xxx_get_next_handle(req);
req               378 drivers/scsi/qla2xxx/qla_iocb.c 	if (req->cnt < (req_cnt + 2)) {
req               380 drivers/scsi/qla2xxx/qla_iocb.c 		if (req->ring_index < cnt)
req               381 drivers/scsi/qla2xxx/qla_iocb.c 			req->cnt = cnt - req->ring_index;
req               383 drivers/scsi/qla2xxx/qla_iocb.c 			req->cnt = req->length -
req               384 drivers/scsi/qla2xxx/qla_iocb.c 			    (req->ring_index - cnt);
req               386 drivers/scsi/qla2xxx/qla_iocb.c 		if (req->cnt < (req_cnt + 2))
req               391 drivers/scsi/qla2xxx/qla_iocb.c 	req->current_outstanding_cmd = handle;
req               392 drivers/scsi/qla2xxx/qla_iocb.c 	req->outstanding_cmds[handle] = sp;
req               395 drivers/scsi/qla2xxx/qla_iocb.c 	req->cnt -= req_cnt;
req               397 drivers/scsi/qla2xxx/qla_iocb.c 	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
req               421 drivers/scsi/qla2xxx/qla_iocb.c 	req->ring_index++;
req               422 drivers/scsi/qla2xxx/qla_iocb.c 	if (req->ring_index == req->length) {
req               423 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_index = 0;
req               424 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr = req->ring;
req               426 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr++;
req               431 drivers/scsi/qla2xxx/qla_iocb.c 	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
req               457 drivers/scsi/qla2xxx/qla_iocb.c qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
req               460 drivers/scsi/qla2xxx/qla_iocb.c 	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
req               466 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_index++;
req               467 drivers/scsi/qla2xxx/qla_iocb.c 		if (req->ring_index == req->length) {
req               468 drivers/scsi/qla2xxx/qla_iocb.c 			req->ring_index = 0;
req               469 drivers/scsi/qla2xxx/qla_iocb.c 			req->ring_ptr = req->ring;
req               471 drivers/scsi/qla2xxx/qla_iocb.c 			req->ring_ptr++;
req               475 drivers/scsi/qla2xxx/qla_iocb.c 			WRT_REG_DWORD(req->req_q_in, req->ring_index);
req               477 drivers/scsi/qla2xxx/qla_iocb.c 			WRT_REG_DWORD(req->req_q_in, req->ring_index);
req               480 drivers/scsi/qla2xxx/qla_iocb.c 			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
req               484 drivers/scsi/qla2xxx/qla_iocb.c 			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
req               488 drivers/scsi/qla2xxx/qla_iocb.c 				req->ring_index);
req               512 drivers/scsi/qla2xxx/qla_iocb.c 	struct req_que *req = qpair->req;
req               533 drivers/scsi/qla2xxx/qla_iocb.c 			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
req               541 drivers/scsi/qla2xxx/qla_iocb.c 	qla2x00_start_iocbs(vha, req);
req               699 drivers/scsi/qla2xxx/qla_iocb.c 	uint16_t tot_dsds, struct req_que *req)
req               747 drivers/scsi/qla2xxx/qla_iocb.c 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
req              1601 drivers/scsi/qla2xxx/qla_iocb.c 	struct req_que *req = NULL;
req              1607 drivers/scsi/qla2xxx/qla_iocb.c 	req = vha->req;
req              1623 drivers/scsi/qla2xxx/qla_iocb.c 	handle = qla2xxx_get_next_handle(req);
req              1638 drivers/scsi/qla2xxx/qla_iocb.c 	if (req->cnt < (req_cnt + 2)) {
req              1639 drivers/scsi/qla2xxx/qla_iocb.c 		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
req              1640 drivers/scsi/qla2xxx/qla_iocb.c 		    RD_REG_DWORD_RELAXED(req->req_q_out);
req              1641 drivers/scsi/qla2xxx/qla_iocb.c 		if (req->ring_index < cnt)
req              1642 drivers/scsi/qla2xxx/qla_iocb.c 			req->cnt = cnt - req->ring_index;
req              1644 drivers/scsi/qla2xxx/qla_iocb.c 			req->cnt = req->length -
req              1645 drivers/scsi/qla2xxx/qla_iocb.c 				(req->ring_index - cnt);
req              1646 drivers/scsi/qla2xxx/qla_iocb.c 		if (req->cnt < (req_cnt + 2))
req              1651 drivers/scsi/qla2xxx/qla_iocb.c 	req->current_outstanding_cmd = handle;
req              1652 drivers/scsi/qla2xxx/qla_iocb.c 	req->outstanding_cmds[handle] = sp;
req              1655 drivers/scsi/qla2xxx/qla_iocb.c 	req->cnt -= req_cnt;
req              1657 drivers/scsi/qla2xxx/qla_iocb.c 	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
req              1658 drivers/scsi/qla2xxx/qla_iocb.c 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
req              1685 drivers/scsi/qla2xxx/qla_iocb.c 	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
req              1691 drivers/scsi/qla2xxx/qla_iocb.c 	req->ring_index++;
req              1692 drivers/scsi/qla2xxx/qla_iocb.c 	if (req->ring_index == req->length) {
req              1693 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_index = 0;
req              1694 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr = req->ring;
req              1696 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr++;
req              1701 drivers/scsi/qla2xxx/qla_iocb.c 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
req              1733 drivers/scsi/qla2xxx/qla_iocb.c 	struct req_que		*req = NULL;
req              1750 drivers/scsi/qla2xxx/qla_iocb.c 	req = vha->req;
req              1751 drivers/scsi/qla2xxx/qla_iocb.c 	rsp = req->rsp;
req              1767 drivers/scsi/qla2xxx/qla_iocb.c 	handle = qla2xxx_get_next_handle(req);
req              1823 drivers/scsi/qla2xxx/qla_iocb.c 	if (req->cnt < (req_cnt + 2)) {
req              1824 drivers/scsi/qla2xxx/qla_iocb.c 		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
req              1825 drivers/scsi/qla2xxx/qla_iocb.c 		    RD_REG_DWORD_RELAXED(req->req_q_out);
req              1826 drivers/scsi/qla2xxx/qla_iocb.c 		if (req->ring_index < cnt)
req              1827 drivers/scsi/qla2xxx/qla_iocb.c 			req->cnt = cnt - req->ring_index;
req              1829 drivers/scsi/qla2xxx/qla_iocb.c 			req->cnt = req->length -
req              1830 drivers/scsi/qla2xxx/qla_iocb.c 				(req->ring_index - cnt);
req              1831 drivers/scsi/qla2xxx/qla_iocb.c 		if (req->cnt < (req_cnt + 2))
req              1838 drivers/scsi/qla2xxx/qla_iocb.c 	req->current_outstanding_cmd = handle;
req              1839 drivers/scsi/qla2xxx/qla_iocb.c 	req->outstanding_cmds[handle] = sp;
req              1842 drivers/scsi/qla2xxx/qla_iocb.c 	req->cnt -= req_cnt;
req              1845 drivers/scsi/qla2xxx/qla_iocb.c 	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
req              1846 drivers/scsi/qla2xxx/qla_iocb.c 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
req              1865 drivers/scsi/qla2xxx/qla_iocb.c 	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
req              1876 drivers/scsi/qla2xxx/qla_iocb.c 	req->ring_index++;
req              1877 drivers/scsi/qla2xxx/qla_iocb.c 	if (req->ring_index == req->length) {
req              1878 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_index = 0;
req              1879 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr = req->ring;
req              1881 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr++;
req              1884 drivers/scsi/qla2xxx/qla_iocb.c 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
req              1892 drivers/scsi/qla2xxx/qla_iocb.c 		req->outstanding_cmds[handle] = NULL;
req              1893 drivers/scsi/qla2xxx/qla_iocb.c 		req->cnt += req_cnt;
req              1918 drivers/scsi/qla2xxx/qla_iocb.c 	struct req_que *req = NULL;
req              1928 drivers/scsi/qla2xxx/qla_iocb.c 	req = qpair->req;
req              1943 drivers/scsi/qla2xxx/qla_iocb.c 	handle = qla2xxx_get_next_handle(req);
req              1958 drivers/scsi/qla2xxx/qla_iocb.c 	if (req->cnt < (req_cnt + 2)) {
req              1959 drivers/scsi/qla2xxx/qla_iocb.c 		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
req              1960 drivers/scsi/qla2xxx/qla_iocb.c 		    RD_REG_DWORD_RELAXED(req->req_q_out);
req              1961 drivers/scsi/qla2xxx/qla_iocb.c 		if (req->ring_index < cnt)
req              1962 drivers/scsi/qla2xxx/qla_iocb.c 			req->cnt = cnt - req->ring_index;
req              1964 drivers/scsi/qla2xxx/qla_iocb.c 			req->cnt = req->length -
req              1965 drivers/scsi/qla2xxx/qla_iocb.c 				(req->ring_index - cnt);
req              1966 drivers/scsi/qla2xxx/qla_iocb.c 		if (req->cnt < (req_cnt + 2))
req              1971 drivers/scsi/qla2xxx/qla_iocb.c 	req->current_outstanding_cmd = handle;
req              1972 drivers/scsi/qla2xxx/qla_iocb.c 	req->outstanding_cmds[handle] = sp;
req              1975 drivers/scsi/qla2xxx/qla_iocb.c 	req->cnt -= req_cnt;
req              1977 drivers/scsi/qla2xxx/qla_iocb.c 	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
req              1978 drivers/scsi/qla2xxx/qla_iocb.c 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
req              2005 drivers/scsi/qla2xxx/qla_iocb.c 	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
req              2011 drivers/scsi/qla2xxx/qla_iocb.c 	req->ring_index++;
req              2012 drivers/scsi/qla2xxx/qla_iocb.c 	if (req->ring_index == req->length) {
req              2013 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_index = 0;
req              2014 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr = req->ring;
req              2016 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr++;
req              2021 drivers/scsi/qla2xxx/qla_iocb.c 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
req              2054 drivers/scsi/qla2xxx/qla_iocb.c 	struct req_que		*req = NULL;
req              2087 drivers/scsi/qla2xxx/qla_iocb.c 	req = qpair->req;
req              2102 drivers/scsi/qla2xxx/qla_iocb.c 	handle = qla2xxx_get_next_handle(req);
req              2158 drivers/scsi/qla2xxx/qla_iocb.c 	if (req->cnt < (req_cnt + 2)) {
req              2159 drivers/scsi/qla2xxx/qla_iocb.c 		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
req              2160 drivers/scsi/qla2xxx/qla_iocb.c 		    RD_REG_DWORD_RELAXED(req->req_q_out);
req              2161 drivers/scsi/qla2xxx/qla_iocb.c 		if (req->ring_index < cnt)
req              2162 drivers/scsi/qla2xxx/qla_iocb.c 			req->cnt = cnt - req->ring_index;
req              2164 drivers/scsi/qla2xxx/qla_iocb.c 			req->cnt = req->length -
req              2165 drivers/scsi/qla2xxx/qla_iocb.c 				(req->ring_index - cnt);
req              2166 drivers/scsi/qla2xxx/qla_iocb.c 		if (req->cnt < (req_cnt + 2))
req              2173 drivers/scsi/qla2xxx/qla_iocb.c 	req->current_outstanding_cmd = handle;
req              2174 drivers/scsi/qla2xxx/qla_iocb.c 	req->outstanding_cmds[handle] = sp;
req              2177 drivers/scsi/qla2xxx/qla_iocb.c 	req->cnt -= req_cnt;
req              2180 drivers/scsi/qla2xxx/qla_iocb.c 	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
req              2181 drivers/scsi/qla2xxx/qla_iocb.c 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
req              2200 drivers/scsi/qla2xxx/qla_iocb.c 	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
req              2209 drivers/scsi/qla2xxx/qla_iocb.c 	req->ring_index++;
req              2210 drivers/scsi/qla2xxx/qla_iocb.c 	if (req->ring_index == req->length) {
req              2211 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_index = 0;
req              2212 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr = req->ring;
req              2214 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr++;
req              2217 drivers/scsi/qla2xxx/qla_iocb.c 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
req              2230 drivers/scsi/qla2xxx/qla_iocb.c 		req->outstanding_cmds[handle] = NULL;
req              2231 drivers/scsi/qla2xxx/qla_iocb.c 		req->cnt += req_cnt;
req              2248 drivers/scsi/qla2xxx/qla_iocb.c 	struct req_que *req = qpair->req;
req              2249 drivers/scsi/qla2xxx/qla_iocb.c 	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
req              2264 drivers/scsi/qla2xxx/qla_iocb.c 	if (req->cnt < req_cnt + 2) {
req              2266 drivers/scsi/qla2xxx/qla_iocb.c 			cnt = *req->out_ptr;
req              2280 drivers/scsi/qla2xxx/qla_iocb.c 		if  (req->ring_index < cnt)
req              2281 drivers/scsi/qla2xxx/qla_iocb.c 			req->cnt = cnt - req->ring_index;
req              2283 drivers/scsi/qla2xxx/qla_iocb.c 			req->cnt = req->length -
req              2284 drivers/scsi/qla2xxx/qla_iocb.c 			    (req->ring_index - cnt);
req              2286 drivers/scsi/qla2xxx/qla_iocb.c 	if (req->cnt < req_cnt + 2)
req              2290 drivers/scsi/qla2xxx/qla_iocb.c 		handle = qla2xxx_get_next_handle(req);
req              2298 drivers/scsi/qla2xxx/qla_iocb.c 		req->current_outstanding_cmd = handle;
req              2299 drivers/scsi/qla2xxx/qla_iocb.c 		req->outstanding_cmds[handle] = sp;
req              2304 drivers/scsi/qla2xxx/qla_iocb.c 	req->cnt -= req_cnt;
req              2305 drivers/scsi/qla2xxx/qla_iocb.c 	pkt = req->ring_ptr;
req              2485 drivers/scsi/qla2xxx/qla_iocb.c 	struct req_que *req = vha->req;
req              2492 drivers/scsi/qla2xxx/qla_iocb.c 	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
req              3094 drivers/scsi/qla2xxx/qla_iocb.c 	struct req_que *req = NULL;
req              3100 drivers/scsi/qla2xxx/qla_iocb.c 	req = vha->req;
req              3122 drivers/scsi/qla2xxx/qla_iocb.c 	handle = qla2xxx_get_next_handle(req);
req              3182 drivers/scsi/qla2xxx/qla_iocb.c 		if (req->cnt < (req_cnt + 2)) {
req              3185 drivers/scsi/qla2xxx/qla_iocb.c 			if (req->ring_index < cnt)
req              3186 drivers/scsi/qla2xxx/qla_iocb.c 				req->cnt = cnt - req->ring_index;
req              3188 drivers/scsi/qla2xxx/qla_iocb.c 				req->cnt = req->length -
req              3189 drivers/scsi/qla2xxx/qla_iocb.c 					(req->ring_index - cnt);
req              3190 drivers/scsi/qla2xxx/qla_iocb.c 			if (req->cnt < (req_cnt + 2))
req              3232 drivers/scsi/qla2xxx/qla_iocb.c 		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
req              3233 drivers/scsi/qla2xxx/qla_iocb.c 		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
req              3291 drivers/scsi/qla2xxx/qla_iocb.c 		if (req->cnt < (req_cnt + 2)) {
req              3294 drivers/scsi/qla2xxx/qla_iocb.c 			if (req->ring_index < cnt)
req              3295 drivers/scsi/qla2xxx/qla_iocb.c 				req->cnt = cnt - req->ring_index;
req              3297 drivers/scsi/qla2xxx/qla_iocb.c 				req->cnt = req->length -
req              3298 drivers/scsi/qla2xxx/qla_iocb.c 					(req->ring_index - cnt);
req              3300 drivers/scsi/qla2xxx/qla_iocb.c 		if (req->cnt < (req_cnt + 2))
req              3303 drivers/scsi/qla2xxx/qla_iocb.c 		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
req              3304 drivers/scsi/qla2xxx/qla_iocb.c 		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
req              3334 drivers/scsi/qla2xxx/qla_iocb.c 		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
req              3345 drivers/scsi/qla2xxx/qla_iocb.c 	req->current_outstanding_cmd = handle;
req              3346 drivers/scsi/qla2xxx/qla_iocb.c 	req->outstanding_cmds[handle] = sp;
req              3349 drivers/scsi/qla2xxx/qla_iocb.c 	req->cnt -= req_cnt;
req              3353 drivers/scsi/qla2xxx/qla_iocb.c 	req->ring_index++;
req              3354 drivers/scsi/qla2xxx/qla_iocb.c 	if (req->ring_index == req->length) {
req              3355 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_index = 0;
req              3356 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr = req->ring;
req              3358 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr++;
req              3364 drivers/scsi/qla2xxx/qla_iocb.c 	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
req              3404 drivers/scsi/qla2xxx/qla_iocb.c 	struct req_que *req = sp->qpair->req;
req              3409 drivers/scsi/qla2xxx/qla_iocb.c 	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
req              3631 drivers/scsi/qla2xxx/qla_iocb.c 	qla2x00_start_iocbs(vha, qp->req);
req              3690 drivers/scsi/qla2xxx/qla_iocb.c 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
req              3712 drivers/scsi/qla2xxx/qla_iocb.c 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
req              3736 drivers/scsi/qla2xxx/qla_iocb.c 	struct req_que *req;
req              3742 drivers/scsi/qla2xxx/qla_iocb.c 	req = vha->req;
req              3755 drivers/scsi/qla2xxx/qla_iocb.c 	handle = qla2xxx_get_next_handle(req);
req              3765 drivers/scsi/qla2xxx/qla_iocb.c 	if (req->cnt < req_cnt + 2) {
req              3766 drivers/scsi/qla2xxx/qla_iocb.c 		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
req              3767 drivers/scsi/qla2xxx/qla_iocb.c 		    RD_REG_DWORD_RELAXED(req->req_q_out);
req              3768 drivers/scsi/qla2xxx/qla_iocb.c 		if  (req->ring_index < cnt)
req              3769 drivers/scsi/qla2xxx/qla_iocb.c 			req->cnt = cnt - req->ring_index;
req              3771 drivers/scsi/qla2xxx/qla_iocb.c 			req->cnt = req->length -
req              3772 drivers/scsi/qla2xxx/qla_iocb.c 				(req->ring_index - cnt);
req              3774 drivers/scsi/qla2xxx/qla_iocb.c 	if (req->cnt < req_cnt + 2) {
req              3779 drivers/scsi/qla2xxx/qla_iocb.c 	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
req              3780 drivers/scsi/qla2xxx/qla_iocb.c 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
req              3796 drivers/scsi/qla2xxx/qla_iocb.c 	req->current_outstanding_cmd = handle;
req              3797 drivers/scsi/qla2xxx/qla_iocb.c 	req->outstanding_cmds[handle] = sp;
req              3799 drivers/scsi/qla2xxx/qla_iocb.c 	req->cnt -= req_cnt;
req              3803 drivers/scsi/qla2xxx/qla_iocb.c 	qla2x00_start_iocbs(vha, req);
req               707 drivers/scsi/qla2xxx/qla_isr.c 			qla2x00_process_completed_request(vha, rsp->req,
req              1274 drivers/scsi/qla2xxx/qla_isr.c 				  struct req_que *req, uint32_t index)
req              1280 drivers/scsi/qla2xxx/qla_isr.c 	if (index >= req->num_outstanding_cmds) {
req              1291 drivers/scsi/qla2xxx/qla_isr.c 	sp = req->outstanding_cmds[index];
req              1294 drivers/scsi/qla2xxx/qla_isr.c 		req->outstanding_cmds[index] = NULL;
req              1310 drivers/scsi/qla2xxx/qla_isr.c     struct req_que *req, void *iocb)
req              1318 drivers/scsi/qla2xxx/qla_isr.c 	if (index >= req->num_outstanding_cmds) {
req              1328 drivers/scsi/qla2xxx/qla_isr.c 	sp = req->outstanding_cmds[index];
req              1340 drivers/scsi/qla2xxx/qla_isr.c 	req->outstanding_cmds[index] = NULL;
req              1347 drivers/scsi/qla2xxx/qla_isr.c qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
req              1358 drivers/scsi/qla2xxx/qla_isr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
req              1433 drivers/scsi/qla2xxx/qla_isr.c qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
req              1442 drivers/scsi/qla2xxx/qla_isr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
req              1458 drivers/scsi/qla2xxx/qla_isr.c qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
req              1465 drivers/scsi/qla2xxx/qla_isr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
req              1476 drivers/scsi/qla2xxx/qla_isr.c qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
req              1487 drivers/scsi/qla2xxx/qla_isr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
req              1548 drivers/scsi/qla2xxx/qla_isr.c qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
req              1561 drivers/scsi/qla2xxx/qla_isr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
req              1678 drivers/scsi/qla2xxx/qla_isr.c qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
req              1689 drivers/scsi/qla2xxx/qla_isr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
req              1812 drivers/scsi/qla2xxx/qla_isr.c qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
req              1821 drivers/scsi/qla2xxx/qla_isr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
req              1861 drivers/scsi/qla2xxx/qla_isr.c static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
req              1959 drivers/scsi/qla2xxx/qla_isr.c static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
req              1966 drivers/scsi/qla2xxx/qla_isr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
req              2008 drivers/scsi/qla2xxx/qla_isr.c 			qla2x00_process_completed_request(vha, rsp->req,
req              2015 drivers/scsi/qla2xxx/qla_isr.c 			qla2x00_process_completed_request(vha, rsp->req,
req              2022 drivers/scsi/qla2xxx/qla_isr.c 		qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt);
req              2025 drivers/scsi/qla2xxx/qla_isr.c 		qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
req              2255 drivers/scsi/qla2xxx/qla_isr.c 				  struct req_que *req, uint32_t index)
req              2270 drivers/scsi/qla2xxx/qla_isr.c 	if (index >= req->num_outstanding_cmds) {
req              2277 drivers/scsi/qla2xxx/qla_isr.c 	sp = req->outstanding_cmds[index];
req              2281 drivers/scsi/qla2xxx/qla_isr.c 		    req->id, index);
req              2288 drivers/scsi/qla2xxx/qla_isr.c 	req->outstanding_cmds[index] = NULL;
req              2426 drivers/scsi/qla2xxx/qla_isr.c 	struct req_que *req;
req              2442 drivers/scsi/qla2xxx/qla_isr.c 	req = ha->req_q_map[que];
req              2445 drivers/scsi/qla2xxx/qla_isr.c 	if (req == NULL ||
req              2449 drivers/scsi/qla2xxx/qla_isr.c 		    "que=%u.\n", sts->handle, req, que);
req              2454 drivers/scsi/qla2xxx/qla_isr.c 	if (handle < req->num_outstanding_cmds) {
req              2455 drivers/scsi/qla2xxx/qla_isr.c 		sp = req->outstanding_cmds[handle];
req              2483 drivers/scsi/qla2xxx/qla_isr.c 		req->outstanding_cmds[handle] = NULL;
req              2492 drivers/scsi/qla2xxx/qla_isr.c 		req->outstanding_cmds[handle] = NULL;
req              2493 drivers/scsi/qla2xxx/qla_isr.c 		qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
req              2498 drivers/scsi/qla2xxx/qla_isr.c 		qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
req              2504 drivers/scsi/qla2xxx/qla_isr.c 		qla24xx_tm_iocb_entry(vha, req, pkt);
req              2510 drivers/scsi/qla2xxx/qla_isr.c 		qla2x00_process_completed_request(vha, req, handle);
req              2515 drivers/scsi/qla2xxx/qla_isr.c 	req->outstanding_cmds[handle] = NULL;
req              2870 drivers/scsi/qla2xxx/qla_isr.c 	struct req_que *req = NULL;
req              2880 drivers/scsi/qla2xxx/qla_isr.c 	req = ha->req_q_map[que];
req              2898 drivers/scsi/qla2xxx/qla_isr.c 		sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
req              2954 drivers/scsi/qla2xxx/qla_isr.c qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
req              2961 drivers/scsi/qla2xxx/qla_isr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
req              2971 drivers/scsi/qla2xxx/qla_isr.c     struct pt_ls4_request *pkt, struct req_que *req)
req              2977 drivers/scsi/qla2xxx/qla_isr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
req              3035 drivers/scsi/qla2xxx/qla_isr.c 			qla24xx_logio_entry(vha, rsp->req,
req              3039 drivers/scsi/qla2xxx/qla_isr.c 			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
req              3042 drivers/scsi/qla2xxx/qla_isr.c 			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
req              3062 drivers/scsi/qla2xxx/qla_isr.c 			    rsp->req);
req              3069 drivers/scsi/qla2xxx/qla_isr.c 				qla24xxx_nack_iocb_entry(vha, rsp->req,
req              3078 drivers/scsi/qla2xxx/qla_isr.c 			qla24xx_abort_iocb_entry(vha, rsp->req,
req              3082 drivers/scsi/qla2xxx/qla_isr.c 			qla24xx_mbx_iocb_entry(vha, rsp->req,
req              3086 drivers/scsi/qla2xxx/qla_isr.c 			qla_ctrlvp_completed(vha, rsp->req,
req              1472 drivers/scsi/qla2xxx/qla_mbx.c 	struct req_que *req;
req              1479 drivers/scsi/qla2xxx/qla_mbx.c 		req = sp->qpair->req;
req              1481 drivers/scsi/qla2xxx/qla_mbx.c 		req = vha->req;
req              1484 drivers/scsi/qla2xxx/qla_mbx.c 	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
req              1485 drivers/scsi/qla2xxx/qla_mbx.c 		if (req->outstanding_cmds[handle] == sp)
req              1490 drivers/scsi/qla2xxx/qla_mbx.c 	if (handle == req->num_outstanding_cmds) {
req              2368 drivers/scsi/qla2xxx/qla_mbx.c 	struct req_que *req;
req              2374 drivers/scsi/qla2xxx/qla_mbx.c 		req = vha->qpair->req;
req              2376 drivers/scsi/qla2xxx/qla_mbx.c 		req = ha->req_q_map[0];
req              2387 drivers/scsi/qla2xxx/qla_mbx.c 	lg->handle = MAKE_HANDLE(req->id, lg->handle);
req              2642 drivers/scsi/qla2xxx/qla_mbx.c 	struct req_que *req;
req              2654 drivers/scsi/qla2xxx/qla_mbx.c 	req = vha->req;
req              2657 drivers/scsi/qla2xxx/qla_mbx.c 	lg->handle = MAKE_HANDLE(req->id, lg->handle);
req              3114 drivers/scsi/qla2xxx/qla_mbx.c 	struct req_que *req = vha->req;
req              3121 drivers/scsi/qla2xxx/qla_mbx.c 		req = sp->qpair->req;
req              3129 drivers/scsi/qla2xxx/qla_mbx.c 	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
req              3130 drivers/scsi/qla2xxx/qla_mbx.c 		if (req->outstanding_cmds[handle] == sp)
req              3134 drivers/scsi/qla2xxx/qla_mbx.c 	if (handle == req->num_outstanding_cmds) {
req              3148 drivers/scsi/qla2xxx/qla_mbx.c 	abt->handle = MAKE_HANDLE(req->id, abt->handle);
req              3150 drivers/scsi/qla2xxx/qla_mbx.c 	abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
req              3156 drivers/scsi/qla2xxx/qla_mbx.c 	abt->req_que_no = cpu_to_le16(req->id);
req              3202 drivers/scsi/qla2xxx/qla_mbx.c 	struct req_que *req;
req              3207 drivers/scsi/qla2xxx/qla_mbx.c 	req = vha->req;
req              3215 drivers/scsi/qla2xxx/qla_mbx.c 		req = qpair->req;
req              3227 drivers/scsi/qla2xxx/qla_mbx.c 	tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
req              4236 drivers/scsi/qla2xxx/qla_mbx.c 		struct verify_chip_entry_84xx req;
req              4269 drivers/scsi/qla2xxx/qla_mbx.c 		mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
req              4270 drivers/scsi/qla2xxx/qla_mbx.c 		mn->p.req.entry_count = 1;
req              4271 drivers/scsi/qla2xxx/qla_mbx.c 		mn->p.req.options = cpu_to_le16(options);
req              4335 drivers/scsi/qla2xxx/qla_mbx.c qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
req              4350 drivers/scsi/qla2xxx/qla_mbx.c 		req->options |= BIT_13;
req              4353 drivers/scsi/qla2xxx/qla_mbx.c 	mcp->mb[1] = req->options;
req              4354 drivers/scsi/qla2xxx/qla_mbx.c 	mcp->mb[2] = MSW(LSD(req->dma));
req              4355 drivers/scsi/qla2xxx/qla_mbx.c 	mcp->mb[3] = LSW(LSD(req->dma));
req              4356 drivers/scsi/qla2xxx/qla_mbx.c 	mcp->mb[6] = MSW(MSD(req->dma));
req              4357 drivers/scsi/qla2xxx/qla_mbx.c 	mcp->mb[7] = LSW(MSD(req->dma));
req              4358 drivers/scsi/qla2xxx/qla_mbx.c 	mcp->mb[5] = req->length;
req              4359 drivers/scsi/qla2xxx/qla_mbx.c 	if (req->rsp)
req              4360 drivers/scsi/qla2xxx/qla_mbx.c 		mcp->mb[10] = req->rsp->id;
req              4361 drivers/scsi/qla2xxx/qla_mbx.c 	mcp->mb[12] = req->qos;
req              4362 drivers/scsi/qla2xxx/qla_mbx.c 	mcp->mb[11] = req->vp_idx;
req              4363 drivers/scsi/qla2xxx/qla_mbx.c 	mcp->mb[13] = req->rid;
req              4367 drivers/scsi/qla2xxx/qla_mbx.c 	mcp->mb[4] = req->id;
req              4371 drivers/scsi/qla2xxx/qla_mbx.c 	mcp->mb[9] = *req->out_ptr = 0;
req              4388 drivers/scsi/qla2xxx/qla_mbx.c 	if (!(req->options & BIT_0)) {
req              4389 drivers/scsi/qla2xxx/qla_mbx.c 		WRT_REG_DWORD(req->req_q_in, 0);
req              4391 drivers/scsi/qla2xxx/qla_mbx.c 			WRT_REG_DWORD(req->req_q_out, 0);
req               523 drivers/scsi/qla2xxx/qla_mid.c 	vha->req = base_vha->req;
req               525 drivers/scsi/qla2xxx/qla_mid.c 	host->can_queue = base_vha->req->length + 128;
req               555 drivers/scsi/qla2xxx/qla_mid.c qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
req               558 drivers/scsi/qla2xxx/qla_mid.c 	uint16_t que_id = req->id;
req               560 drivers/scsi/qla2xxx/qla_mid.c 	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
req               561 drivers/scsi/qla2xxx/qla_mid.c 		sizeof(request_t), req->ring, req->dma);
req               562 drivers/scsi/qla2xxx/qla_mid.c 	req->ring = NULL;
req               563 drivers/scsi/qla2xxx/qla_mid.c 	req->dma = 0;
req               570 drivers/scsi/qla2xxx/qla_mid.c 	kfree(req->outstanding_cmds);
req               571 drivers/scsi/qla2xxx/qla_mid.c 	kfree(req);
req               572 drivers/scsi/qla2xxx/qla_mid.c 	req = NULL;
req               602 drivers/scsi/qla2xxx/qla_mid.c qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
req               606 drivers/scsi/qla2xxx/qla_mid.c 	if (req && vha->flags.qpairs_req_created) {
req               607 drivers/scsi/qla2xxx/qla_mid.c 		req->options |= BIT_0;
req               608 drivers/scsi/qla2xxx/qla_mid.c 		ret = qla25xx_init_req_que(vha, req);
req               612 drivers/scsi/qla2xxx/qla_mid.c 		qla25xx_free_req_que(vha, req);
req               640 drivers/scsi/qla2xxx/qla_mid.c 	struct req_que *req = NULL;
req               652 drivers/scsi/qla2xxx/qla_mid.c 			req = ha->req_q_map[cnt];
req               653 drivers/scsi/qla2xxx/qla_mid.c 			if (req && test_bit(cnt, ha->req_qid_map)) {
req               654 drivers/scsi/qla2xxx/qla_mid.c 				ret = qla25xx_delete_req_que(vha, req);
req               658 drivers/scsi/qla2xxx/qla_mid.c 					    req->id);
req               687 drivers/scsi/qla2xxx/qla_mid.c 	struct req_que *req = NULL;
req               694 drivers/scsi/qla2xxx/qla_mid.c 	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
req               695 drivers/scsi/qla2xxx/qla_mid.c 	if (req == NULL) {
req               701 drivers/scsi/qla2xxx/qla_mid.c 	req->length = REQUEST_ENTRY_CNT_24XX;
req               702 drivers/scsi/qla2xxx/qla_mid.c 	req->ring = dma_alloc_coherent(&ha->pdev->dev,
req               703 drivers/scsi/qla2xxx/qla_mid.c 			(req->length + 1) * sizeof(request_t),
req               704 drivers/scsi/qla2xxx/qla_mid.c 			&req->dma, GFP_KERNEL);
req               705 drivers/scsi/qla2xxx/qla_mid.c 	if (req->ring == NULL) {
req               711 drivers/scsi/qla2xxx/qla_mid.c 	ret = qla2x00_alloc_outstanding_cmds(ha, req);
req               724 drivers/scsi/qla2xxx/qla_mid.c 	ha->req_q_map[que_id] = req;
req               725 drivers/scsi/qla2xxx/qla_mid.c 	req->rid = rid;
req               726 drivers/scsi/qla2xxx/qla_mid.c 	req->vp_idx = vp_idx;
req               727 drivers/scsi/qla2xxx/qla_mid.c 	req->qos = qos;
req               731 drivers/scsi/qla2xxx/qla_mid.c 	    que_id, req->rid, req->vp_idx, req->qos);
req               734 drivers/scsi/qla2xxx/qla_mid.c 	    que_id, req->rid, req->vp_idx, req->qos);
req               736 drivers/scsi/qla2xxx/qla_mid.c 		req->rsp = NULL;
req               738 drivers/scsi/qla2xxx/qla_mid.c 		req->rsp = ha->rsp_q_map[rsp_que];
req               740 drivers/scsi/qla2xxx/qla_mid.c 	if (MSB(req->rid))
req               743 drivers/scsi/qla2xxx/qla_mid.c 	if (LSB(req->rid))
req               745 drivers/scsi/qla2xxx/qla_mid.c 	req->options = options;
req               748 drivers/scsi/qla2xxx/qla_mid.c 	    "options=0x%x.\n", req->options);
req               750 drivers/scsi/qla2xxx/qla_mid.c 	    "options=0x%x.\n", req->options);
req               751 drivers/scsi/qla2xxx/qla_mid.c 	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
req               752 drivers/scsi/qla2xxx/qla_mid.c 		req->outstanding_cmds[cnt] = NULL;
req               753 drivers/scsi/qla2xxx/qla_mid.c 	req->current_outstanding_cmd = 1;
req               755 drivers/scsi/qla2xxx/qla_mid.c 	req->ring_ptr = req->ring;
req               756 drivers/scsi/qla2xxx/qla_mid.c 	req->ring_index = 0;
req               757 drivers/scsi/qla2xxx/qla_mid.c 	req->cnt = req->length;
req               758 drivers/scsi/qla2xxx/qla_mid.c 	req->id = que_id;
req               760 drivers/scsi/qla2xxx/qla_mid.c 	req->req_q_in = &reg->isp25mq.req_q_in;
req               761 drivers/scsi/qla2xxx/qla_mid.c 	req->req_q_out = &reg->isp25mq.req_q_out;
req               762 drivers/scsi/qla2xxx/qla_mid.c 	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
req               763 drivers/scsi/qla2xxx/qla_mid.c 	req->out_ptr = (void *)(req->ring + req->length);
req               768 drivers/scsi/qla2xxx/qla_mid.c 	    req->ring_ptr, req->ring_index,
req               769 drivers/scsi/qla2xxx/qla_mid.c 	    req->cnt, req->id, req->max_q_depth);
req               773 drivers/scsi/qla2xxx/qla_mid.c 	    req->ring_ptr, req->ring_index, req->cnt,
req               774 drivers/scsi/qla2xxx/qla_mid.c 	    req->id, req->max_q_depth);
req               777 drivers/scsi/qla2xxx/qla_mid.c 		ret = qla25xx_init_req_que(base_vha, req);
req               789 drivers/scsi/qla2xxx/qla_mid.c 	return req->id;
req               792 drivers/scsi/qla2xxx/qla_mid.c 	qla25xx_free_req_que(base_vha, req);
req               905 drivers/scsi/qla2xxx/qla_mid.c 	rsp->req = NULL;
req               659 drivers/scsi/qla2xxx/qla_mr.c 	struct req_que *req = ha->req_q_map[0];
req               661 drivers/scsi/qla2xxx/qla_mr.c 	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
req               837 drivers/scsi/qla2xxx/qla_mr.c 	struct req_que *req = ha->req_q_map[0];
req               840 drivers/scsi/qla2xxx/qla_mr.c 	req->length_fx00 = req->length;
req               841 drivers/scsi/qla2xxx/qla_mr.c 	req->ring_fx00 = req->ring;
req               842 drivers/scsi/qla2xxx/qla_mr.c 	req->dma_fx00 = req->dma;
req               850 drivers/scsi/qla2xxx/qla_mr.c 	    "req->dma_fx00: 0x%llx\n", req, req->ring_fx00,
req               851 drivers/scsi/qla2xxx/qla_mr.c 	    req->length_fx00, (u64)req->dma_fx00);
req               863 drivers/scsi/qla2xxx/qla_mr.c 	struct req_que *req = ha->req_q_map[0];
req               867 drivers/scsi/qla2xxx/qla_mr.c 	req->length = ha->req_que_len;
req               868 drivers/scsi/qla2xxx/qla_mr.c 	req->ring = (void __force *)ha->iobase + ha->req_que_off;
req               869 drivers/scsi/qla2xxx/qla_mr.c 	req->dma = bar2_hdl + ha->req_que_off;
req               870 drivers/scsi/qla2xxx/qla_mr.c 	if ((!req->ring) || (req->length == 0)) {
req               879 drivers/scsi/qla2xxx/qla_mr.c 	    req, req->ring, req->length,
req               880 drivers/scsi/qla2xxx/qla_mr.c 	    ha->req_que_off, (u64)req->dma);
req              2068 drivers/scsi/qla2xxx/qla_mr.c 	rval = qla2x00_alloc_outstanding_cmds(ha, vha->req);
req              2176 drivers/scsi/qla2xxx/qla_mr.c qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
req              2191 drivers/scsi/qla2xxx/qla_mr.c qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
req              2198 drivers/scsi/qla2xxx/qla_mr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
req              2208 drivers/scsi/qla2xxx/qla_mr.c qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
req              2220 drivers/scsi/qla2xxx/qla_mr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
req              2293 drivers/scsi/qla2xxx/qla_mr.c 	struct req_que *req;
req              2305 drivers/scsi/qla2xxx/qla_mr.c 	req = ha->req_q_map[que];
req              2308 drivers/scsi/qla2xxx/qla_mr.c 	if (handle < req->num_outstanding_cmds)
req              2309 drivers/scsi/qla2xxx/qla_mr.c 		sp = req->outstanding_cmds[handle];
req              2323 drivers/scsi/qla2xxx/qla_mr.c 		req->outstanding_cmds[handle] = NULL;
req              2324 drivers/scsi/qla2xxx/qla_mr.c 		qlafx00_tm_iocb_entry(vha, req, pkt, sp,
req              2331 drivers/scsi/qla2xxx/qla_mr.c 		qla2x00_process_completed_request(vha, req, handle);
req              2335 drivers/scsi/qla2xxx/qla_mr.c 	req->outstanding_cmds[handle] = NULL;
req              2641 drivers/scsi/qla2xxx/qla_mr.c 	struct req_que *req;
req              2662 drivers/scsi/qla2xxx/qla_mr.c 		req = ha->req_q_map[que];
req              2665 drivers/scsi/qla2xxx/qla_mr.c 		if (handle < req->num_outstanding_cmds)
req              2666 drivers/scsi/qla2xxx/qla_mr.c 			sp = req->outstanding_cmds[handle];
req              2677 drivers/scsi/qla2xxx/qla_mr.c 		qla2x00_process_completed_request(vha, req, handle);
req              2696 drivers/scsi/qla2xxx/qla_mr.c 	struct req_que *req = NULL;
req              2699 drivers/scsi/qla2xxx/qla_mr.c 	req = ha->req_q_map[que];
req              2701 drivers/scsi/qla2xxx/qla_mr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
req              2768 drivers/scsi/qla2xxx/qla_mr.c 			qlafx00_abort_iocb_entry(vha, rsp->req,
req              2773 drivers/scsi/qla2xxx/qla_mr.c 			qlafx00_ioctl_iosb_entry(vha, rsp->req,
req              2971 drivers/scsi/qla2xxx/qla_mr.c qlafx00_prep_cont_type1_iocb(struct req_que *req,
req              2977 drivers/scsi/qla2xxx/qla_mr.c 	req->ring_index++;
req              2978 drivers/scsi/qla2xxx/qla_mr.c 	if (req->ring_index == req->length) {
req              2979 drivers/scsi/qla2xxx/qla_mr.c 		req->ring_index = 0;
req              2980 drivers/scsi/qla2xxx/qla_mr.c 		req->ring_ptr = req->ring;
req              2982 drivers/scsi/qla2xxx/qla_mr.c 		req->ring_ptr++;
req              2985 drivers/scsi/qla2xxx/qla_mr.c 	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
req              3003 drivers/scsi/qla2xxx/qla_mr.c 	struct req_que *req;
req              3008 drivers/scsi/qla2xxx/qla_mr.c 	req = vha->req;
req              3046 drivers/scsi/qla2xxx/qla_mr.c 			    qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
req              3082 drivers/scsi/qla2xxx/qla_mr.c 	struct req_que *req = NULL;
req              3093 drivers/scsi/qla2xxx/qla_mr.c 	req = vha->req;
req              3101 drivers/scsi/qla2xxx/qla_mr.c 	handle = qla2xxx_get_next_handle(req);
req              3116 drivers/scsi/qla2xxx/qla_mr.c 	if (req->cnt < (req_cnt + 2)) {
req              3117 drivers/scsi/qla2xxx/qla_mr.c 		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
req              3119 drivers/scsi/qla2xxx/qla_mr.c 		if (req->ring_index < cnt)
req              3120 drivers/scsi/qla2xxx/qla_mr.c 			req->cnt = cnt - req->ring_index;
req              3122 drivers/scsi/qla2xxx/qla_mr.c 			req->cnt = req->length -
req              3123 drivers/scsi/qla2xxx/qla_mr.c 				(req->ring_index - cnt);
req              3124 drivers/scsi/qla2xxx/qla_mr.c 		if (req->cnt < (req_cnt + 2))
req              3129 drivers/scsi/qla2xxx/qla_mr.c 	req->current_outstanding_cmd = handle;
req              3130 drivers/scsi/qla2xxx/qla_mr.c 	req->outstanding_cmds[handle] = sp;
req              3133 drivers/scsi/qla2xxx/qla_mr.c 	req->cnt -= req_cnt;
req              3135 drivers/scsi/qla2xxx/qla_mr.c 	cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr;
req              3139 drivers/scsi/qla2xxx/qla_mr.c 	lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
req              3172 drivers/scsi/qla2xxx/qla_mr.c 	req->ring_index++;
req              3173 drivers/scsi/qla2xxx/qla_mr.c 	if (req->ring_index == req->length) {
req              3174 drivers/scsi/qla2xxx/qla_mr.c 		req->ring_index = 0;
req              3175 drivers/scsi/qla2xxx/qla_mr.c 		req->ring_ptr = req->ring;
req              3177 drivers/scsi/qla2xxx/qla_mr.c 		req->ring_ptr++;
req              3182 drivers/scsi/qla2xxx/qla_mr.c 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
req              3202 drivers/scsi/qla2xxx/qla_mr.c 	struct req_que *req = vha->req;
req              3209 drivers/scsi/qla2xxx/qla_mr.c 	tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
req              3229 drivers/scsi/qla2xxx/qla_mr.c 	struct req_que *req = vha->req;
req              3235 drivers/scsi/qla2xxx/qla_mr.c 	abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
req              3237 drivers/scsi/qla2xxx/qla_mr.c 	    cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl));
req              3239 drivers/scsi/qla2xxx/qla_mr.c 	abt_iocb.req_que_no = cpu_to_le16(req->id);
req              3337 drivers/scsi/qla2xxx/qla_mr.c 						sp->vha->req, &lcont_pkt);
req              3393 drivers/scsi/qla2xxx/qla_mr.c 						sp->vha->req, &lcont_pkt);
req                18 drivers/scsi/qla2xxx/qla_nvme.c 	struct nvme_fc_port_info req;
req                41 drivers/scsi/qla2xxx/qla_nvme.c 	memset(&req, 0, sizeof(struct nvme_fc_port_info));
req                42 drivers/scsi/qla2xxx/qla_nvme.c 	req.port_name = wwn_to_u64(fcport->port_name);
req                43 drivers/scsi/qla2xxx/qla_nvme.c 	req.node_name = wwn_to_u64(fcport->node_name);
req                44 drivers/scsi/qla2xxx/qla_nvme.c 	req.port_role = 0;
req                45 drivers/scsi/qla2xxx/qla_nvme.c 	req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;
req                48 drivers/scsi/qla2xxx/qla_nvme.c 		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;
req                51 drivers/scsi/qla2xxx/qla_nvme.c 		req.port_role |= FC_PORT_ROLE_NVME_TARGET;
req                54 drivers/scsi/qla2xxx/qla_nvme.c 		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
req                56 drivers/scsi/qla2xxx/qla_nvme.c 	req.port_id = fcport->d_id.b24;
req                60 drivers/scsi/qla2xxx/qla_nvme.c 	    __func__, req.node_name, req.port_name,
req                61 drivers/scsi/qla2xxx/qla_nvme.c 	    req.port_id);
req                63 drivers/scsi/qla2xxx/qla_nvme.c 	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
req               363 drivers/scsi/qla2xxx/qla_nvme.c 	struct req_que *req = NULL;
req               373 drivers/scsi/qla2xxx/qla_nvme.c 	req = qpair->req;
req               379 drivers/scsi/qla2xxx/qla_nvme.c 	handle = qla2xxx_get_next_handle(req);
req               385 drivers/scsi/qla2xxx/qla_nvme.c 	if (req->cnt < (req_cnt + 2)) {
req               386 drivers/scsi/qla2xxx/qla_nvme.c 		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
req               387 drivers/scsi/qla2xxx/qla_nvme.c 		    RD_REG_DWORD_RELAXED(req->req_q_out);
req               389 drivers/scsi/qla2xxx/qla_nvme.c 		if (req->ring_index < cnt)
req               390 drivers/scsi/qla2xxx/qla_nvme.c 			req->cnt = cnt - req->ring_index;
req               392 drivers/scsi/qla2xxx/qla_nvme.c 			req->cnt = req->length - (req->ring_index - cnt);
req               394 drivers/scsi/qla2xxx/qla_nvme.c 		if (req->cnt < (req_cnt + 2)){
req               410 drivers/scsi/qla2xxx/qla_nvme.c 	req->current_outstanding_cmd = handle;
req               411 drivers/scsi/qla2xxx/qla_nvme.c 	req->outstanding_cmds[handle] = sp;
req               413 drivers/scsi/qla2xxx/qla_nvme.c 	req->cnt -= req_cnt;
req               415 drivers/scsi/qla2xxx/qla_nvme.c 	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
req               416 drivers/scsi/qla2xxx/qla_nvme.c 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
req               484 drivers/scsi/qla2xxx/qla_nvme.c 			req->ring_index++;
req               485 drivers/scsi/qla2xxx/qla_nvme.c 			if (req->ring_index == req->length) {
req               486 drivers/scsi/qla2xxx/qla_nvme.c 				req->ring_index = 0;
req               487 drivers/scsi/qla2xxx/qla_nvme.c 				req->ring_ptr = req->ring;
req               489 drivers/scsi/qla2xxx/qla_nvme.c 				req->ring_ptr++;
req               491 drivers/scsi/qla2xxx/qla_nvme.c 			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
req               508 drivers/scsi/qla2xxx/qla_nvme.c 	req->ring_index++;
req               509 drivers/scsi/qla2xxx/qla_nvme.c 	if (req->ring_index == req->length) {
req               510 drivers/scsi/qla2xxx/qla_nvme.c 		req->ring_index = 0;
req               511 drivers/scsi/qla2xxx/qla_nvme.c 		req->ring_ptr = req->ring;
req               513 drivers/scsi/qla2xxx/qla_nvme.c 		req->ring_ptr++;
req               517 drivers/scsi/qla2xxx/qla_nvme.c 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
req              1781 drivers/scsi/qla2xxx/qla_nx.c 	struct req_que *req = ha->req_q_map[0];
req              1788 drivers/scsi/qla2xxx/qla_nx.c 	icb->request_q_length = cpu_to_le16(req->length);
req              1790 drivers/scsi/qla2xxx/qla_nx.c 	put_unaligned_le64(req->dma, &icb->request_q_address);
req              2804 drivers/scsi/qla2xxx/qla_nx.c 	struct req_que *req = ha->req_q_map[0];
req              2808 drivers/scsi/qla2xxx/qla_nx.c 	req->ring_index++;
req              2809 drivers/scsi/qla2xxx/qla_nx.c 	if (req->ring_index == req->length) {
req              2810 drivers/scsi/qla2xxx/qla_nx.c 		req->ring_index = 0;
req              2811 drivers/scsi/qla2xxx/qla_nx.c 		req->ring_ptr = req->ring;
req              2813 drivers/scsi/qla2xxx/qla_nx.c 		req->ring_ptr++;
req              2817 drivers/scsi/qla2xxx/qla_nx.c 	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
req              3055 drivers/scsi/qla2xxx/qla_nx.c 	struct req_que *req = ha->req_q_map[0];
req              3060 drivers/scsi/qla2xxx/qla_nx.c 		ha->isp_ops->get_flash_version(vha, req->ring);
req              3680 drivers/scsi/qla2xxx/qla_nx.c 		struct req_que *req;
req              3684 drivers/scsi/qla2xxx/qla_nx.c 			req = ha->req_q_map[que];
req              3685 drivers/scsi/qla2xxx/qla_nx.c 			if (!req)
req              3687 drivers/scsi/qla2xxx/qla_nx.c 			for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
req              3688 drivers/scsi/qla2xxx/qla_nx.c 				sp = req->outstanding_cmds[cnt];
req              1658 drivers/scsi/qla2xxx/qla_nx2.c 		ha->isp_ops->get_flash_version(vha, vha->req->ring);
req               364 drivers/scsi/qla2xxx/qla_os.c static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
req               370 drivers/scsi/qla2xxx/qla_os.c 	rsp->req = req;
req               372 drivers/scsi/qla2xxx/qla_os.c 	ha->base_qpair->req = req;
req               389 drivers/scsi/qla2xxx/qla_os.c static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
req               417 drivers/scsi/qla2xxx/qla_os.c 	qla_init_base_qpair(vha, req, rsp);
req               434 drivers/scsi/qla2xxx/qla_os.c 	ha->req_q_map[0] = req;
req               452 drivers/scsi/qla2xxx/qla_os.c static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
req               455 drivers/scsi/qla2xxx/qla_os.c 		if (req && req->ring_fx00)
req               457 drivers/scsi/qla2xxx/qla_os.c 			    (req->length_fx00 + 1) * sizeof(request_t),
req               458 drivers/scsi/qla2xxx/qla_os.c 			    req->ring_fx00, req->dma_fx00);
req               459 drivers/scsi/qla2xxx/qla_os.c 	} else if (req && req->ring)
req               461 drivers/scsi/qla2xxx/qla_os.c 		(req->length + 1) * sizeof(request_t),
req               462 drivers/scsi/qla2xxx/qla_os.c 		req->ring, req->dma);
req               464 drivers/scsi/qla2xxx/qla_os.c 	if (req)
req               465 drivers/scsi/qla2xxx/qla_os.c 		kfree(req->outstanding_cmds);
req               467 drivers/scsi/qla2xxx/qla_os.c 	kfree(req);
req               487 drivers/scsi/qla2xxx/qla_os.c 	struct req_que *req;
req               506 drivers/scsi/qla2xxx/qla_os.c 		req = ha->req_q_map[cnt];
req               511 drivers/scsi/qla2xxx/qla_os.c 		qla2x00_free_req_que(ha, req);
req              1326 drivers/scsi/qla2xxx/qla_os.c 	struct req_que *req;
req              1333 drivers/scsi/qla2xxx/qla_os.c 	req = vha->req;
req              1335 drivers/scsi/qla2xxx/qla_os.c 		cnt < req->num_outstanding_cmds; cnt++) {
req              1336 drivers/scsi/qla2xxx/qla_os.c 		sp = req->outstanding_cmds[cnt];
req              1757 drivers/scsi/qla2xxx/qla_os.c 	struct req_que *req;
req              1764 drivers/scsi/qla2xxx/qla_os.c 	req = qp->req;
req              1765 drivers/scsi/qla2xxx/qla_os.c 	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
req              1766 drivers/scsi/qla2xxx/qla_os.c 		sp = req->outstanding_cmds[cnt];
req              1789 drivers/scsi/qla2xxx/qla_os.c 			req->outstanding_cmds[cnt] = NULL;
req              1833 drivers/scsi/qla2xxx/qla_os.c 	struct req_que *req = vha->req;
req              1838 drivers/scsi/qla2xxx/qla_os.c 	scsi_change_queue_depth(sdev, req->max_q_depth);
req              2763 drivers/scsi/qla2xxx/qla_os.c 	struct req_que *req = NULL;
req              3075 drivers/scsi/qla2xxx/qla_os.c 	ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
req              3083 drivers/scsi/qla2xxx/qla_os.c 	req->max_q_depth = MAX_Q_DEPTH;
req              3085 drivers/scsi/qla2xxx/qla_os.c 		req->max_q_depth = ql2xmaxqdepth;
req              3098 drivers/scsi/qla2xxx/qla_os.c 	base_vha->req = req;
req              3155 drivers/scsi/qla2xxx/qla_os.c 	ret = qla2x00_alloc_queues(ha, req, rsp);
req              3187 drivers/scsi/qla2xxx/qla_os.c 	rsp->req = req;
req              3188 drivers/scsi/qla2xxx/qla_os.c 	req->rsp = rsp;
req              3192 drivers/scsi/qla2xxx/qla_os.c 		ha->req_q_map[0] = req;
req              3198 drivers/scsi/qla2xxx/qla_os.c 	req->req_q_in = &ha->iobase->isp24.req_q_in;
req              3199 drivers/scsi/qla2xxx/qla_os.c 	req->req_q_out = &ha->iobase->isp24.req_q_out;
req              3204 drivers/scsi/qla2xxx/qla_os.c 		req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
req              3205 drivers/scsi/qla2xxx/qla_os.c 		req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
req              3211 drivers/scsi/qla2xxx/qla_os.c 		req->req_q_in = &ha->iobase->ispfx00.req_q_in;
req              3212 drivers/scsi/qla2xxx/qla_os.c 		req->req_q_out = &ha->iobase->ispfx00.req_q_out;
req              3218 drivers/scsi/qla2xxx/qla_os.c 		req->req_q_out = &ha->iobase->isp82.req_q_out[0];
req              3225 drivers/scsi/qla2xxx/qla_os.c 	    ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
req              3229 drivers/scsi/qla2xxx/qla_os.c 	    req->req_q_in, req->req_q_out,
req              3233 drivers/scsi/qla2xxx/qla_os.c 	    ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
req              3236 drivers/scsi/qla2xxx/qla_os.c 	    req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
req              3273 drivers/scsi/qla2xxx/qla_os.c 		host->can_queue = req->num_outstanding_cmds - 10;
req              3277 drivers/scsi/qla2xxx/qla_os.c 	    host->can_queue, base_vha->req,
req              3476 drivers/scsi/qla2xxx/qla_os.c 	req = NULL;
req              3481 drivers/scsi/qla2xxx/qla_os.c 	qla2x00_free_req_que(ha, req);
req              3971 drivers/scsi/qla2xxx/qla_os.c 	struct req_que **req, struct rsp_que **rsp)
req              4150 drivers/scsi/qla2xxx/qla_os.c 	*req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
req              4151 drivers/scsi/qla2xxx/qla_os.c 	if (!*req) {
req              4156 drivers/scsi/qla2xxx/qla_os.c 	(*req)->length = req_len;
req              4157 drivers/scsi/qla2xxx/qla_os.c 	(*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
req              4158 drivers/scsi/qla2xxx/qla_os.c 		((*req)->length + 1) * sizeof(request_t),
req              4159 drivers/scsi/qla2xxx/qla_os.c 		&(*req)->dma, GFP_KERNEL);
req              4160 drivers/scsi/qla2xxx/qla_os.c 	if (!(*req)->ring) {
req              4182 drivers/scsi/qla2xxx/qla_os.c 	(*req)->rsp = *rsp;
req              4183 drivers/scsi/qla2xxx/qla_os.c 	(*rsp)->req = *req;
req              4187 drivers/scsi/qla2xxx/qla_os.c 	    *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length,
req              4278 drivers/scsi/qla2xxx/qla_os.c 	dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
req              4279 drivers/scsi/qla2xxx/qla_os.c 		sizeof(request_t), (*req)->ring, (*req)->dma);
req              4280 drivers/scsi/qla2xxx/qla_os.c 	(*req)->ring = NULL;
req              4281 drivers/scsi/qla2xxx/qla_os.c 	(*req)->dma = 0;
req              4283 drivers/scsi/qla2xxx/qla_os.c 	kfree(*req);
req              4284 drivers/scsi/qla2xxx/qla_os.c 	*req = NULL;
req              6539 drivers/scsi/qla2xxx/qla_os.c 	struct req_que *req;
req              6594 drivers/scsi/qla2xxx/qla_os.c 				req = ha->req_q_map[0];
req              6596 drivers/scsi/qla2xxx/qla_os.c 				    index < req->num_outstanding_cmds;
req              6600 drivers/scsi/qla2xxx/qla_os.c 					sp = req->outstanding_cmds[index];
req               554 drivers/scsi/qla2xxx/qla_sup.c 	struct req_que *req = ha->req_q_map[0];
req               555 drivers/scsi/qla2xxx/qla_sup.c 	struct qla_flt_location *fltl = (void *)req->ring;
req               556 drivers/scsi/qla2xxx/qla_sup.c 	uint32_t *dcode = (void *)req->ring;
req               557 drivers/scsi/qla2xxx/qla_sup.c 	uint8_t *buf = (void *)req->ring, *bcode,  last_image;
req               613 drivers/scsi/qla2xxx/qla_sup.c 	wptr = (void *)req->ring;
req               950 drivers/scsi/qla2xxx/qla_sup.c 	struct req_que *req = ha->req_q_map[0];
req               952 drivers/scsi/qla2xxx/qla_sup.c 	uint16_t *wptr = (void *)req->ring;
req               953 drivers/scsi/qla2xxx/qla_sup.c 	struct qla_fdt_layout *fdt = (struct qla_fdt_layout *)req->ring;
req              1047 drivers/scsi/qla2xxx/qla_sup.c 	struct req_que *req = ha->req_q_map[0];
req              1052 drivers/scsi/qla2xxx/qla_sup.c 	wptr = (uint32_t *)req->ring;
req              1053 drivers/scsi/qla2xxx/qla_sup.c 	ha->isp_ops->read_optrom(vha, req->ring, QLA82XX_IDC_PARAM_ADDR, 8);
req              1712 drivers/scsi/qla2xxx/qla_target.c 	qla2x00_start_iocbs(vha, qpair->req);
req              1738 drivers/scsi/qla2xxx/qla_target.c 	resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
req              1750 drivers/scsi/qla2xxx/qla_target.c 		qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
req              1753 drivers/scsi/qla2xxx/qla_target.c 	resp->handle = MAKE_HANDLE(qpair->req->id, h);
req              1794 drivers/scsi/qla2xxx/qla_target.c 		qla2x00_start_iocbs(vha, qpair->req);
req              1869 drivers/scsi/qla2xxx/qla_target.c 		qla2x00_start_iocbs(vha, qpair->req);
req              1938 drivers/scsi/qla2xxx/qla_target.c 		qla2x00_start_iocbs(vha, qpair->req);
req              2238 drivers/scsi/qla2xxx/qla_target.c 		qla2x00_start_iocbs(ha, qpair->req);
req              2309 drivers/scsi/qla2xxx/qla_target.c 		qla2x00_start_iocbs(vha, qpair->req);
req              2475 drivers/scsi/qla2xxx/qla_target.c 	struct req_que *req = qpair->req;
req              2477 drivers/scsi/qla2xxx/qla_target.c 	if (req->cnt < (req_cnt + 2)) {
req              2478 drivers/scsi/qla2xxx/qla_target.c 		cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
req              2479 drivers/scsi/qla2xxx/qla_target.c 		    RD_REG_DWORD_RELAXED(req->req_q_out));
req              2481 drivers/scsi/qla2xxx/qla_target.c 		if  (req->ring_index < cnt)
req              2482 drivers/scsi/qla2xxx/qla_target.c 			req->cnt = cnt - req->ring_index;
req              2484 drivers/scsi/qla2xxx/qla_target.c 			req->cnt = req->length - (req->ring_index - cnt);
req              2486 drivers/scsi/qla2xxx/qla_target.c 		if (unlikely(req->cnt < (req_cnt + 2)))
req              2490 drivers/scsi/qla2xxx/qla_target.c 	req->cnt -= req_cnt;
req              2498 drivers/scsi/qla2xxx/qla_target.c static inline void *qlt_get_req_pkt(struct req_que *req)
req              2501 drivers/scsi/qla2xxx/qla_target.c 	req->ring_index++;
req              2502 drivers/scsi/qla2xxx/qla_target.c 	if (req->ring_index == req->length) {
req              2503 drivers/scsi/qla2xxx/qla_target.c 		req->ring_index = 0;
req              2504 drivers/scsi/qla2xxx/qla_target.c 		req->ring_ptr = req->ring;
req              2506 drivers/scsi/qla2xxx/qla_target.c 		req->ring_ptr++;
req              2508 drivers/scsi/qla2xxx/qla_target.c 	return (cont_entry_t *)req->ring_ptr;
req              2517 drivers/scsi/qla2xxx/qla_target.c 	struct req_que *req = qpair->req;
req              2519 drivers/scsi/qla2xxx/qla_target.c 	h = req->current_outstanding_cmd;
req              2521 drivers/scsi/qla2xxx/qla_target.c 	for (index = 1; index < req->num_outstanding_cmds; index++) {
req              2523 drivers/scsi/qla2xxx/qla_target.c 		if (h == req->num_outstanding_cmds)
req              2529 drivers/scsi/qla2xxx/qla_target.c 		if (!req->outstanding_cmds[h]) {
req              2536 drivers/scsi/qla2xxx/qla_target.c 		req->current_outstanding_cmd = h;
req              2556 drivers/scsi/qla2xxx/qla_target.c 	pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
req              2573 drivers/scsi/qla2xxx/qla_target.c 		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
req              2575 drivers/scsi/qla2xxx/qla_target.c 	pkt->handle = MAKE_HANDLE(qpair->req->id, h);
req              2603 drivers/scsi/qla2xxx/qla_target.c 			   prm->cmd->qpair->req);
req              3006 drivers/scsi/qla2xxx/qla_target.c 	pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
req              3086 drivers/scsi/qla2xxx/qla_target.c 		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
req              3088 drivers/scsi/qla2xxx/qla_target.c 	pkt->handle  = MAKE_HANDLE(qpair->req->id, h);
req              3186 drivers/scsi/qla2xxx/qla_target.c 	qpair->req->outstanding_cmds[h] = NULL;
req              3256 drivers/scsi/qla2xxx/qla_target.c 		qpair->req->cnt += full_req_cnt;
req              3294 drivers/scsi/qla2xxx/qla_target.c 				    qpair->req);
req              3337 drivers/scsi/qla2xxx/qla_target.c 		qla2x00_start_iocbs(vha, qpair->req);
req              3400 drivers/scsi/qla2xxx/qla_target.c 		qpair->req->cnt += prm.req_cnt;
req              3420 drivers/scsi/qla2xxx/qla_target.c 		qla2x00_start_iocbs(vha, qpair->req);
req              3584 drivers/scsi/qla2xxx/qla_target.c 	qla2x00_start_iocbs(vha, vha->req);
req              3658 drivers/scsi/qla2xxx/qla_target.c 		qla2x00_start_iocbs(vha, qpair->req);
req              3852 drivers/scsi/qla2xxx/qla_target.c 	struct req_que *req;
req              3859 drivers/scsi/qla2xxx/qla_target.c 	if (qid == rsp->req->id) {
req              3860 drivers/scsi/qla2xxx/qla_target.c 		req = rsp->req;
req              3865 drivers/scsi/qla2xxx/qla_target.c 		req = vha->hw->req_q_map[qid];
req              3873 drivers/scsi/qla2xxx/qla_target.c 		if (unlikely(h >= req->num_outstanding_cmds)) {
req              3880 drivers/scsi/qla2xxx/qla_target.c 		cmd = (void *) req->outstanding_cmds[h];
req              3884 drivers/scsi/qla2xxx/qla_target.c 				vha->vp_idx, handle, req->id, rsp->id);
req              3887 drivers/scsi/qla2xxx/qla_target.c 		req->outstanding_cmds[h] = NULL;
req              5329 drivers/scsi/qla2xxx/qla_target.c 		qla2x00_start_iocbs(vha, qpair->req);
req              6411 drivers/scsi/qla2xxx/qla_target.c 	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
req               331 drivers/scsi/qla2xxx/qla_tmpl.c 			struct req_que *req = vha->hw->req_q_map[i];
req               333 drivers/scsi/qla2xxx/qla_tmpl.c 			if (req || !buf) {
req               334 drivers/scsi/qla2xxx/qla_tmpl.c 				length = req ?
req               335 drivers/scsi/qla2xxx/qla_tmpl.c 				    req->length : REQUEST_ENTRY_CNT_24XX;
req               338 drivers/scsi/qla2xxx/qla_tmpl.c 				qla27xx_insertbuf(req ? req->ring : NULL,
req               339 drivers/scsi/qla2xxx/qla_tmpl.c 				    length * sizeof(*req->ring), buf, len);
req               629 drivers/scsi/qla2xxx/qla_tmpl.c 			struct req_que *req = vha->hw->req_q_map[i];
req               631 drivers/scsi/qla2xxx/qla_tmpl.c 			if (req || !buf) {
req               634 drivers/scsi/qla2xxx/qla_tmpl.c 				qla27xx_insert32(req && req->out_ptr ?
req               635 drivers/scsi/qla2xxx/qla_tmpl.c 				    *req->out_ptr : 0, buf, len);
req                36 drivers/scsi/scsi_debugfs.c 	struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req);
req               284 drivers/scsi/scsi_error.c enum blk_eh_timer_return scsi_times_out(struct request *req)
req               286 drivers/scsi/scsi_error.c 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
req               970 drivers/scsi/scsi_error.c 	ses->resid_len = scmd->req.resid_len;
req               981 drivers/scsi/scsi_error.c 	scmd->req.resid_len = 0;
req              1034 drivers/scsi/scsi_error.c 	scmd->req.resid_len = ses->resid_len;
req              1956 drivers/scsi/scsi_error.c static void eh_lock_door_done(struct request *req, blk_status_t status)
req              1958 drivers/scsi/scsi_error.c 	blk_put_request(req);
req              1974 drivers/scsi/scsi_error.c 	struct request *req;
req              1977 drivers/scsi/scsi_error.c 	req = blk_get_request(sdev->request_queue, REQ_OP_SCSI_IN, 0);
req              1978 drivers/scsi/scsi_error.c 	if (IS_ERR(req))
req              1980 drivers/scsi/scsi_error.c 	rq = scsi_req(req);
req              1990 drivers/scsi/scsi_error.c 	req->rq_flags |= RQF_QUIET;
req              1991 drivers/scsi/scsi_error.c 	req->timeout = 10 * HZ;
req              1994 drivers/scsi/scsi_error.c 	blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done);
req               254 drivers/scsi/scsi_lib.c 	struct request *req;
req               258 drivers/scsi/scsi_lib.c 	req = blk_get_request(sdev->request_queue,
req               261 drivers/scsi/scsi_lib.c 	if (IS_ERR(req))
req               263 drivers/scsi/scsi_lib.c 	rq = scsi_req(req);
req               265 drivers/scsi/scsi_lib.c 	if (bufflen &&	blk_rq_map_kern(sdev->request_queue, req,
req               272 drivers/scsi/scsi_lib.c 	req->timeout = timeout;
req               273 drivers/scsi/scsi_lib.c 	req->cmd_flags |= flags;
req               274 drivers/scsi/scsi_lib.c 	req->rq_flags |= rq_flags | RQF_QUIET;
req               279 drivers/scsi/scsi_lib.c 	blk_execute_rq(req->q, NULL, req, 1);
req               298 drivers/scsi/scsi_lib.c 	blk_put_request(req);
req               572 drivers/scsi/scsi_lib.c static bool scsi_end_request(struct request *req, blk_status_t error,
req               575 drivers/scsi/scsi_lib.c 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
req               579 drivers/scsi/scsi_lib.c 	if (blk_update_request(req, error, bytes))
req               583 drivers/scsi/scsi_lib.c 		add_disk_randomness(req->rq_disk);
req               585 drivers/scsi/scsi_lib.c 	if (!blk_rq_is_scsi(req)) {
req               613 drivers/scsi/scsi_lib.c 	__blk_mq_end_request(req, error);
req               676 drivers/scsi/scsi_lib.c 	struct request *req = cmd->request;
req               680 drivers/scsi/scsi_lib.c 	unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
req               792 drivers/scsi/scsi_lib.c 		if (!(req->rq_flags & RQF_QUIET)) {
req               813 drivers/scsi/scsi_lib.c 		if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req)))
req               840 drivers/scsi/scsi_lib.c 	struct request *req = cmd->request;
req               847 drivers/scsi/scsi_lib.c 	if (blk_rq_is_passthrough(req)) {
req               852 drivers/scsi/scsi_lib.c 			scsi_req(req)->sense_len =
req               858 drivers/scsi/scsi_lib.c 	} else if (blk_rq_bytes(req) == 0 && sense_current) {
req               881 drivers/scsi/scsi_lib.c 		else if (req->rq_flags & RQF_QUIET)
req               936 drivers/scsi/scsi_lib.c 	struct request *req = cmd->request;
req               942 drivers/scsi/scsi_lib.c 	if (unlikely(blk_rq_is_passthrough(req))) {
req               946 drivers/scsi/scsi_lib.c 		scsi_req(req)->result = cmd->result;
req               955 drivers/scsi/scsi_lib.c 		blk_rq_sectors(req), good_bytes));
req               962 drivers/scsi/scsi_lib.c 	if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
req               963 drivers/scsi/scsi_lib.c 		if (likely(!scsi_end_request(req, blk_stat, good_bytes)))
req               969 drivers/scsi/scsi_lib.c 		if (scsi_end_request(req, blk_stat, blk_rq_bytes(req)))
req               985 drivers/scsi/scsi_lib.c static blk_status_t scsi_init_sgtable(struct request *req,
req               994 drivers/scsi/scsi_lib.c 			blk_rq_nr_phys_segments(req), sdb->table.sgl,
req              1002 drivers/scsi/scsi_lib.c 	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
req              1005 drivers/scsi/scsi_lib.c 	sdb->length = blk_rq_payload_bytes(req);
req              1086 drivers/scsi/scsi_lib.c 	scsi_req_init(&cmd->req);
req              1151 drivers/scsi/scsi_lib.c 	memset((char *)cmd + sizeof(cmd->req), 0,
req              1152 drivers/scsi/scsi_lib.c 		sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size);
req              1166 drivers/scsi/scsi_lib.c 		struct request *req)
req              1168 drivers/scsi/scsi_lib.c 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
req              1176 drivers/scsi/scsi_lib.c 	if (req->bio) {
req              1181 drivers/scsi/scsi_lib.c 		BUG_ON(blk_rq_bytes(req));
req              1186 drivers/scsi/scsi_lib.c 	cmd->cmd_len = scsi_req(req)->cmd_len;
req              1187 drivers/scsi/scsi_lib.c 	cmd->cmnd = scsi_req(req)->cmd;
req              1188 drivers/scsi/scsi_lib.c 	cmd->transfersize = blk_rq_bytes(req);
req              1189 drivers/scsi/scsi_lib.c 	cmd->allowed = scsi_req(req)->retries;
req              1198 drivers/scsi/scsi_lib.c 		struct request *req)
req              1200 drivers/scsi/scsi_lib.c 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
req              1203 drivers/scsi/scsi_lib.c 		blk_status_t ret = sdev->handler->prep_fn(sdev, req);
req              1208 drivers/scsi/scsi_lib.c 	cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
req              1214 drivers/scsi/scsi_lib.c 		struct request *req)
req              1216 drivers/scsi/scsi_lib.c 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
req              1218 drivers/scsi/scsi_lib.c 	if (!blk_rq_bytes(req))
req              1220 drivers/scsi/scsi_lib.c 	else if (rq_data_dir(req) == WRITE)
req              1225 drivers/scsi/scsi_lib.c 	if (blk_rq_is_scsi(req))
req              1226 drivers/scsi/scsi_lib.c 		return scsi_setup_scsi_cmnd(sdev, req);
req              1228 drivers/scsi/scsi_lib.c 		return scsi_setup_fs_cmnd(sdev, req);
req              1232 drivers/scsi/scsi_lib.c scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
req              1260 drivers/scsi/scsi_lib.c 		if (req && !(req->rq_flags & RQF_PREEMPT))
req              1269 drivers/scsi/scsi_lib.c 		if (req && !(req->rq_flags & RQF_PREEMPT))
req              1579 drivers/scsi/scsi_lib.c static blk_status_t scsi_mq_prep_fn(struct request *req)
req              1581 drivers/scsi/scsi_lib.c 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
req              1582 drivers/scsi/scsi_lib.c 	struct scsi_device *sdev = req->q->queuedata;
req              1588 drivers/scsi/scsi_lib.c 	cmd->request = req;
req              1589 drivers/scsi/scsi_lib.c 	cmd->tag = req->tag;
req              1602 drivers/scsi/scsi_lib.c 	blk_mq_start_request(req);
req              1604 drivers/scsi/scsi_lib.c 	return scsi_setup_cmnd(sdev, req);
req              1647 drivers/scsi/scsi_lib.c 	struct request *req = bd->rq;
req              1648 drivers/scsi/scsi_lib.c 	struct request_queue *q = req->q;
req              1651 drivers/scsi/scsi_lib.c 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
req              1660 drivers/scsi/scsi_lib.c 		ret = scsi_prep_state_check(sdev, req);
req              1671 drivers/scsi/scsi_lib.c 	if (!(req->rq_flags & RQF_DONTPREP)) {
req              1672 drivers/scsi/scsi_lib.c 		ret = scsi_mq_prep_fn(req);
req              1675 drivers/scsi/scsi_lib.c 		req->rq_flags |= RQF_DONTPREP;
req              1678 drivers/scsi/scsi_lib.c 		blk_mq_start_request(req);
req              1716 drivers/scsi/scsi_lib.c 			scsi_req(req)->result = DID_NO_CONNECT << 16;
req              1718 drivers/scsi/scsi_lib.c 			scsi_req(req)->result = DID_ERROR << 16;
req              1724 drivers/scsi/scsi_lib.c 		if (req->rq_flags & RQF_DONTPREP)
req              1731 drivers/scsi/scsi_lib.c static enum blk_eh_timer_return scsi_timeout(struct request *req,
req              1736 drivers/scsi/scsi_lib.c 	return scsi_times_out(req);
req              1753 drivers/scsi/scsi_lib.c 	cmd->req.sense = cmd->sense_buffer;
req                74 drivers/scsi/scsi_priv.h extern enum blk_eh_timer_return scsi_times_out(struct request *req);
req              3555 drivers/scsi/scsi_transport_fc.c fc_bsg_job_timeout(struct request *req)
req              3557 drivers/scsi/scsi_transport_fc.c 	struct bsg_job *job = blk_mq_rq_to_pdu(req);
req              3581 drivers/scsi/scsi_transport_fc.c 		blk_mq_end_request(req, BLK_STS_IOERR);
req              1487 drivers/scsi/scsi_transport_iscsi.c 	struct iscsi_bsg_request *req = job->request;
req              1500 drivers/scsi/scsi_transport_iscsi.c 	switch (req->msgcode) {
req              1504 drivers/scsi/scsi_transport_iscsi.c 		    (req->rqst_data.h_vendor.vendor_id !=
req              1908 drivers/scsi/sd.c 	struct request *req = scmd->request;
req              1930 drivers/scsi/sd.c 	start_lba = sectors_to_logical(sdev, blk_rq_pos(req));
req              1964 drivers/scsi/sd.c 	struct request *req = SCpnt->request;
req              1968 drivers/scsi/sd.c 	switch (req_op(req)) {
req              1975 drivers/scsi/sd.c 			good_bytes = blk_rq_bytes(req);
req              1979 drivers/scsi/sd.c 			scsi_set_resid(SCpnt, blk_rq_bytes(req));
req              2049 drivers/scsi/sd.c 					req->rq_flags |= RQF_QUIET;
req              1323 drivers/scsi/sg.c 	struct scsi_request *req = scsi_req(rq);
req              1342 drivers/scsi/sg.c 	sense = req->sense;
req              1343 drivers/scsi/sg.c 	result = req->result;
req              1344 drivers/scsi/sg.c 	resid = req->resid_len;
req              1379 drivers/scsi/sg.c 	if (req->sense_len)
req              1380 drivers/scsi/sg.c 		memcpy(srp->sense_b, req->sense, SCSI_SENSE_BUFFERSIZE);
req              1706 drivers/scsi/sg.c 	struct scsi_request *req;
req              1746 drivers/scsi/sg.c 	req = scsi_req(rq);
req              1749 drivers/scsi/sg.c 		req->cmd = long_cmdp;
req              1750 drivers/scsi/sg.c 	memcpy(req->cmd, cmd, hp->cmd_len);
req              1751 drivers/scsi/sg.c 	req->cmd_len = hp->cmd_len;
req              1755 drivers/scsi/sg.c 	req->retries = SG_DEFAULT_RETRIES;
req               109 drivers/scsi/snic/snic_ctl.c 	struct snic_host_req *req = NULL;
req               125 drivers/scsi/snic/snic_ctl.c 	req = rqi_to_req(rqi);
req               128 drivers/scsi/snic/snic_ctl.c 	snic_io_hdr_enc(&req->hdr, SNIC_REQ_EXCH_VER, 0, SCSI_NO_TAG,
req               131 drivers/scsi/snic/snic_ctl.c 	req->u.exch_ver.drvr_ver = cpu_to_le32(ver);
req               132 drivers/scsi/snic/snic_ctl.c 	req->u.exch_ver.os_type = cpu_to_le32(SNIC_OS_LINUX);
req               136 drivers/scsi/snic/snic_ctl.c 	ret = snic_queue_wq_desc(snic, req, sizeof(*req));
req                59 drivers/scsi/snic/snic_disc.c snic_report_tgt_init(struct snic_host_req *req, u32 hid, u8 *buf, u32 len,
req                65 drivers/scsi/snic/snic_disc.c 	snic_io_hdr_enc(&req->hdr, SNIC_REQ_REPORT_TGTS, 0, SCSI_NO_TAG, hid,
req                68 drivers/scsi/snic/snic_disc.c 	req->u.rpt_tgts.sg_cnt = cpu_to_le16(1);
req                69 drivers/scsi/snic/snic_disc.c 	sgd = req_to_sgl(req);
req                73 drivers/scsi/snic/snic_disc.c 	req->u.rpt_tgts.sg_addr = cpu_to_le64((ulong)sgd);
req               130 drivers/scsi/snic/snic_disc.c 	snic_report_tgt_init(rqi->req,
req               139 drivers/scsi/snic/snic_disc.c 	ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
req               502 drivers/scsi/snic/snic_fwint.h snic_color_enc(struct snic_fw_req *req, u8 color)
req               504 drivers/scsi/snic/snic_fwint.h 	u8 *c = ((u8 *) req) + sizeof(struct snic_fw_req) - 1;
req               513 drivers/scsi/snic/snic_fwint.h snic_color_dec(struct snic_fw_req *req, u8 *color)
req               515 drivers/scsi/snic/snic_fwint.h 	u8 *c = ((u8 *) req) + sizeof(struct snic_fw_req) - 1;
req               100 drivers/scsi/snic/snic_io.c 	struct snic_host_req *req = buf->os_buf;
req               108 drivers/scsi/snic/snic_io.c 	rqi = req_to_rqi(req);
req               168 drivers/scsi/snic/snic_io.c 	struct snic_host_req *req = (struct snic_host_req *) os_buf;
req               183 drivers/scsi/snic/snic_io.c 	req->req_pa = (ulong)pa;
req               188 drivers/scsi/snic/snic_io.c 	desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
req               191 drivers/scsi/snic/snic_io.c 		req->req_pa = 0;
req               254 drivers/scsi/snic/snic_io.c 	rqi->req = (struct snic_host_req *) (rqi + 1);
req               258 drivers/scsi/snic/snic_io.c 	rqi->req = (struct snic_host_req *)(rqi + 1);
req               272 drivers/scsi/snic/snic_io.c 	memset(rqi->req, 0, rqi->req_len);
req               275 drivers/scsi/snic/snic_io.c 	rqi->req->hdr.init_ctx = (ulong) rqi;
req               288 drivers/scsi/snic/snic_io.c 	struct snic_host_req *req = NULL;
req               297 drivers/scsi/snic/snic_io.c 	req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
req               298 drivers/scsi/snic/snic_io.c 	if (!req) {
req               305 drivers/scsi/snic/snic_io.c 	rqi->abort_req = req;
req               306 drivers/scsi/snic/snic_io.c 	memset(req, 0, sizeof(struct snic_host_req));
req               308 drivers/scsi/snic/snic_io.c 	req->hdr.init_ctx = (ulong) rqi;
req               310 drivers/scsi/snic/snic_io.c 	return req;
req               319 drivers/scsi/snic/snic_io.c 	struct snic_host_req *req = NULL;
req               323 drivers/scsi/snic/snic_io.c 	req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
req               324 drivers/scsi/snic/snic_io.c 	if (!req) {
req               332 drivers/scsi/snic/snic_io.c 	rqi->dr_req = req;
req               333 drivers/scsi/snic/snic_io.c 	memset(req, 0, sizeof(struct snic_host_req));
req               335 drivers/scsi/snic/snic_io.c 	req->hdr.init_ctx = (ulong) rqi;
req               337 drivers/scsi/snic/snic_io.c 	return req;
req               344 drivers/scsi/snic/snic_io.c 	SNIC_BUG_ON(rqi->req == rqi->abort_req);
req               345 drivers/scsi/snic/snic_io.c 	SNIC_BUG_ON(rqi->req == rqi->dr_req);
req               350 drivers/scsi/snic/snic_io.c 		      rqi, rqi->req, rqi->abort_req, rqi->dr_req);
req               372 drivers/scsi/snic/snic_io.c 	if (rqi->req->req_pa)
req               374 drivers/scsi/snic/snic_io.c 				 rqi->req->req_pa,
req               459 drivers/scsi/snic/snic_io.c 	struct snic_host_req *req = (struct snic_host_req *) os_buf;
req               465 drivers/scsi/snic/snic_io.c 	if (req->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL)
req               468 drivers/scsi/snic/snic_io.c 		rqi = (struct snic_req_info *) req->hdr.init_ctx;
req               470 drivers/scsi/snic/snic_io.c 	SNIC_BUG_ON(rqi == NULL || rqi->req == NULL);
req               471 drivers/scsi/snic/snic_io.c 	switch (req->hdr.type) {
req               480 drivers/scsi/snic/snic_io.c 			 req->u.icmnd.cdb[0]);
req               510 drivers/scsi/snic/snic_io.c 			 rqi->req->u.icmnd.cdb[0]);
req               545 drivers/scsi/snic/snic_io.c 		  fn, line, req->hdr.cmnd_id, req->hdr.sg_cnt, req->hdr.status,
req               546 drivers/scsi/snic/snic_io.c 		  req->hdr.init_ctx);
req                78 drivers/scsi/snic/snic_io.h 	struct snic_host_req *req;
req               101 drivers/scsi/snic/snic_io.h 	((struct snic_host_req *) (((struct snic_req_info *)rqi)->req))
req               103 drivers/scsi/snic/snic_io.h #define req_to_rqi(req)	\
req               104 drivers/scsi/snic/snic_io.h 	((struct snic_req_info *) (((struct snic_host_req *)req)->hdr.init_ctx))
req               106 drivers/scsi/snic/snic_io.h #define req_to_sgl(req)	\
req               107 drivers/scsi/snic/snic_io.h 	((struct snic_sg_desc *) (((struct snic_host_req *)req)+1))
req                28 drivers/scsi/snic/snic_res.h snic_icmnd_init(struct snic_host_req *req, u32 cmnd_id, u32 host_id, u64 ctx,
req                33 drivers/scsi/snic/snic_res.h 	snic_io_hdr_enc(&req->hdr, SNIC_REQ_ICMND, 0, cmnd_id, host_id, sg_cnt,
req                36 drivers/scsi/snic/snic_res.h 	req->u.icmnd.flags = cpu_to_le16(flags);
req                37 drivers/scsi/snic/snic_res.h 	req->u.icmnd.tgt_id = cpu_to_le64(tgt_id);
req                38 drivers/scsi/snic/snic_res.h 	memcpy(&req->u.icmnd.lun_id, lun, LUN_ADDR_LEN);
req                39 drivers/scsi/snic/snic_res.h 	req->u.icmnd.cdb_len = cdb_len;
req                40 drivers/scsi/snic/snic_res.h 	memset(req->u.icmnd.cdb, 0, SNIC_CDB_LEN);
req                41 drivers/scsi/snic/snic_res.h 	memcpy(req->u.icmnd.cdb, scsi_cdb, cdb_len);
req                42 drivers/scsi/snic/snic_res.h 	req->u.icmnd.data_len = cpu_to_le32(data_len);
req                43 drivers/scsi/snic/snic_res.h 	req->u.icmnd.sg_addr = cpu_to_le64(sgl_addr);
req                44 drivers/scsi/snic/snic_res.h 	req->u.icmnd.sense_len = cpu_to_le32(sense_len);
req                45 drivers/scsi/snic/snic_res.h 	req->u.icmnd.sense_addr = cpu_to_le64(sns_addr_pa);
req                49 drivers/scsi/snic/snic_res.h snic_itmf_init(struct snic_host_req *req, u32 cmnd_id, u32 host_id, ulong ctx,
req                52 drivers/scsi/snic/snic_res.h 	snic_io_hdr_enc(&req->hdr, SNIC_REQ_ITMF, 0, cmnd_id, host_id, 0, ctx);
req                54 drivers/scsi/snic/snic_res.h 	req->u.itmf.tm_type = tm_type;
req                55 drivers/scsi/snic/snic_res.h 	req->u.itmf.flags = cpu_to_le16(flags);
req                57 drivers/scsi/snic/snic_res.h 	req->u.itmf.req_id = cpu_to_le32(req_id);
req                58 drivers/scsi/snic/snic_res.h 	req->u.itmf.tgt_id = cpu_to_le64(tgt_id);
req                59 drivers/scsi/snic/snic_res.h 	memcpy(&req->u.itmf.lun_id, lun, LUN_ADDR_LEN);
req               131 drivers/scsi/snic/snic_scsi.c 	struct snic_host_req *req = rqi_to_req(rqi);
req               144 drivers/scsi/snic/snic_scsi.c 		      sc, snic_cmd_tag(sc), rqi, rqi->req, rqi->abort_req,
req               148 drivers/scsi/snic/snic_scsi.c 	if (req->u.icmnd.sense_addr)
req               150 drivers/scsi/snic/snic_scsi.c 				 le64_to_cpu(req->u.icmnd.sense_addr),
req               178 drivers/scsi/snic/snic_scsi.c 		sgd = (struct snic_sg_desc *) req_to_sgl(rqi->req);
req               208 drivers/scsi/snic/snic_scsi.c 	snic_icmnd_init(rqi->req,
req               219 drivers/scsi/snic/snic_scsi.c 			(ulong) req_to_sgl(rqi->req),
req               224 drivers/scsi/snic/snic_scsi.c 	ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
req               552 drivers/scsi/snic/snic_scsi.c 	struct snic_host_req *req = NULL;
req               608 drivers/scsi/snic/snic_scsi.c 	WARN_ON_ONCE(req);
req              2235 drivers/scsi/snic/snic_scsi.c 	struct snic_host_req *req = NULL;
req              2255 drivers/scsi/snic/snic_scsi.c 	req = rqi_to_req(rqi);
req              2267 drivers/scsi/snic/snic_scsi.c 	snic_io_hdr_enc(&req->hdr, SNIC_REQ_HBA_RESET, 0, snic_cmd_tag(sc),
req              2270 drivers/scsi/snic/snic_scsi.c 	req->u.reset.flags = 0;
req              2272 drivers/scsi/snic/snic_scsi.c 	ret = snic_queue_wq_desc(snic, req, sizeof(*req));
req               475 drivers/scsi/st.c static void st_do_stats(struct scsi_tape *STp, struct request *req)
req               480 drivers/scsi/st.c 	if (scsi_req(req)->cmd[0] == WRITE_6) {
req               485 drivers/scsi/st.c 		if (scsi_req(req)->result) {
req               494 drivers/scsi/st.c 	} else if (scsi_req(req)->cmd[0] == READ_6) {
req               499 drivers/scsi/st.c 		if (scsi_req(req)->result) {
req               516 drivers/scsi/st.c static void st_scsi_execute_end(struct request *req, blk_status_t status)
req               518 drivers/scsi/st.c 	struct st_request *SRpnt = req->end_io_data;
req               519 drivers/scsi/st.c 	struct scsi_request *rq = scsi_req(req);
req               526 drivers/scsi/st.c 	st_do_stats(STp, req);
req               535 drivers/scsi/st.c 	blk_put_request(req);
req               542 drivers/scsi/st.c 	struct request *req;
req               548 drivers/scsi/st.c 	req = blk_get_request(SRpnt->stp->device->request_queue,
req               551 drivers/scsi/st.c 	if (IS_ERR(req))
req               553 drivers/scsi/st.c 	rq = scsi_req(req);
req               554 drivers/scsi/st.c 	req->rq_flags |= RQF_QUIET;
req               559 drivers/scsi/st.c 		err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen,
req               562 drivers/scsi/st.c 			blk_put_request(req);
req               578 drivers/scsi/st.c 	SRpnt->bio = req->bio;
req               582 drivers/scsi/st.c 	req->timeout = timeout;
req               584 drivers/scsi/st.c 	req->end_io_data = SRpnt;
req               586 drivers/scsi/st.c 	blk_execute_rq_nowait(req->q, NULL, req, 1, st_scsi_execute_end);
req               296 drivers/scsi/stex.c 	struct req_msg *req;
req               411 drivers/scsi/stex.c 	struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size;
req               416 drivers/scsi/stex.c 	return req;
req               426 drivers/scsi/stex.c 	struct req_msg *req, struct st_ccb *ccb)
req               438 drivers/scsi/stex.c 		dst = (struct st_sgtable *)req->variable;
req               458 drivers/scsi/stex.c 	struct req_msg *req, struct st_ccb *ccb)
req               470 drivers/scsi/stex.c 		dst = (struct st_sgtable *)req->variable;
req               518 drivers/scsi/stex.c stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
req               520 drivers/scsi/stex.c 	req->tag = cpu_to_le16(tag);
req               522 drivers/scsi/stex.c 	hba->ccb[tag].req = req;
req               531 drivers/scsi/stex.c stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
req               537 drivers/scsi/stex.c 	req->tag = cpu_to_le16(tag);
req               539 drivers/scsi/stex.c 	hba->ccb[tag].req = req;
req               543 drivers/scsi/stex.c 	msg_h = (struct st_msg_header *)req - 1;
req               574 drivers/scsi/stex.c 		if (ccb->req == NULL)
req               576 drivers/scsi/stex.c 		ccb->req = NULL;
req               602 drivers/scsi/stex.c 	struct req_msg *req;
req               699 drivers/scsi/stex.c 	req = hba->alloc_rq(hba);
req               701 drivers/scsi/stex.c 	req->lun = lun;
req               702 drivers/scsi/stex.c 	req->target = id;
req               705 drivers/scsi/stex.c 	memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH);
req               708 drivers/scsi/stex.c 		req->data_dir = MSG_DATA_DIR_IN;
req               710 drivers/scsi/stex.c 		req->data_dir = MSG_DATA_DIR_OUT;
req               712 drivers/scsi/stex.c 		req->data_dir = MSG_DATA_DIR_ND;
req               718 drivers/scsi/stex.c 	if (!hba->map_sg(hba, req, &hba->ccb[tag])) {
req               720 drivers/scsi/stex.c 		memset(&req->variable[0], 0, 8);
req               723 drivers/scsi/stex.c 	hba->send(hba, req, tag);
req               842 drivers/scsi/stex.c 		if (unlikely(ccb->req == NULL)) {
req               859 drivers/scsi/stex.c 		ccb->req = NULL;
req               947 drivers/scsi/stex.c 		if (unlikely(ccb->req == NULL)) {
req               953 drivers/scsi/stex.c 		ccb->req = NULL;
req              1261 drivers/scsi/stex.c 		hba->ccb[tag].req && hba->ccb[tag].cmd == cmd)
req              1299 drivers/scsi/stex.c 	hba->wait_ccb->req = NULL; /* nullify the req's future return */
req              1854 drivers/scsi/stex.c 	struct req_msg *req;
req              1869 drivers/scsi/stex.c 	req = hba->alloc_rq(hba);
req              1871 drivers/scsi/stex.c 		msg_h = (struct st_msg_header *)req - 1;
req              1874 drivers/scsi/stex.c 		memset(req, 0, hba->rq_size);
req              1879 drivers/scsi/stex.c 		req->cdb[0] = MGT_CMD;
req              1880 drivers/scsi/stex.c 		req->cdb[1] = MGT_CMD_SIGNATURE;
req              1881 drivers/scsi/stex.c 		req->cdb[2] = CTLR_CONFIG_CMD;
req              1882 drivers/scsi/stex.c 		req->cdb[3] = CTLR_SHUTDOWN;
req              1885 drivers/scsi/stex.c 		req->cdb[0] = MGT_CMD;
req              1886 drivers/scsi/stex.c 		req->cdb[1] = MGT_CMD_SIGNATURE;
req              1887 drivers/scsi/stex.c 		req->cdb[2] = CTLR_CONFIG_CMD;
req              1888 drivers/scsi/stex.c 		req->cdb[3] = PMIC_SHUTDOWN;
req              1889 drivers/scsi/stex.c 		req->cdb[4] = st_sleep_mic;
req              1891 drivers/scsi/stex.c 		req->cdb[0] = CONTROLLER_CMD;
req              1892 drivers/scsi/stex.c 		req->cdb[1] = CTLR_POWER_STATE_CHANGE;
req              1893 drivers/scsi/stex.c 		req->cdb[2] = CTLR_POWER_SAVING;
req              1900 drivers/scsi/stex.c 	hba->send(hba, req, tag);
req              3945 drivers/scsi/sym53c8xx_2/sym_hipd.c sym_sync_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp)
req              3991 drivers/scsi/sym53c8xx_2/sym_hipd.c 	if (!req && chg)
req              4002 drivers/scsi/sym53c8xx_2/sym_hipd.c 	if (!req)
req              4025 drivers/scsi/sym53c8xx_2/sym_hipd.c 	int req = 1;
req              4035 drivers/scsi/sym53c8xx_2/sym_hipd.c 		req = 0;
req              4041 drivers/scsi/sym53c8xx_2/sym_hipd.c 	result = sym_sync_nego_check(np, req, cp);
req              4044 drivers/scsi/sym53c8xx_2/sym_hipd.c 	if (req) {	/* Was a request, send response. */
req              4060 drivers/scsi/sym53c8xx_2/sym_hipd.c sym_ppr_nego_check(struct sym_hcb *np, int req, int target)
req              4117 drivers/scsi/sym53c8xx_2/sym_hipd.c 	if (!req && chg)
req              4128 drivers/scsi/sym53c8xx_2/sym_hipd.c 	if (!req)
req              4150 drivers/scsi/sym53c8xx_2/sym_hipd.c 	if (!req && !opts) {
req              4162 drivers/scsi/sym53c8xx_2/sym_hipd.c 	int req = 1;
req              4172 drivers/scsi/sym53c8xx_2/sym_hipd.c 		req = 0;
req              4178 drivers/scsi/sym53c8xx_2/sym_hipd.c 	result = sym_ppr_nego_check(np, req, cp->target);
req              4181 drivers/scsi/sym53c8xx_2/sym_hipd.c 	if (req) {	/* Was a request, send response. */
req              4197 drivers/scsi/sym53c8xx_2/sym_hipd.c sym_wide_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp)
req              4229 drivers/scsi/sym53c8xx_2/sym_hipd.c 	if (!req && chg)
req              4240 drivers/scsi/sym53c8xx_2/sym_hipd.c 	if (!req)
req              4262 drivers/scsi/sym53c8xx_2/sym_hipd.c 	int req = 1;
req              4272 drivers/scsi/sym53c8xx_2/sym_hipd.c 		req = 0;
req              4278 drivers/scsi/sym53c8xx_2/sym_hipd.c 	result = sym_wide_nego_check(np, req, cp);
req              4281 drivers/scsi/sym53c8xx_2/sym_hipd.c 	if (req) {	/* Was a request, send response. */
req                48 drivers/scsi/virtio_scsi.c 	} req;
req               410 drivers/scsi/virtio_scsi.c 	struct scatterlist *sgs[6], req, resp;
req               424 drivers/scsi/virtio_scsi.c 	sg_init_one(&req, &cmd->req, req_size);
req               425 drivers/scsi/virtio_scsi.c 	sgs[out_num++] = &req;
req               566 drivers/scsi/virtio_scsi.c 		virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc);
req               567 drivers/scsi/virtio_scsi.c 		memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
req               568 drivers/scsi/virtio_scsi.c 		req_size = sizeof(cmd->req.cmd_pi);
req               572 drivers/scsi/virtio_scsi.c 		virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc);
req               573 drivers/scsi/virtio_scsi.c 		memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
req               574 drivers/scsi/virtio_scsi.c 		req_size = sizeof(cmd->req.cmd);
req               597 drivers/scsi/virtio_scsi.c 			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf, true) < 0)
req               633 drivers/scsi/virtio_scsi.c 	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
req               691 drivers/scsi/virtio_scsi.c 	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
req               338 drivers/slimbus/qcom-ngd-ctrl.c 				struct slimbus_select_inst_req_msg_v01 *req)
req               354 drivers/slimbus/qcom-ngd-ctrl.c 				slimbus_select_inst_req_msg_v01_ei, req);
req               391 drivers/slimbus/qcom-ngd-ctrl.c 					struct slimbus_power_req_msg_v01 *req)
req               403 drivers/slimbus/qcom-ngd-ctrl.c 				slimbus_power_req_msg_v01_ei, req);
req               440 drivers/slimbus/qcom-ngd-ctrl.c 	struct slimbus_select_inst_req_msg_v01 req;
req               464 drivers/slimbus/qcom-ngd-ctrl.c 	req.instance = (ctrl->ngd->id >> 1);
req               465 drivers/slimbus/qcom-ngd-ctrl.c 	req.mode_valid = 1;
req               469 drivers/slimbus/qcom-ngd-ctrl.c 		req.mode = SLIMBUS_MODE_SATELLITE_V01;
req               471 drivers/slimbus/qcom-ngd-ctrl.c 		req.mode = SLIMBUS_MODE_MASTER_V01;
req               475 drivers/slimbus/qcom-ngd-ctrl.c 	rc = qcom_slim_qmi_send_select_inst_req(ctrl, &req);
req               505 drivers/slimbus/qcom-ngd-ctrl.c 	struct slimbus_power_req_msg_v01 req;
req               508 drivers/slimbus/qcom-ngd-ctrl.c 		req.pm_req = SLIMBUS_PM_ACTIVE_V01;
req               510 drivers/slimbus/qcom-ngd-ctrl.c 		req.pm_req = SLIMBUS_PM_INACTIVE_V01;
req               512 drivers/slimbus/qcom-ngd-ctrl.c 	req.resp_type_valid = 0;
req               514 drivers/slimbus/qcom-ngd-ctrl.c 	return qcom_slim_qmi_send_power_request(ctrl, &req);
req                91 drivers/soc/imx/gpc.c 	u32 val, req;
req               111 drivers/soc/imx/gpc.c 	req = BIT(pd->cntr_pdn_bit + 1);
req               112 drivers/soc/imx/gpc.c 	regmap_update_bits(pd->regmap, GPC_CNTR, req, req);
req               115 drivers/soc/imx/gpc.c 	ret = regmap_read_poll_timeout(pd->regmap, GPC_CNTR, val, !(val & req),
req                23 drivers/soc/imx/soc-imx-scu.c 		} __packed req;
req                75 drivers/soc/imx/soc-imx-scu.c 	msg.data.req.control = IMX_SC_C_ID;
req                76 drivers/soc/imx/soc-imx-scu.c 	msg.data.req.resource = IMX_SC_R_SYSTEM;
req                44 drivers/soc/qcom/rpmh-internal.h 	const struct tcs_request *req[MAX_TCS_PER_TYPE];
req               198 drivers/soc/qcom/rpmh-rsc.c 			return tcs->req[tcs_id - tcs->offset];
req               212 drivers/soc/qcom/rpmh-rsc.c 	const struct tcs_request *req;
req               218 drivers/soc/qcom/rpmh-rsc.c 		req = get_req_from_tcs(drv, i);
req               219 drivers/soc/qcom/rpmh-rsc.c 		if (!req) {
req               225 drivers/soc/qcom/rpmh-rsc.c 		for (j = 0; j < req->num_cmds; j++) {
req               228 drivers/soc/qcom/rpmh-rsc.c 			cmd = &req->cmds[j];
req               231 drivers/soc/qcom/rpmh-rsc.c 			   ((req->wait_for_compl || cmd->wait) &&
req               239 drivers/soc/qcom/rpmh-rsc.c 		trace_rpmh_tx_done(drv, i, req, err);
req               248 drivers/soc/qcom/rpmh-rsc.c 		if (req)
req               249 drivers/soc/qcom/rpmh-rsc.c 			rpmh_tx_done(req, err);
req               378 drivers/soc/qcom/rpmh-rsc.c 	tcs->req[tcs_id - tcs->offset] = msg;
req               104 drivers/soc/qcom/rpmh.c 	struct cache_req *p, *req = NULL;
req               108 drivers/soc/qcom/rpmh.c 			req = p;
req               113 drivers/soc/qcom/rpmh.c 	return req;
req               120 drivers/soc/qcom/rpmh.c 	struct cache_req *req;
req               124 drivers/soc/qcom/rpmh.c 	req = __find_req(ctrlr, cmd->addr);
req               125 drivers/soc/qcom/rpmh.c 	if (req)
req               128 drivers/soc/qcom/rpmh.c 	req = kzalloc(sizeof(*req), GFP_ATOMIC);
req               129 drivers/soc/qcom/rpmh.c 	if (!req) {
req               130 drivers/soc/qcom/rpmh.c 		req = ERR_PTR(-ENOMEM);
req               134 drivers/soc/qcom/rpmh.c 	req->addr = cmd->addr;
req               135 drivers/soc/qcom/rpmh.c 	req->sleep_val = req->wake_val = UINT_MAX;
req               136 drivers/soc/qcom/rpmh.c 	INIT_LIST_HEAD(&req->list);
req               137 drivers/soc/qcom/rpmh.c 	list_add_tail(&req->list, &ctrlr->cache);
req               142 drivers/soc/qcom/rpmh.c 		if (req->sleep_val != UINT_MAX)
req               143 drivers/soc/qcom/rpmh.c 			req->wake_val = cmd->data;
req               146 drivers/soc/qcom/rpmh.c 		req->wake_val = cmd->data;
req               149 drivers/soc/qcom/rpmh.c 		req->sleep_val = cmd->data;
req               159 drivers/soc/qcom/rpmh.c 	return req;
req               178 drivers/soc/qcom/rpmh.c 	struct cache_req *req;
req               185 drivers/soc/qcom/rpmh.c 		req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
req               186 drivers/soc/qcom/rpmh.c 		if (IS_ERR(req))
req               187 drivers/soc/qcom/rpmh.c 			return PTR_ERR(req);
req               204 drivers/soc/qcom/rpmh.c static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
req               210 drivers/soc/qcom/rpmh.c 	memcpy(req->cmd, cmd, n * sizeof(*cmd));
req               212 drivers/soc/qcom/rpmh.c 	req->msg.state = state;
req               213 drivers/soc/qcom/rpmh.c 	req->msg.cmds = req->cmd;
req               214 drivers/soc/qcom/rpmh.c 	req->msg.num_cmds = n;
req               284 drivers/soc/qcom/rpmh.c static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
req               289 drivers/soc/qcom/rpmh.c 	list_add_tail(&req->list, &ctrlr->batch_cache);
req               295 drivers/soc/qcom/rpmh.c 	struct batch_cache_req *req;
req               303 drivers/soc/qcom/rpmh.c 	list_for_each_entry(req, &ctrlr->batch_cache, list) {
req               304 drivers/soc/qcom/rpmh.c 		for (i = 0; i < req->count; i++) {
req               305 drivers/soc/qcom/rpmh.c 			rpm_msg = req->rpm_msgs + i;
req               319 drivers/soc/qcom/rpmh.c 	struct batch_cache_req *req, *tmp;
req               323 drivers/soc/qcom/rpmh.c 	list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
req               324 drivers/soc/qcom/rpmh.c 		kfree(req);
req               349 drivers/soc/qcom/rpmh.c 	struct batch_cache_req *req;
req               366 drivers/soc/qcom/rpmh.c 	ptr = kzalloc(sizeof(*req) +
req               367 drivers/soc/qcom/rpmh.c 		      count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
req               372 drivers/soc/qcom/rpmh.c 	req = ptr;
req               373 drivers/soc/qcom/rpmh.c 	compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);
req               375 drivers/soc/qcom/rpmh.c 	req->count = count;
req               376 drivers/soc/qcom/rpmh.c 	rpm_msgs = req->rpm_msgs;
req               384 drivers/soc/qcom/rpmh.c 		cache_batch(ctrlr, req);
req               423 drivers/soc/qcom/rpmh.c static int is_req_valid(struct cache_req *req)
req               425 drivers/soc/qcom/rpmh.c 	return (req->sleep_val != UINT_MAX &&
req               426 drivers/soc/qcom/rpmh.c 		req->wake_val != UINT_MAX &&
req               427 drivers/soc/qcom/rpmh.c 		req->sleep_val != req->wake_val);
req               209 drivers/soc/qcom/rpmpd.c 	struct rpmpd_req req = {
req               216 drivers/soc/qcom/rpmpd.c 				  pd->res_type, pd->res_id, &req, sizeof(req));
req               221 drivers/soc/qcom/rpmpd.c 	struct rpmpd_req req = {
req               228 drivers/soc/qcom/rpmpd.c 				  &req, sizeof(req));
req               103 drivers/soc/qcom/smd-rpm.c 		struct qcom_rpm_request req;
req               121 drivers/soc/qcom/smd-rpm.c 	pkt->req.msg_id = cpu_to_le32(msg_id++);
req               122 drivers/soc/qcom/smd-rpm.c 	pkt->req.flags = cpu_to_le32(state);
req               123 drivers/soc/qcom/smd-rpm.c 	pkt->req.type = cpu_to_le32(type);
req               124 drivers/soc/qcom/smd-rpm.c 	pkt->req.id = cpu_to_le32(id);
req               125 drivers/soc/qcom/smd-rpm.c 	pkt->req.data_len = cpu_to_le32(count);
req               199 drivers/soc/qcom/wcnss_ctrl.c 	struct wcnss_download_nv_req *req;
req               205 drivers/soc/qcom/wcnss_ctrl.c 	req = kzalloc(sizeof(*req) + NV_FRAGMENT_SIZE, GFP_KERNEL);
req               206 drivers/soc/qcom/wcnss_ctrl.c 	if (!req)
req               219 drivers/soc/qcom/wcnss_ctrl.c 	req->hdr.type = WCNSS_DOWNLOAD_NV_REQ;
req               220 drivers/soc/qcom/wcnss_ctrl.c 	req->hdr.len = sizeof(*req) + NV_FRAGMENT_SIZE;
req               222 drivers/soc/qcom/wcnss_ctrl.c 	req->last = 0;
req               223 drivers/soc/qcom/wcnss_ctrl.c 	req->frag_size = NV_FRAGMENT_SIZE;
req               225 drivers/soc/qcom/wcnss_ctrl.c 	req->seq = 0;
req               228 drivers/soc/qcom/wcnss_ctrl.c 			req->last = 1;
req               229 drivers/soc/qcom/wcnss_ctrl.c 			req->frag_size = left;
req               230 drivers/soc/qcom/wcnss_ctrl.c 			req->hdr.len = sizeof(*req) + left;
req               233 drivers/soc/qcom/wcnss_ctrl.c 		memcpy(req->fragment, data, req->frag_size);
req               235 drivers/soc/qcom/wcnss_ctrl.c 		ret = rpmsg_send(wcnss->channel, req, req->hdr.len);
req               242 drivers/soc/qcom/wcnss_ctrl.c 		req->seq++;
req               260 drivers/soc/qcom/wcnss_ctrl.c 	kfree(req);
req                88 drivers/soc/rockchip/pm_domains.c #define DOMAIN(pwr, status, req, idle, ack, wakeup)	\
req                92 drivers/soc/rockchip/pm_domains.c 	.req_mask = (req),				\
req                98 drivers/soc/rockchip/pm_domains.c #define DOMAIN_M(pwr, status, req, idle, ack, wakeup)	\
req               103 drivers/soc/rockchip/pm_domains.c 	.req_w_mask = (req) << 16,			\
req               104 drivers/soc/rockchip/pm_domains.c 	.req_mask = (req),				\
req               110 drivers/soc/rockchip/pm_domains.c #define DOMAIN_RK3036(req, ack, idle, wakeup)		\
req               112 drivers/soc/rockchip/pm_domains.c 	.req_mask = (req),				\
req               113 drivers/soc/rockchip/pm_domains.c 	.req_w_mask = (req) << 16,			\
req               119 drivers/soc/rockchip/pm_domains.c #define DOMAIN_PX30(pwr, status, req, wakeup)		\
req               120 drivers/soc/rockchip/pm_domains.c 	DOMAIN_M(pwr, status, req, (req) << 16, req, wakeup)
req               122 drivers/soc/rockchip/pm_domains.c #define DOMAIN_RK3288(pwr, status, req, wakeup)		\
req               123 drivers/soc/rockchip/pm_domains.c 	DOMAIN(pwr, status, req, req, (req) << 16, wakeup)
req               125 drivers/soc/rockchip/pm_domains.c #define DOMAIN_RK3328(pwr, status, req, wakeup)		\
req               126 drivers/soc/rockchip/pm_domains.c 	DOMAIN_M(pwr, pwr, req, (req) << 10, req, wakeup)
req               128 drivers/soc/rockchip/pm_domains.c #define DOMAIN_RK3368(pwr, status, req, wakeup)		\
req               129 drivers/soc/rockchip/pm_domains.c 	DOMAIN(pwr, status, req, (req) << 16, req, wakeup)
req               131 drivers/soc/rockchip/pm_domains.c #define DOMAIN_RK3399(pwr, status, req, wakeup)		\
req               132 drivers/soc/rockchip/pm_domains.c 	DOMAIN(pwr, status, req, req, req, wakeup)
req               165 drivers/staging/emxx_udc/emxx_udc.c 	udc->ep0_req.req.buf		= p_buf;
req               166 drivers/staging/emxx_udc/emxx_udc.c 	udc->ep0_req.req.length		= length;
req               167 drivers/staging/emxx_udc/emxx_udc.c 	udc->ep0_req.req.dma		= 0;
req               168 drivers/staging/emxx_udc/emxx_udc.c 	udc->ep0_req.req.zero		= true;
req               169 drivers/staging/emxx_udc/emxx_udc.c 	udc->ep0_req.req.complete	= _nbu2ss_ep0_complete;
req               170 drivers/staging/emxx_udc/emxx_udc.c 	udc->ep0_req.req.status		= -EINPROGRESS;
req               171 drivers/staging/emxx_udc/emxx_udc.c 	udc->ep0_req.req.context	= udc;
req               172 drivers/staging/emxx_udc/emxx_udc.c 	udc->ep0_req.req.actual		= 0;
req               456 drivers/staging/emxx_udc/emxx_udc.c 				   struct nbu2ss_req *req, u8 direct)
req               458 drivers/staging/emxx_udc/emxx_udc.c 	if (req->req.dma == DMA_ADDR_INVALID) {
req               459 drivers/staging/emxx_udc/emxx_udc.c 		if (req->unaligned) {
req               460 drivers/staging/emxx_udc/emxx_udc.c 			req->req.dma = ep->phys_buf;
req               462 drivers/staging/emxx_udc/emxx_udc.c 			req->req.dma = dma_map_single(udc->gadget.dev.parent,
req               463 drivers/staging/emxx_udc/emxx_udc.c 						      req->req.buf,
req               464 drivers/staging/emxx_udc/emxx_udc.c 						      req->req.length,
req               469 drivers/staging/emxx_udc/emxx_udc.c 		req->mapped = 1;
req               471 drivers/staging/emxx_udc/emxx_udc.c 		if (!req->unaligned)
req               473 drivers/staging/emxx_udc/emxx_udc.c 						   req->req.dma,
req               474 drivers/staging/emxx_udc/emxx_udc.c 						   req->req.length,
req               479 drivers/staging/emxx_udc/emxx_udc.c 		req->mapped = 0;
req               486 drivers/staging/emxx_udc/emxx_udc.c 				     struct nbu2ss_req *req, u8 direct)
req               493 drivers/staging/emxx_udc/emxx_udc.c 		count = req->req.actual % 4;
req               495 drivers/staging/emxx_udc/emxx_udc.c 			p = req->req.buf;
req               496 drivers/staging/emxx_udc/emxx_udc.c 			p += (req->req.actual - count);
req               501 drivers/staging/emxx_udc/emxx_udc.c 	if (req->mapped) {
req               502 drivers/staging/emxx_udc/emxx_udc.c 		if (req->unaligned) {
req               504 drivers/staging/emxx_udc/emxx_udc.c 				memcpy(req->req.buf, ep->virt_buf,
req               505 drivers/staging/emxx_udc/emxx_udc.c 				       req->req.actual & 0xfffffffc);
req               508 drivers/staging/emxx_udc/emxx_udc.c 					 req->req.dma, req->req.length,
req               513 drivers/staging/emxx_udc/emxx_udc.c 		req->req.dma = DMA_ADDR_INVALID;
req               514 drivers/staging/emxx_udc/emxx_udc.c 		req->mapped = 0;
req               516 drivers/staging/emxx_udc/emxx_udc.c 		if (!req->unaligned)
req               518 drivers/staging/emxx_udc/emxx_udc.c 						req->req.dma, req->req.length,
req               525 drivers/staging/emxx_udc/emxx_udc.c 		p = req->req.buf;
req               526 drivers/staging/emxx_udc/emxx_udc.c 		p += (req->req.actual - count);
req               658 drivers/staging/emxx_udc/emxx_udc.c 				   struct nbu2ss_req *req)
req               667 drivers/staging/emxx_udc/emxx_udc.c 	if (req->req.actual == req->req.length) {
req               668 drivers/staging/emxx_udc/emxx_udc.c 		if ((req->req.actual % EP0_PACKETSIZE) == 0) {
req               669 drivers/staging/emxx_udc/emxx_udc.c 			if (req->zero) {
req               670 drivers/staging/emxx_udc/emxx_udc.c 				req->zero = false;
req               686 drivers/staging/emxx_udc/emxx_udc.c 	i_remain_size = req->req.length - req->req.actual;
req               687 drivers/staging/emxx_udc/emxx_udc.c 	p_buffer = (u8 *)req->req.buf;
req               688 drivers/staging/emxx_udc/emxx_udc.c 	p_buffer += req->req.actual;
req               694 drivers/staging/emxx_udc/emxx_udc.c 	req->div_len = result;
req               705 drivers/staging/emxx_udc/emxx_udc.c 		req->div_len = result;
req               713 drivers/staging/emxx_udc/emxx_udc.c 				    struct nbu2ss_req *req)
req               727 drivers/staging/emxx_udc/emxx_udc.c 		i_remain_size = req->req.length - req->req.actual;
req               728 drivers/staging/emxx_udc/emxx_udc.c 		p_buffer = (u8 *)req->req.buf;
req               729 drivers/staging/emxx_udc/emxx_udc.c 		p_buffer += req->req.actual;
req               736 drivers/staging/emxx_udc/emxx_udc.c 		req->req.actual += result;
req               745 drivers/staging/emxx_udc/emxx_udc.c 			req->req.actual += result;
req               753 drivers/staging/emxx_udc/emxx_udc.c 	if (req->req.actual == req->req.length) {
req               754 drivers/staging/emxx_udc/emxx_udc.c 		if ((req->req.actual % EP0_PACKETSIZE) == 0) {
req               755 drivers/staging/emxx_udc/emxx_udc.c 			if (req->zero) {
req               756 drivers/staging/emxx_udc/emxx_udc.c 				req->zero = false;
req               765 drivers/staging/emxx_udc/emxx_udc.c 	if ((req->req.actual % EP0_PACKETSIZE) != 0)
req               768 drivers/staging/emxx_udc/emxx_udc.c 	if (req->req.actual > req->req.length) {
req               787 drivers/staging/emxx_udc/emxx_udc.c static int _nbu2ss_out_dma(struct nbu2ss_udc *udc, struct nbu2ss_req *req,
req               799 drivers/staging/emxx_udc/emxx_udc.c 	if (req->dma_flag)
req               802 drivers/staging/emxx_udc/emxx_udc.c 	req->dma_flag = true;
req               803 drivers/staging/emxx_udc/emxx_udc.c 	p_buffer = req->req.dma;
req               804 drivers/staging/emxx_udc/emxx_udc.c 	p_buffer += req->req.actual;
req               840 drivers/staging/emxx_udc/emxx_udc.c 	req->div_len = result;
req               847 drivers/staging/emxx_udc/emxx_udc.c 			       struct nbu2ss_req *req, u32 length)
req               858 drivers/staging/emxx_udc/emxx_udc.c 	if (req->dma_flag)
req               864 drivers/staging/emxx_udc/emxx_udc.c 	p_buffer = (u8 *)req->req.buf;
req               865 drivers/staging/emxx_udc/emxx_udc.c 	p_buf_32 = (union usb_reg_access *)(p_buffer + req->req.actual);
req               890 drivers/staging/emxx_udc/emxx_udc.c 	req->req.actual += result;
req               892 drivers/staging/emxx_udc/emxx_udc.c 	if ((req->req.actual == req->req.length) ||
req               893 drivers/staging/emxx_udc/emxx_udc.c 	    ((req->req.actual % ep->ep.maxpacket) != 0)) {
req               902 drivers/staging/emxx_udc/emxx_udc.c 				struct nbu2ss_req *req, u32 data_size)
req               913 drivers/staging/emxx_udc/emxx_udc.c 	i_buf_size = min((req->req.length - req->req.actual), data_size);
req               915 drivers/staging/emxx_udc/emxx_udc.c 	if ((ep->ep_type != USB_ENDPOINT_XFER_INT) && (req->req.dma != 0) &&
req               917 drivers/staging/emxx_udc/emxx_udc.c 		nret = _nbu2ss_out_dma(udc, req, num, i_buf_size);
req               920 drivers/staging/emxx_udc/emxx_udc.c 		nret = _nbu2ss_epn_out_pio(udc, ep, req, i_buf_size);
req               929 drivers/staging/emxx_udc/emxx_udc.c 				    struct nbu2ss_req *req)
req               947 drivers/staging/emxx_udc/emxx_udc.c 		result = _nbu2ss_epn_out_data(udc, ep, req, i_recv_length);
req               950 drivers/staging/emxx_udc/emxx_udc.c 				req->req.actual += result;
req               955 drivers/staging/emxx_udc/emxx_udc.c 		if ((req->req.actual == req->req.length) ||
req               956 drivers/staging/emxx_udc/emxx_udc.c 		    ((req->req.actual % ep->ep.maxpacket) != 0)) {
req               962 drivers/staging/emxx_udc/emxx_udc.c 		if ((req->req.actual % ep->ep.maxpacket) == 0) {
req               963 drivers/staging/emxx_udc/emxx_udc.c 			if (req->zero) {
req               964 drivers/staging/emxx_udc/emxx_udc.c 				req->zero = false;
req               970 drivers/staging/emxx_udc/emxx_udc.c 	if (req->req.actual > req->req.length) {
req               973 drivers/staging/emxx_udc/emxx_udc.c 			req->req.actual, req->req.length);
req               982 drivers/staging/emxx_udc/emxx_udc.c 			  struct nbu2ss_req *req, u32 num, u32 length)
req               993 drivers/staging/emxx_udc/emxx_udc.c 	if (req->dma_flag)
req               997 drivers/staging/emxx_udc/emxx_udc.c 	if (req->req.actual == 0)
req               998 drivers/staging/emxx_udc/emxx_udc.c 		_nbu2ss_dma_map_single(udc, ep, req, USB_DIR_IN);
req              1000 drivers/staging/emxx_udc/emxx_udc.c 	req->dma_flag = true;
req              1030 drivers/staging/emxx_udc/emxx_udc.c 	p_buffer = req->req.dma;
req              1031 drivers/staging/emxx_udc/emxx_udc.c 	p_buffer += req->req.actual;
req              1046 drivers/staging/emxx_udc/emxx_udc.c 	req->div_len = result;
req              1053 drivers/staging/emxx_udc/emxx_udc.c 			      struct nbu2ss_req *req, u32 length)
req              1064 drivers/staging/emxx_udc/emxx_udc.c 	if (req->dma_flag)
req              1068 drivers/staging/emxx_udc/emxx_udc.c 		p_buffer = (u8 *)req->req.buf;
req              1069 drivers/staging/emxx_udc/emxx_udc.c 		p_buf_32 = (union usb_reg_access *)(p_buffer + req->req.actual);
req              1095 drivers/staging/emxx_udc/emxx_udc.c 	req->div_len = result;
req              1102 drivers/staging/emxx_udc/emxx_udc.c 			       struct nbu2ss_req *req, u32 data_size)
req              1112 drivers/staging/emxx_udc/emxx_udc.c 	if ((ep->ep_type != USB_ENDPOINT_XFER_INT) && (req->req.dma != 0) &&
req              1114 drivers/staging/emxx_udc/emxx_udc.c 		nret = _nbu2ss_in_dma(udc, ep, req, num, data_size);
req              1117 drivers/staging/emxx_udc/emxx_udc.c 		nret = _nbu2ss_epn_in_pio(udc, ep, req, data_size);
req              1125 drivers/staging/emxx_udc/emxx_udc.c 				   struct nbu2ss_ep *ep, struct nbu2ss_req *req)
req              1141 drivers/staging/emxx_udc/emxx_udc.c 	if (req->req.actual == 0) {
req              1152 drivers/staging/emxx_udc/emxx_udc.c 	i_buf_size = req->req.length - req->req.actual;
req              1154 drivers/staging/emxx_udc/emxx_udc.c 		result = _nbu2ss_epn_in_data(udc, ep, req, i_buf_size);
req              1155 drivers/staging/emxx_udc/emxx_udc.c 	else if (req->req.length == 0)
req              1164 drivers/staging/emxx_udc/emxx_udc.c 				  struct nbu2ss_req *req,
req              1169 drivers/staging/emxx_udc/emxx_udc.c 	req->dma_flag = false;
req              1170 drivers/staging/emxx_udc/emxx_udc.c 	req->div_len = 0;
req              1172 drivers/staging/emxx_udc/emxx_udc.c 	if (req->req.length == 0) {
req              1173 drivers/staging/emxx_udc/emxx_udc.c 		req->zero = false;
req              1175 drivers/staging/emxx_udc/emxx_udc.c 		if ((req->req.length % ep->ep.maxpacket) == 0)
req              1176 drivers/staging/emxx_udc/emxx_udc.c 			req->zero = req->req.zero;
req              1178 drivers/staging/emxx_udc/emxx_udc.c 			req->zero = false;
req              1185 drivers/staging/emxx_udc/emxx_udc.c 			nret = _nbu2ss_ep0_in_transfer(udc, req);
req              1189 drivers/staging/emxx_udc/emxx_udc.c 			nret = _nbu2ss_ep0_out_transfer(udc, req);
req              1205 drivers/staging/emxx_udc/emxx_udc.c 				nret = _nbu2ss_epn_out_transfer(udc, ep, req);
req              1208 drivers/staging/emxx_udc/emxx_udc.c 			nret = _nbu2ss_epn_in_transfer(udc, ep, req);
req              1220 drivers/staging/emxx_udc/emxx_udc.c 	struct nbu2ss_req *req;
req              1222 drivers/staging/emxx_udc/emxx_udc.c 	req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
req              1223 drivers/staging/emxx_udc/emxx_udc.c 	if (!req)
req              1235 drivers/staging/emxx_udc/emxx_udc.c 	_nbu2ss_start_transfer(ep->udc, ep, req, bflag);
req              1678 drivers/staging/emxx_udc/emxx_udc.c 	struct nbu2ss_req	*req;
req              1681 drivers/staging/emxx_udc/emxx_udc.c 	req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
req              1682 drivers/staging/emxx_udc/emxx_udc.c 	if (!req)
req              1683 drivers/staging/emxx_udc/emxx_udc.c 		req = &udc->ep0_req;
req              1685 drivers/staging/emxx_udc/emxx_udc.c 	req->req.actual += req->div_len;
req              1686 drivers/staging/emxx_udc/emxx_udc.c 	req->div_len = 0;
req              1688 drivers/staging/emxx_udc/emxx_udc.c 	nret = _nbu2ss_ep0_in_transfer(udc, req);
req              1701 drivers/staging/emxx_udc/emxx_udc.c 	struct nbu2ss_req	*req;
req              1704 drivers/staging/emxx_udc/emxx_udc.c 	req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
req              1705 drivers/staging/emxx_udc/emxx_udc.c 	if (!req)
req              1706 drivers/staging/emxx_udc/emxx_udc.c 		req = &udc->ep0_req;
req              1708 drivers/staging/emxx_udc/emxx_udc.c 	nret = _nbu2ss_ep0_out_transfer(udc, req);
req              1715 drivers/staging/emxx_udc/emxx_udc.c 		req->req.status = nret;
req              1724 drivers/staging/emxx_udc/emxx_udc.c 	struct nbu2ss_req	*req;
req              1727 drivers/staging/emxx_udc/emxx_udc.c 	req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
req              1728 drivers/staging/emxx_udc/emxx_udc.c 	if (!req) {
req              1729 drivers/staging/emxx_udc/emxx_udc.c 		req = &udc->ep0_req;
req              1730 drivers/staging/emxx_udc/emxx_udc.c 		if (req->req.complete)
req              1731 drivers/staging/emxx_udc/emxx_udc.c 			req->req.complete(&ep->ep, &req->req);
req              1734 drivers/staging/emxx_udc/emxx_udc.c 		if (req->req.complete)
req              1735 drivers/staging/emxx_udc/emxx_udc.c 			_nbu2ss_ep_done(ep, req, 0);
req              1826 drivers/staging/emxx_udc/emxx_udc.c 			    struct nbu2ss_req *req,
req              1831 drivers/staging/emxx_udc/emxx_udc.c 	list_del_init(&req->queue);
req              1836 drivers/staging/emxx_udc/emxx_udc.c 	if (likely(req->req.status == -EINPROGRESS))
req              1837 drivers/staging/emxx_udc/emxx_udc.c 		req->req.status = status;
req              1848 drivers/staging/emxx_udc/emxx_udc.c 	    (req->req.dma != 0))
req              1849 drivers/staging/emxx_udc/emxx_udc.c 		_nbu2ss_dma_unmap_single(udc, ep, req, USB_DIR_OUT);
req              1853 drivers/staging/emxx_udc/emxx_udc.c 	req->req.complete(&ep->ep, &req->req);
req              1860 drivers/staging/emxx_udc/emxx_udc.c 				      struct nbu2ss_req *req)
req              1867 drivers/staging/emxx_udc/emxx_udc.c 	if (req->dma_flag)
req              1870 drivers/staging/emxx_udc/emxx_udc.c 	req->req.actual += req->div_len;
req              1871 drivers/staging/emxx_udc/emxx_udc.c 	req->div_len = 0;
req              1873 drivers/staging/emxx_udc/emxx_udc.c 	if (req->req.actual != req->req.length) {
req              1876 drivers/staging/emxx_udc/emxx_udc.c 		result = _nbu2ss_epn_in_transfer(udc, ep, req);
req              1879 drivers/staging/emxx_udc/emxx_udc.c 		if (req->zero && ((req->req.actual % ep->ep.maxpacket) == 0)) {
req              1886 drivers/staging/emxx_udc/emxx_udc.c 				req->zero = false;
req              1896 drivers/staging/emxx_udc/emxx_udc.c 		_nbu2ss_ep_done(ep, req, result);
req              1903 drivers/staging/emxx_udc/emxx_udc.c 				       struct nbu2ss_req *req)
req              1907 drivers/staging/emxx_udc/emxx_udc.c 	result = _nbu2ss_epn_out_transfer(udc, ep, req);
req              1909 drivers/staging/emxx_udc/emxx_udc.c 		_nbu2ss_ep_done(ep, req, result);
req              1915 drivers/staging/emxx_udc/emxx_udc.c 					  struct nbu2ss_req *req)
req              1921 drivers/staging/emxx_udc/emxx_udc.c 	preq = &req->req;
req              1923 drivers/staging/emxx_udc/emxx_udc.c 	if (!req->dma_flag)
req              1926 drivers/staging/emxx_udc/emxx_udc.c 	preq->actual += req->div_len;
req              1927 drivers/staging/emxx_udc/emxx_udc.c 	req->div_len = 0;
req              1928 drivers/staging/emxx_udc/emxx_udc.c 	req->dma_flag = false;
req              1931 drivers/staging/emxx_udc/emxx_udc.c 	_nbu2ss_dma_unmap_single(udc, ep, req, USB_DIR_IN);
req              1935 drivers/staging/emxx_udc/emxx_udc.c 		_nbu2ss_epn_in_transfer(udc, ep, req);
req              1943 drivers/staging/emxx_udc/emxx_udc.c 			_nbu2ss_epn_in_int(udc, ep, req);
req              1951 drivers/staging/emxx_udc/emxx_udc.c 					   struct nbu2ss_req *req)
req              1961 drivers/staging/emxx_udc/emxx_udc.c 	if (req->req.actual == req->req.length) {
req              1962 drivers/staging/emxx_udc/emxx_udc.c 		if ((req->req.length % ep->ep.maxpacket) && !req->zero) {
req              1963 drivers/staging/emxx_udc/emxx_udc.c 			req->div_len = 0;
req              1964 drivers/staging/emxx_udc/emxx_udc.c 			req->dma_flag = false;
req              1965 drivers/staging/emxx_udc/emxx_udc.c 			_nbu2ss_ep_done(ep, req, 0);
req              1986 drivers/staging/emxx_udc/emxx_udc.c 		if ((req->div_len % mpkt) == 0)
req              1987 drivers/staging/emxx_udc/emxx_udc.c 			req->div_len -= mpkt * dmacnt;
req              1990 drivers/staging/emxx_udc/emxx_udc.c 	if ((req->req.actual % ep->ep.maxpacket) > 0) {
req              1991 drivers/staging/emxx_udc/emxx_udc.c 		if (req->req.actual == req->div_len) {
req              1992 drivers/staging/emxx_udc/emxx_udc.c 			req->div_len = 0;
req              1993 drivers/staging/emxx_udc/emxx_udc.c 			req->dma_flag = false;
req              1994 drivers/staging/emxx_udc/emxx_udc.c 			_nbu2ss_ep_done(ep, req, 0);
req              1999 drivers/staging/emxx_udc/emxx_udc.c 	req->req.actual += req->div_len;
req              2000 drivers/staging/emxx_udc/emxx_udc.c 	req->div_len = 0;
req              2001 drivers/staging/emxx_udc/emxx_udc.c 	req->dma_flag = false;
req              2003 drivers/staging/emxx_udc/emxx_udc.c 	_nbu2ss_epn_out_int(udc, ep, req);
req              2012 drivers/staging/emxx_udc/emxx_udc.c 	struct nbu2ss_req	*req;
req              2023 drivers/staging/emxx_udc/emxx_udc.c 	req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
req              2024 drivers/staging/emxx_udc/emxx_udc.c 	if (!req) {
req              2031 drivers/staging/emxx_udc/emxx_udc.c 		_nbu2ss_epn_out_dma_int(udc, ep, req);
req              2035 drivers/staging/emxx_udc/emxx_udc.c 		_nbu2ss_epn_out_int(udc, ep, req);
req              2039 drivers/staging/emxx_udc/emxx_udc.c 		_nbu2ss_epn_in_dma_int(udc, ep, req);
req              2043 drivers/staging/emxx_udc/emxx_udc.c 		_nbu2ss_epn_in_int(udc, ep, req);
req              2067 drivers/staging/emxx_udc/emxx_udc.c 	struct nbu2ss_req *req;
req              2079 drivers/staging/emxx_udc/emxx_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
req              2080 drivers/staging/emxx_udc/emxx_udc.c 		_nbu2ss_ep_done(ep, req, status);
req              2508 drivers/staging/emxx_udc/emxx_udc.c 	struct nbu2ss_req *req;
req              2510 drivers/staging/emxx_udc/emxx_udc.c 	req = kzalloc(sizeof(*req), gfp_flags);
req              2511 drivers/staging/emxx_udc/emxx_udc.c 	if (!req)
req              2515 drivers/staging/emxx_udc/emxx_udc.c 	req->req.dma = DMA_ADDR_INVALID;
req              2517 drivers/staging/emxx_udc/emxx_udc.c 	INIT_LIST_HEAD(&req->queue);
req              2519 drivers/staging/emxx_udc/emxx_udc.c 	return &req->req;
req              2526 drivers/staging/emxx_udc/emxx_udc.c 	struct nbu2ss_req *req;
req              2529 drivers/staging/emxx_udc/emxx_udc.c 		req = container_of(_req, struct nbu2ss_req, req);
req              2531 drivers/staging/emxx_udc/emxx_udc.c 		kfree(req);
req              2539 drivers/staging/emxx_udc/emxx_udc.c 	struct nbu2ss_req	*req;
req              2557 drivers/staging/emxx_udc/emxx_udc.c 	req = container_of(_req, struct nbu2ss_req, req);
req              2560 drivers/staging/emxx_udc/emxx_udc.c 		     !list_empty(&req->queue))) {
req              2567 drivers/staging/emxx_udc/emxx_udc.c 		if (!list_empty(&req->queue))
req              2590 drivers/staging/emxx_udc/emxx_udc.c 	if ((uintptr_t)req->req.buf & 0x3)
req              2591 drivers/staging/emxx_udc/emxx_udc.c 		req->unaligned = true;
req              2593 drivers/staging/emxx_udc/emxx_udc.c 		req->unaligned = false;
req              2595 drivers/staging/emxx_udc/emxx_udc.c 	if (req->unaligned) {
req              2602 drivers/staging/emxx_udc/emxx_udc.c 				memcpy(ep->virt_buf, req->req.buf,
req              2603 drivers/staging/emxx_udc/emxx_udc.c 				       req->req.length);
req              2608 drivers/staging/emxx_udc/emxx_udc.c 	    (req->req.dma != 0))
req              2609 drivers/staging/emxx_udc/emxx_udc.c 		_nbu2ss_dma_map_single(udc, ep, req, USB_DIR_OUT);
req              2616 drivers/staging/emxx_udc/emxx_udc.c 	list_add_tail(&req->queue, &ep->queue);
req              2619 drivers/staging/emxx_udc/emxx_udc.c 		result = _nbu2ss_start_transfer(udc, ep, req, false);
req              2623 drivers/staging/emxx_udc/emxx_udc.c 			list_del(&req->queue);
req              2626 drivers/staging/emxx_udc/emxx_udc.c 			if (req->req.length < 4 &&
req              2627 drivers/staging/emxx_udc/emxx_udc.c 			    req->req.length == req->req.actual)
req              2629 drivers/staging/emxx_udc/emxx_udc.c 			if (req->req.length == req->req.actual)
req              2631 drivers/staging/emxx_udc/emxx_udc.c 				_nbu2ss_ep_done(ep, req, result);
req              2643 drivers/staging/emxx_udc/emxx_udc.c 	struct nbu2ss_req	*req;
req              2663 drivers/staging/emxx_udc/emxx_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
req              2664 drivers/staging/emxx_udc/emxx_udc.c 		if (&req->req == _req)
req              2667 drivers/staging/emxx_udc/emxx_udc.c 	if (&req->req != _req) {
req              2673 drivers/staging/emxx_udc/emxx_udc.c 	_nbu2ss_ep_done(ep, req, -ECONNRESET);
req               522 drivers/staging/emxx_udc/emxx_udc.h 	struct usb_request		req;
req                16 drivers/staging/greybus/audio_apbridgea.c 	struct audio_apbridgea_set_config_request req;
req                18 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_CONFIG;
req                19 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.i2s_port = cpu_to_le16(i2s_port);
req                20 drivers/staging/greybus/audio_apbridgea.c 	req.format = cpu_to_le32(format);
req                21 drivers/staging/greybus/audio_apbridgea.c 	req.rate = cpu_to_le32(rate);
req                22 drivers/staging/greybus/audio_apbridgea.c 	req.mclk_freq = cpu_to_le32(mclk_freq);
req                24 drivers/staging/greybus/audio_apbridgea.c 	return gb_hd_output(connection->hd, &req, sizeof(req),
req                33 drivers/staging/greybus/audio_apbridgea.c 	struct audio_apbridgea_register_cport_request req;
req                36 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.type = AUDIO_APBRIDGEA_TYPE_REGISTER_CPORT;
req                37 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.i2s_port = cpu_to_le16(i2s_port);
req                38 drivers/staging/greybus/audio_apbridgea.c 	req.cport = cpu_to_le16(cportid);
req                39 drivers/staging/greybus/audio_apbridgea.c 	req.direction = direction;
req                45 drivers/staging/greybus/audio_apbridgea.c 	return gb_hd_output(connection->hd, &req, sizeof(req),
req                54 drivers/staging/greybus/audio_apbridgea.c 	struct audio_apbridgea_unregister_cport_request req;
req                57 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.type = AUDIO_APBRIDGEA_TYPE_UNREGISTER_CPORT;
req                58 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.i2s_port = cpu_to_le16(i2s_port);
req                59 drivers/staging/greybus/audio_apbridgea.c 	req.cport = cpu_to_le16(cportid);
req                60 drivers/staging/greybus/audio_apbridgea.c 	req.direction = direction;
req                62 drivers/staging/greybus/audio_apbridgea.c 	ret = gb_hd_output(connection->hd, &req, sizeof(req),
req                74 drivers/staging/greybus/audio_apbridgea.c 	struct audio_apbridgea_set_tx_data_size_request req;
req                76 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_TX_DATA_SIZE;
req                77 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.i2s_port = cpu_to_le16(i2s_port);
req                78 drivers/staging/greybus/audio_apbridgea.c 	req.size = cpu_to_le16(size);
req                80 drivers/staging/greybus/audio_apbridgea.c 	return gb_hd_output(connection->hd, &req, sizeof(req),
req                88 drivers/staging/greybus/audio_apbridgea.c 	struct audio_apbridgea_prepare_tx_request req;
req                90 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.type = AUDIO_APBRIDGEA_TYPE_PREPARE_TX;
req                91 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.i2s_port = cpu_to_le16(i2s_port);
req                93 drivers/staging/greybus/audio_apbridgea.c 	return gb_hd_output(connection->hd, &req, sizeof(req),
req               101 drivers/staging/greybus/audio_apbridgea.c 	struct audio_apbridgea_start_tx_request req;
req               103 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.type = AUDIO_APBRIDGEA_TYPE_START_TX;
req               104 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.i2s_port = cpu_to_le16(i2s_port);
req               105 drivers/staging/greybus/audio_apbridgea.c 	req.timestamp = cpu_to_le64(timestamp);
req               107 drivers/staging/greybus/audio_apbridgea.c 	return gb_hd_output(connection->hd, &req, sizeof(req),
req               114 drivers/staging/greybus/audio_apbridgea.c 	struct audio_apbridgea_stop_tx_request req;
req               116 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.type = AUDIO_APBRIDGEA_TYPE_STOP_TX;
req               117 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.i2s_port = cpu_to_le16(i2s_port);
req               119 drivers/staging/greybus/audio_apbridgea.c 	return gb_hd_output(connection->hd, &req, sizeof(req),
req               127 drivers/staging/greybus/audio_apbridgea.c 	struct audio_apbridgea_shutdown_tx_request req;
req               129 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.type = AUDIO_APBRIDGEA_TYPE_SHUTDOWN_TX;
req               130 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.i2s_port = cpu_to_le16(i2s_port);
req               132 drivers/staging/greybus/audio_apbridgea.c 	return gb_hd_output(connection->hd, &req, sizeof(req),
req               140 drivers/staging/greybus/audio_apbridgea.c 	struct audio_apbridgea_set_rx_data_size_request req;
req               142 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_RX_DATA_SIZE;
req               143 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.i2s_port = cpu_to_le16(i2s_port);
req               144 drivers/staging/greybus/audio_apbridgea.c 	req.size = cpu_to_le16(size);
req               146 drivers/staging/greybus/audio_apbridgea.c 	return gb_hd_output(connection->hd, &req, sizeof(req),
req               154 drivers/staging/greybus/audio_apbridgea.c 	struct audio_apbridgea_prepare_rx_request req;
req               156 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.type = AUDIO_APBRIDGEA_TYPE_PREPARE_RX;
req               157 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.i2s_port = cpu_to_le16(i2s_port);
req               159 drivers/staging/greybus/audio_apbridgea.c 	return gb_hd_output(connection->hd, &req, sizeof(req),
req               167 drivers/staging/greybus/audio_apbridgea.c 	struct audio_apbridgea_start_rx_request req;
req               169 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.type = AUDIO_APBRIDGEA_TYPE_START_RX;
req               170 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.i2s_port = cpu_to_le16(i2s_port);
req               172 drivers/staging/greybus/audio_apbridgea.c 	return gb_hd_output(connection->hd, &req, sizeof(req),
req               179 drivers/staging/greybus/audio_apbridgea.c 	struct audio_apbridgea_stop_rx_request req;
req               181 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.type = AUDIO_APBRIDGEA_TYPE_STOP_RX;
req               182 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.i2s_port = cpu_to_le16(i2s_port);
req               184 drivers/staging/greybus/audio_apbridgea.c 	return gb_hd_output(connection->hd, &req, sizeof(req),
req               192 drivers/staging/greybus/audio_apbridgea.c 	struct audio_apbridgea_shutdown_rx_request req;
req               194 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.type = AUDIO_APBRIDGEA_TYPE_SHUTDOWN_RX;
req               195 drivers/staging/greybus/audio_apbridgea.c 	req.hdr.i2s_port = cpu_to_le16(i2s_port);
req               197 drivers/staging/greybus/audio_apbridgea.c 	return gb_hd_output(connection->hd, &req, sizeof(req),
req                50 drivers/staging/greybus/audio_gb.c 	struct gb_audio_get_control_request req;
req                54 drivers/staging/greybus/audio_gb.c 	req.control_id = control_id;
req                55 drivers/staging/greybus/audio_gb.c 	req.index = index;
req                58 drivers/staging/greybus/audio_gb.c 				&req, sizeof(req), &resp, sizeof(resp));
req                72 drivers/staging/greybus/audio_gb.c 	struct gb_audio_set_control_request req;
req                74 drivers/staging/greybus/audio_gb.c 	req.control_id = control_id;
req                75 drivers/staging/greybus/audio_gb.c 	req.index = index;
req                76 drivers/staging/greybus/audio_gb.c 	memcpy(&req.value, value, sizeof(req.value));
req                79 drivers/staging/greybus/audio_gb.c 				 &req, sizeof(req), NULL, 0);
req                86 drivers/staging/greybus/audio_gb.c 	struct gb_audio_enable_widget_request req;
req                88 drivers/staging/greybus/audio_gb.c 	req.widget_id = widget_id;
req                91 drivers/staging/greybus/audio_gb.c 				 &req, sizeof(req), NULL, 0);
req                98 drivers/staging/greybus/audio_gb.c 	struct gb_audio_disable_widget_request req;
req               100 drivers/staging/greybus/audio_gb.c 	req.widget_id = widget_id;
req               103 drivers/staging/greybus/audio_gb.c 				 &req, sizeof(req), NULL, 0);
req               111 drivers/staging/greybus/audio_gb.c 	struct gb_audio_get_pcm_request req;
req               115 drivers/staging/greybus/audio_gb.c 	req.data_cport = cpu_to_le16(data_cport);
req               118 drivers/staging/greybus/audio_gb.c 				&req, sizeof(req), &resp, sizeof(resp));
req               135 drivers/staging/greybus/audio_gb.c 	struct gb_audio_set_pcm_request req;
req               137 drivers/staging/greybus/audio_gb.c 	req.data_cport = cpu_to_le16(data_cport);
req               138 drivers/staging/greybus/audio_gb.c 	req.format = cpu_to_le32(format);
req               139 drivers/staging/greybus/audio_gb.c 	req.rate = cpu_to_le32(rate);
req               140 drivers/staging/greybus/audio_gb.c 	req.channels = channels;
req               141 drivers/staging/greybus/audio_gb.c 	req.sig_bits = sig_bits;
req               144 drivers/staging/greybus/audio_gb.c 				 &req, sizeof(req), NULL, 0);
req               151 drivers/staging/greybus/audio_gb.c 	struct gb_audio_set_tx_data_size_request req;
req               153 drivers/staging/greybus/audio_gb.c 	req.data_cport = cpu_to_le16(data_cport);
req               154 drivers/staging/greybus/audio_gb.c 	req.size = cpu_to_le16(size);
req               157 drivers/staging/greybus/audio_gb.c 				 &req, sizeof(req), NULL, 0);
req               164 drivers/staging/greybus/audio_gb.c 	struct gb_audio_activate_tx_request req;
req               166 drivers/staging/greybus/audio_gb.c 	req.data_cport = cpu_to_le16(data_cport);
req               169 drivers/staging/greybus/audio_gb.c 				 &req, sizeof(req), NULL, 0);
req               176 drivers/staging/greybus/audio_gb.c 	struct gb_audio_deactivate_tx_request req;
req               178 drivers/staging/greybus/audio_gb.c 	req.data_cport = cpu_to_le16(data_cport);
req               181 drivers/staging/greybus/audio_gb.c 				 &req, sizeof(req), NULL, 0);
req               188 drivers/staging/greybus/audio_gb.c 	struct gb_audio_set_rx_data_size_request req;
req               190 drivers/staging/greybus/audio_gb.c 	req.data_cport = cpu_to_le16(data_cport);
req               191 drivers/staging/greybus/audio_gb.c 	req.size = cpu_to_le16(size);
req               194 drivers/staging/greybus/audio_gb.c 				 &req, sizeof(req), NULL, 0);
req               201 drivers/staging/greybus/audio_gb.c 	struct gb_audio_activate_rx_request req;
req               203 drivers/staging/greybus/audio_gb.c 	req.data_cport = cpu_to_le16(data_cport);
req               206 drivers/staging/greybus/audio_gb.c 				 &req, sizeof(req), NULL, 0);
req               213 drivers/staging/greybus/audio_gb.c 	struct gb_audio_deactivate_rx_request req;
req               215 drivers/staging/greybus/audio_gb.c 	req.data_cport = cpu_to_le16(data_cport);
req               218 drivers/staging/greybus/audio_gb.c 				 &req, sizeof(req), NULL, 0);
req                21 drivers/staging/greybus/audio_module.c 				struct gb_audio_jack_event_request *req)
req                30 drivers/staging/greybus/audio_module.c 				    req->jack_attribute, req->event);
req                36 drivers/staging/greybus/audio_module.c 			     req->jack_attribute, req->event);
req                38 drivers/staging/greybus/audio_module.c 	if (req->event == GB_AUDIO_JACK_EVENT_REMOVAL) {
req                50 drivers/staging/greybus/audio_module.c 	report = req->jack_attribute & module->jack_mask;
req                54 drivers/staging/greybus/audio_module.c 				    req->jack_attribute, req->event);
req                70 drivers/staging/greybus/audio_module.c 				  struct gb_audio_button_event_request *req)
req                78 drivers/staging/greybus/audio_module.c 				    req->button_id, req->event);
req                84 drivers/staging/greybus/audio_module.c 			     req->button_id, req->event);
req                96 drivers/staging/greybus/audio_module.c 	switch (req->button_id) {
req               120 drivers/staging/greybus/audio_module.c 	if (req->event == GB_AUDIO_BUTTON_EVENT_PRESS)
req               133 drivers/staging/greybus/audio_module.c 				  struct gb_audio_streaming_event_request *req)
req               136 drivers/staging/greybus/audio_module.c 		 le16_to_cpu(req->data_cport), req->event);
req               527 drivers/staging/greybus/camera.c 	struct gb_camera_configure_streams_request *req;
req               538 drivers/staging/greybus/camera.c 	req_size = sizeof(*req) + nstreams * sizeof(req->config[0]);
req               541 drivers/staging/greybus/camera.c 	req = kmalloc(req_size, GFP_KERNEL);
req               543 drivers/staging/greybus/camera.c 	if (!req || !resp) {
req               544 drivers/staging/greybus/camera.c 		kfree(req);
req               549 drivers/staging/greybus/camera.c 	req->num_streams = nstreams;
req               550 drivers/staging/greybus/camera.c 	req->flags = *flags;
req               551 drivers/staging/greybus/camera.c 	req->padding = 0;
req               554 drivers/staging/greybus/camera.c 		struct gb_camera_stream_config_request *cfg = &req->config[i];
req               576 drivers/staging/greybus/camera.c 					     req, req_size,
req               602 drivers/staging/greybus/camera.c 	    (req->flags & GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY))
req               631 drivers/staging/greybus/camera.c 		memset(req, 0, sizeof(*req));
req               634 drivers/staging/greybus/camera.c 				  req, sizeof(*req),
req               649 drivers/staging/greybus/camera.c 	kfree(req);
req               658 drivers/staging/greybus/camera.c 	struct gb_camera_capture_request *req;
req               665 drivers/staging/greybus/camera.c 	req_size = sizeof(*req) + settings_size;
req               666 drivers/staging/greybus/camera.c 	req = kmalloc(req_size, GFP_KERNEL);
req               667 drivers/staging/greybus/camera.c 	if (!req)
req               670 drivers/staging/greybus/camera.c 	req->request_id = cpu_to_le32(request_id);
req               671 drivers/staging/greybus/camera.c 	req->streams = streams;
req               672 drivers/staging/greybus/camera.c 	req->padding = 0;
req               673 drivers/staging/greybus/camera.c 	req->num_frames = cpu_to_le16(num_frames);
req               674 drivers/staging/greybus/camera.c 	memcpy(req->settings, settings, settings_size);
req               684 drivers/staging/greybus/camera.c 				req, req_size, NULL, 0);
req               688 drivers/staging/greybus/camera.c 	kfree(req);
req               119 drivers/staging/greybus/light.c 	struct gb_lights_set_flash_intensity_request req;
req               129 drivers/staging/greybus/light.c 	req.light_id = channel->light->id;
req               130 drivers/staging/greybus/light.c 	req.channel_id = channel->id;
req               131 drivers/staging/greybus/light.c 	req.intensity_uA = cpu_to_le32(intensity);
req               134 drivers/staging/greybus/light.c 				&req, sizeof(req), NULL, 0);
req               322 drivers/staging/greybus/light.c 	struct gb_lights_set_fade_request req;
req               332 drivers/staging/greybus/light.c 	req.light_id = channel->light->id;
req               333 drivers/staging/greybus/light.c 	req.channel_id = channel->id;
req               334 drivers/staging/greybus/light.c 	req.fade_in = channel->fade_in;
req               335 drivers/staging/greybus/light.c 	req.fade_out = channel->fade_out;
req               337 drivers/staging/greybus/light.c 				&req, sizeof(req), NULL, 0);
req               348 drivers/staging/greybus/light.c 	struct gb_lights_set_color_request req;
req               358 drivers/staging/greybus/light.c 	req.light_id = channel->light->id;
req               359 drivers/staging/greybus/light.c 	req.channel_id = channel->id;
req               360 drivers/staging/greybus/light.c 	req.color = cpu_to_le32(color);
req               362 drivers/staging/greybus/light.c 				&req, sizeof(req), NULL, 0);
req               371 drivers/staging/greybus/light.c 	struct gb_lights_set_brightness_request req;
req               384 drivers/staging/greybus/light.c 	req.light_id = channel->light->id;
req               385 drivers/staging/greybus/light.c 	req.channel_id = channel->id;
req               386 drivers/staging/greybus/light.c 	req.brightness = (u8)channel->led->brightness;
req               389 drivers/staging/greybus/light.c 				&req, sizeof(req), NULL, 0);
req               456 drivers/staging/greybus/light.c 	struct gb_lights_blink_request req;
req               473 drivers/staging/greybus/light.c 	req.light_id = channel->light->id;
req               474 drivers/staging/greybus/light.c 	req.channel_id = channel->id;
req               475 drivers/staging/greybus/light.c 	req.time_on_ms = cpu_to_le16(*delay_on);
req               476 drivers/staging/greybus/light.c 	req.time_off_ms = cpu_to_le16(*delay_off);
req               478 drivers/staging/greybus/light.c 	ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_BLINK, &req,
req               479 drivers/staging/greybus/light.c 				sizeof(req), NULL, 0);
req               637 drivers/staging/greybus/light.c 	struct gb_lights_set_flash_strobe_request req;
req               647 drivers/staging/greybus/light.c 	req.light_id = channel->light->id;
req               648 drivers/staging/greybus/light.c 	req.channel_id = channel->id;
req               649 drivers/staging/greybus/light.c 	req.state = state ? 1 : 0;
req               652 drivers/staging/greybus/light.c 				&req, sizeof(req), NULL, 0);
req               678 drivers/staging/greybus/light.c 	struct gb_lights_set_flash_timeout_request req;
req               688 drivers/staging/greybus/light.c 	req.light_id = channel->light->id;
req               689 drivers/staging/greybus/light.c 	req.channel_id = channel->id;
req               690 drivers/staging/greybus/light.c 	req.timeout_us = cpu_to_le32(timeout);
req               693 drivers/staging/greybus/light.c 				&req, sizeof(req), NULL, 0);
req               709 drivers/staging/greybus/light.c 	struct gb_lights_get_flash_fault_request req;
req               720 drivers/staging/greybus/light.c 	req.light_id = channel->light->id;
req               721 drivers/staging/greybus/light.c 	req.channel_id = channel->id;
req               724 drivers/staging/greybus/light.c 				&req, sizeof(req), &resp, sizeof(resp));
req               828 drivers/staging/greybus/light.c 	struct gb_lights_get_channel_flash_config_request req;
req               833 drivers/staging/greybus/light.c 	req.light_id = channel->light->id;
req               834 drivers/staging/greybus/light.c 	req.channel_id = channel->id;
req               838 drivers/staging/greybus/light.c 				&req, sizeof(req), &conf, sizeof(conf));
req               948 drivers/staging/greybus/light.c 	struct gb_lights_get_channel_config_request req;
req               954 drivers/staging/greybus/light.c 	req.light_id = light->id;
req               955 drivers/staging/greybus/light.c 	req.channel_id = channel->id;
req               958 drivers/staging/greybus/light.c 				&req, sizeof(req), &conf, sizeof(conf));
req              1006 drivers/staging/greybus/light.c 	struct gb_lights_get_light_config_request req;
req              1014 drivers/staging/greybus/light.c 	req.id = id;
req              1018 drivers/staging/greybus/light.c 				&req, sizeof(req), &conf, sizeof(conf));
req               747 drivers/staging/greybus/loopback.c 	u64 req = gb->requests_completed * USEC_PER_SEC;
req               749 drivers/staging/greybus/loopback.c 	gb_loopback_update_stats_window(&gb->requests_per_second, req, latency);
req               477 drivers/staging/greybus/power_supply.c 	struct gb_power_supply_get_description_request req;
req               481 drivers/staging/greybus/power_supply.c 	req.psy_id = gbpsy->id;
req               485 drivers/staging/greybus/power_supply.c 				&req, sizeof(req), &resp, sizeof(resp));
req               509 drivers/staging/greybus/power_supply.c 	struct gb_power_supply_get_property_descriptors_request *req;
req               522 drivers/staging/greybus/power_supply.c 				 sizeof(*req),
req               528 drivers/staging/greybus/power_supply.c 	req = op->request->payload;
req               529 drivers/staging/greybus/power_supply.c 	req->psy_id = gbpsy->id;
req               594 drivers/staging/greybus/power_supply.c 	struct gb_power_supply_get_property_request req;
req               602 drivers/staging/greybus/power_supply.c 	req.psy_id = gbpsy->id;
req               603 drivers/staging/greybus/power_supply.c 	req.property = prop->gb_prop;
req               606 drivers/staging/greybus/power_supply.c 				&req, sizeof(req), &resp, sizeof(resp));
req               770 drivers/staging/greybus/power_supply.c 	struct gb_power_supply_set_property_request req;
req               783 drivers/staging/greybus/power_supply.c 	req.psy_id = gbpsy->id;
req               784 drivers/staging/greybus/power_supply.c 	req.property = prop->gb_prop;
req               785 drivers/staging/greybus/power_supply.c 	req.prop_val = cpu_to_le32((s32)val);
req               788 drivers/staging/greybus/power_supply.c 				&req, sizeof(req), NULL, 0);
req              1593 drivers/staging/isdn/gigaset/bas-gigaset.c static int req_submit(struct bc_state *bcs, int req, int val, int timeout)
req              1599 drivers/staging/isdn/gigaset/bas-gigaset.c 	gig_dbg(DEBUG_USBREQ, "-------> 0x%02x (%d)", req, val);
req              1607 drivers/staging/isdn/gigaset/bas-gigaset.c 			req, ucs->pending);
req              1612 drivers/staging/isdn/gigaset/bas-gigaset.c 	ucs->dr_ctrl.bRequest = req;
req              1624 drivers/staging/isdn/gigaset/bas-gigaset.c 			req, get_usb_rcmsg(ret));
req              1628 drivers/staging/isdn/gigaset/bas-gigaset.c 	ucs->pending = req;
req              1650 drivers/staging/isdn/gigaset/bas-gigaset.c 	int req, ret;
req              1679 drivers/staging/isdn/gigaset/bas-gigaset.c 	req = bcs->channel ? HD_OPEN_B2CHANNEL : HD_OPEN_B1CHANNEL;
req              1680 drivers/staging/isdn/gigaset/bas-gigaset.c 	ret = req_submit(bcs, req, 0, BAS_TIMEOUT);
req              1706 drivers/staging/isdn/gigaset/bas-gigaset.c 	int req, ret;
req              1724 drivers/staging/isdn/gigaset/bas-gigaset.c 	req = bcs->channel ? HD_CLOSE_B2CHANNEL : HD_CLOSE_B1CHANNEL;
req              1725 drivers/staging/isdn/gigaset/bas-gigaset.c 	ret = req_submit(bcs, req, 0, BAS_TIMEOUT);
req               372 drivers/staging/isdn/gigaset/capi.c 	unsigned char *req = skb_mac_header(dskb);
req               393 drivers/staging/isdn/gigaset/capi.c 	flags = CAPIMSG_FLAGS(req);
req               395 drivers/staging/isdn/gigaset/capi.c 		send_data_b3_conf(cs, &iif->ctr, ap->id, CAPIMSG_MSGID(req),
req               396 drivers/staging/isdn/gigaset/capi.c 				  bcs->channel + 1, CAPIMSG_HANDLE_REQ(req),
req               175 drivers/staging/isdn/gigaset/usb-gigaset.c static int set_value(struct cardstate *cs, u8 req, u16 val)
req               181 drivers/staging/isdn/gigaset/usb-gigaset.c 		(unsigned)req, (unsigned)val);
req               190 drivers/staging/isdn/gigaset/usb-gigaset.c 	r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), req, 0x41,
req               194 drivers/staging/isdn/gigaset/usb-gigaset.c 			-r, (unsigned)req);
req              1347 drivers/staging/ks7010/ks_hostif.c 			 struct hostif_request *req)
req              1349 drivers/staging/ks7010/ks_hostif.c 	req->phy_type = cpu_to_le16(priv->reg.phy_type);
req              1350 drivers/staging/ks7010/ks_hostif.c 	req->cts_mode = cpu_to_le16(priv->reg.cts_mode);
req              1351 drivers/staging/ks7010/ks_hostif.c 	req->scan_type = cpu_to_le16(priv->reg.scan_type);
req              1352 drivers/staging/ks7010/ks_hostif.c 	req->rate_set.size = priv->reg.rate_set.size;
req              1353 drivers/staging/ks7010/ks_hostif.c 	req->capability = ks_wlan_cap(priv);
req              1354 drivers/staging/ks7010/ks_hostif.c 	memcpy(&req->rate_set.body[0], &priv->reg.rate_set.body[0],
req              1122 drivers/staging/ks7010/ks_wlan_net.c 	struct iw_scan_req *req = NULL;
req              1131 drivers/staging/ks7010/ks_wlan_net.c 		req = (struct iw_scan_req *)extra;
req              1132 drivers/staging/ks7010/ks_wlan_net.c 		priv->scan_ssid_len = req->essid_len;
req              1133 drivers/staging/ks7010/ks_wlan_net.c 		memcpy(priv->scan_ssid, req->essid, priv->scan_ssid_len);
req               159 drivers/staging/media/hantro/hantro_drv.c 	v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
req               168 drivers/staging/media/hantro/hantro_drv.c 	v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
req               656 drivers/staging/media/hantro/hantro_v4l2.c 		v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
req               686 drivers/staging/media/hantro/hantro_v4l2.c 	v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->ctrl_handler);
req               154 drivers/staging/media/sunxi/cedrus/cedrus.c static int cedrus_request_validate(struct media_request *req)
req               163 drivers/staging/media/sunxi/cedrus/cedrus.c 	list_for_each_entry(obj, &req->objects, list) {
req               177 drivers/staging/media/sunxi/cedrus/cedrus.c 	count = vb2_request_buffer_cnt(req);
req               190 drivers/staging/media/sunxi/cedrus/cedrus.c 	hdl = v4l2_ctrl_request_hdl_find(req, parent_hdl);
req               212 drivers/staging/media/sunxi/cedrus/cedrus.c 	return vb2_request_validate(req);
req                36 drivers/staging/media/sunxi/cedrus/cedrus_dec.c 	src_req = run.src->vb2_buf.req_obj.req;
req               408 drivers/staging/media/sunxi/cedrus/cedrus_video.c 		v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
req               494 drivers/staging/media/sunxi/cedrus/cedrus_video.c 	v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->hdl);
req              1124 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		struct iw_scan_req *req = (struct iw_scan_req *)extra;
req              1127 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 			int len = min_t(int, req->essid_len,
req              1130 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 			memcpy(ssid[0].ssid, req->essid, len);
req              1133 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 			DBG_88E("IW_SCAN_THIS_ESSID, ssid =%s, len =%d\n", req->essid, req->essid_len);
req              1140 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		} else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
req               406 drivers/staging/rtl8192e/rtl8192e/rtl_wx.c 		struct iw_scan_req *req = (struct iw_scan_req *)b;
req               408 drivers/staging/rtl8192e/rtl8192e/rtl_wx.c 		if (req->essid_len) {
req               409 drivers/staging/rtl8192e/rtl8192e/rtl_wx.c 			ieee->current_network.ssid_len = req->essid_len;
req               410 drivers/staging/rtl8192e/rtl8192e/rtl_wx.c 			memcpy(ieee->current_network.ssid, req->essid,
req               411 drivers/staging/rtl8192e/rtl8192e/rtl_wx.c 			       req->essid_len);
req               221 drivers/staging/rtl8192e/rtl819x_BAProc.c 	struct rtllib_hdr_3addr *req = NULL;
req               242 drivers/staging/rtl8192e/rtl819x_BAProc.c 	req = (struct rtllib_hdr_3addr *) skb->data;
req               243 drivers/staging/rtl8192e/rtl819x_BAProc.c 	tag = (u8 *)req;
req               244 drivers/staging/rtl8192e/rtl819x_BAProc.c 	dst = (u8 *)(&req->addr2[0]);
req               249 drivers/staging/rtl8192e/rtl819x_BAProc.c 	pBaStartSeqCtrl = (union sequence_control *)(req + 7);
req               189 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 		struct aead_request *req;
req               196 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 		req = aead_request_alloc(key->tfm, GFP_ATOMIC);
req               197 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 		if (!req)
req               208 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 		aead_request_set_callback(req, 0, NULL, NULL);
req               209 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 		aead_request_set_ad(req, aad_len);
req               210 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 		aead_request_set_crypt(req, sg, sg, data_len, iv);
req               212 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 		ret = crypto_aead_encrypt(req);
req               213 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 		aead_request_free(req);
req               274 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 		struct aead_request *req;
req               280 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 		req = aead_request_alloc(key->tfm, GFP_ATOMIC);
req               281 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 		if(!req)
req               290 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 		aead_request_set_callback(req, 0, NULL, NULL);
req               291 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 		aead_request_set_ad(req, aad_len);
req               292 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 		aead_request_set_crypt(req, sg, sg, data_len, iv);
req               294 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 		ret = crypto_aead_decrypt(req);
req               295 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 		aead_request_free(req);
req               334 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
req               347 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		skcipher_request_set_sync_tfm(req, tkey->tx_tfm_arc4);
req               348 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		skcipher_request_set_callback(req, 0, NULL, NULL);
req               349 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
req               350 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		ret = crypto_skcipher_encrypt(req);
req               351 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		skcipher_request_zero(req);
req               417 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
req               445 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		skcipher_request_set_sync_tfm(req, tkey->rx_tfm_arc4);
req               446 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		skcipher_request_set_callback(req, 0, NULL, NULL);
req               447 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
req               448 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		err = crypto_skcipher_decrypt(req);
req               449 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		skcipher_request_zero(req);
req               134 drivers/staging/rtl8192e/rtllib_crypt_wep.c 		SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
req               146 drivers/staging/rtl8192e/rtllib_crypt_wep.c 		skcipher_request_set_sync_tfm(req, wep->tx_tfm);
req               147 drivers/staging/rtl8192e/rtllib_crypt_wep.c 		skcipher_request_set_callback(req, 0, NULL, NULL);
req               148 drivers/staging/rtl8192e/rtllib_crypt_wep.c 		skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
req               149 drivers/staging/rtl8192e/rtllib_crypt_wep.c 		err = crypto_skcipher_encrypt(req);
req               150 drivers/staging/rtl8192e/rtllib_crypt_wep.c 		skcipher_request_zero(req);
req               198 drivers/staging/rtl8192e/rtllib_crypt_wep.c 		SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
req               202 drivers/staging/rtl8192e/rtllib_crypt_wep.c 		skcipher_request_set_sync_tfm(req, wep->rx_tfm);
req               203 drivers/staging/rtl8192e/rtllib_crypt_wep.c 		skcipher_request_set_callback(req, 0, NULL, NULL);
req               204 drivers/staging/rtl8192e/rtllib_crypt_wep.c 		skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
req               205 drivers/staging/rtl8192e/rtllib_crypt_wep.c 		err = crypto_skcipher_decrypt(req);
req               206 drivers/staging/rtl8192e/rtllib_crypt_wep.c 		skcipher_request_zero(req);
req               337 drivers/staging/rtl8192e/rtllib_softmac.c 	struct rtllib_probe_request *req;
req               351 drivers/staging/rtl8192e/rtllib_softmac.c 	req = skb_put(skb, sizeof(struct rtllib_probe_request));
req               352 drivers/staging/rtl8192e/rtllib_softmac.c 	req->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_PROBE_REQ);
req               353 drivers/staging/rtl8192e/rtllib_softmac.c 	req->header.duration_id = 0;
req               355 drivers/staging/rtl8192e/rtllib_softmac.c 	eth_broadcast_addr(req->header.addr1);
req               356 drivers/staging/rtl8192e/rtllib_softmac.c 	ether_addr_copy(req->header.addr2, ieee->dev->dev_addr);
req               357 drivers/staging/rtl8192e/rtllib_softmac.c 	eth_broadcast_addr(req->header.addr3);
req               196 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 		struct aead_request *req;
req               203 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 		req = aead_request_alloc(key->tfm, GFP_ATOMIC);
req               204 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 		if (!req)
req               216 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 		aead_request_set_callback(req, 0, NULL, NULL);
req               217 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 		aead_request_set_ad(req, aad_len);
req               218 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 		aead_request_set_crypt(req, sg, sg, data_len, iv);
req               220 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 		ret = crypto_aead_encrypt(req);
req               221 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 		aead_request_free(req);
req               283 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 		struct aead_request *req;
req               290 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 		req = aead_request_alloc(key->tfm, GFP_ATOMIC);
req               291 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 		if (!req)
req               300 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 		aead_request_set_callback(req, 0, NULL, NULL);
req               301 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 		aead_request_set_ad(req, aad_len);
req               302 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 		aead_request_set_crypt(req, sg, sg, data_len, iv);
req               304 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 		ret = crypto_aead_decrypt(req);
req               305 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 		aead_request_free(req);
req               337 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
req               347 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 		skcipher_request_set_sync_tfm(req, tkey->tx_tfm_arc4);
req               348 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 		skcipher_request_set_callback(req, 0, NULL, NULL);
req               349 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 		skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
req               350 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 		ret = crypto_skcipher_encrypt(req);
req               351 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 		skcipher_request_zero(req);
req               415 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
req               440 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 		skcipher_request_set_sync_tfm(req, tkey->rx_tfm_arc4);
req               441 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 		skcipher_request_set_callback(req, 0, NULL, NULL);
req               442 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 		skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
req               444 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 		err = crypto_skcipher_decrypt(req);
req               445 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 		skcipher_request_zero(req);
req               127 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 		SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
req               140 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 		skcipher_request_set_sync_tfm(req, wep->tx_tfm);
req               141 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 		skcipher_request_set_callback(req, 0, NULL, NULL);
req               142 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 		skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
req               144 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 		err = crypto_skcipher_encrypt(req);
req               145 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 		skcipher_request_zero(req);
req               192 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 		SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
req               197 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 		skcipher_request_set_sync_tfm(req, wep->rx_tfm);
req               198 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 		skcipher_request_set_callback(req, 0, NULL, NULL);
req               199 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 		skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
req               201 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 		err = crypto_skcipher_decrypt(req);
req               202 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 		skcipher_request_zero(req);
req               315 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct ieee80211_probe_request *req;
req               328 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	req = skb_put(skb, sizeof(struct ieee80211_probe_request));
req               329 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	req->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
req               330 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	req->header.duration_id = 0; /* FIXME: is this OK? */
req               332 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	eth_broadcast_addr(req->header.addr1);
req               333 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	memcpy(req->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
req               334 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	eth_broadcast_addr(req->header.addr3);
req               317 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	struct rtl_80211_hdr_3addr *req = NULL;
req               336 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	req = (struct rtl_80211_hdr_3addr *)skb->data;
req               337 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	tag = (u8 *)req;
req               338 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	dst = &req->addr2[0];
req               343 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	pBaStartSeqCtrl = (union sequence_control *)(req + 7);
req               333 drivers/staging/rtl8192u/r8192U_wx.c 		struct iw_scan_req *req = (struct iw_scan_req *)b;
req               335 drivers/staging/rtl8192u/r8192U_wx.c 		if (req->essid_len) {
req               336 drivers/staging/rtl8192u/r8192U_wx.c 			ieee->current_network.ssid_len = req->essid_len;
req               337 drivers/staging/rtl8192u/r8192U_wx.c 			memcpy(ieee->current_network.ssid, req->essid, req->essid_len);
req              1152 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 		struct iw_scan_req *req = (struct iw_scan_req *)extra;
req              1157 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 			u32 len = min_t(u8, req->essid_len, IW_ESSID_MAX_SIZE);
req              1161 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 			memcpy(ssid.Ssid, req->essid, len);
req              1311 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		struct iw_scan_req *req = (struct iw_scan_req *)extra;
req              1314 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 			int len = min((int)req->essid_len, IW_ESSID_MAX_SIZE);
req              1316 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 			memcpy(ssid[0].Ssid, req->essid, len);
req              1319 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 			DBG_871X("IW_SCAN_THIS_ESSID, ssid =%s, len =%d\n", req->essid, req->essid_len);
req              1327 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		} else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
req               544 drivers/staging/wlan-ng/p80211netdev.c 	struct p80211ioctl_req *req = (struct p80211ioctl_req *)ifr;
req               548 drivers/staging/wlan-ng/p80211netdev.c 	netdev_dbg(dev, "rx'd ioctl, cmd=%d, len=%d\n", cmd, req->len);
req               559 drivers/staging/wlan-ng/p80211netdev.c 	if (req->magic != P80211_IOCTL_MAGIC) {
req               573 drivers/staging/wlan-ng/p80211netdev.c 	msgbuf = kmalloc(req->len, GFP_KERNEL);
req               575 drivers/staging/wlan-ng/p80211netdev.c 		if (copy_from_user(msgbuf, (void __user *)req->data, req->len))
req               582 drivers/staging/wlan-ng/p80211netdev.c 			    ((void __user *)req->data, msgbuf, req->len)) {
req               176 drivers/staging/wlan-ng/p80211req.c 		struct p80211msg_lnxreq_hostwep *req =
req               180 drivers/staging/wlan-ng/p80211req.c 		if (req->decrypt.data == P80211ENUM_truth_true)
req               182 drivers/staging/wlan-ng/p80211req.c 		if (req->encrypt.data == P80211ENUM_truth_true)
req               386 drivers/staging/wlan-ng/prism2mgmt.c 	struct p80211msg_dot11req_scan_results *req;
req               392 drivers/staging/wlan-ng/prism2mgmt.c 	req = msgp;
req               394 drivers/staging/wlan-ng/prism2mgmt.c 	req->resultcode.status = P80211ENUM_msgitem_status_data_ok;
req               400 drivers/staging/wlan-ng/prism2mgmt.c 		req->resultcode.data = P80211ENUM_resultcode_invalid_parameters;
req               408 drivers/staging/wlan-ng/prism2mgmt.c 	if (req->bssindex.data >= count) {
req               411 drivers/staging/wlan-ng/prism2mgmt.c 			   req->bssindex.data, count);
req               413 drivers/staging/wlan-ng/prism2mgmt.c 		req->resultcode.data = P80211ENUM_resultcode_invalid_parameters;
req               417 drivers/staging/wlan-ng/prism2mgmt.c 	item = &hw->scanresults->info.hscanresult.result[req->bssindex.data];
req               419 drivers/staging/wlan-ng/prism2mgmt.c 	req->signal.status = P80211ENUM_msgitem_status_data_ok;
req               420 drivers/staging/wlan-ng/prism2mgmt.c 	req->noise.status = P80211ENUM_msgitem_status_data_ok;
req               421 drivers/staging/wlan-ng/prism2mgmt.c 	req->signal.data = le16_to_cpu(item->sl);
req               422 drivers/staging/wlan-ng/prism2mgmt.c 	req->noise.data = le16_to_cpu(item->anl);
req               425 drivers/staging/wlan-ng/prism2mgmt.c 	req->bssid.status = P80211ENUM_msgitem_status_data_ok;
req               426 drivers/staging/wlan-ng/prism2mgmt.c 	req->bssid.data.len = WLAN_BSSID_LEN;
req               427 drivers/staging/wlan-ng/prism2mgmt.c 	memcpy(req->bssid.data.data, item->bssid, WLAN_BSSID_LEN);
req               430 drivers/staging/wlan-ng/prism2mgmt.c 	req->ssid.status = P80211ENUM_msgitem_status_data_ok;
req               431 drivers/staging/wlan-ng/prism2mgmt.c 	req->ssid.data.len = le16_to_cpu(item->ssid.len);
req               432 drivers/staging/wlan-ng/prism2mgmt.c 	req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_SSID_MAXLEN);
req               433 drivers/staging/wlan-ng/prism2mgmt.c 	memcpy(req->ssid.data.data, item->ssid.data, req->ssid.data.len);
req               444 drivers/staging/wlan-ng/prism2mgmt.c 			req->basicrate ## N .data = item->supprates[(N) - 1]; \
req               445 drivers/staging/wlan-ng/prism2mgmt.c 			req->basicrate ## N .status = \
req               462 drivers/staging/wlan-ng/prism2mgmt.c 			req->supprate ## N .data = item->supprates[(N) - 1]; \
req               463 drivers/staging/wlan-ng/prism2mgmt.c 			req->supprate ## N .status = \
req               478 drivers/staging/wlan-ng/prism2mgmt.c 	req->beaconperiod.status = P80211ENUM_msgitem_status_data_ok;
req               479 drivers/staging/wlan-ng/prism2mgmt.c 	req->beaconperiod.data = le16_to_cpu(item->bcnint);
req               482 drivers/staging/wlan-ng/prism2mgmt.c 	req->timestamp.status = P80211ENUM_msgitem_status_data_ok;
req               483 drivers/staging/wlan-ng/prism2mgmt.c 	req->timestamp.data = jiffies;
req               484 drivers/staging/wlan-ng/prism2mgmt.c 	req->localtime.status = P80211ENUM_msgitem_status_data_ok;
req               485 drivers/staging/wlan-ng/prism2mgmt.c 	req->localtime.data = jiffies;
req               488 drivers/staging/wlan-ng/prism2mgmt.c 	req->ibssatimwindow.status = P80211ENUM_msgitem_status_data_ok;
req               489 drivers/staging/wlan-ng/prism2mgmt.c 	req->ibssatimwindow.data = le16_to_cpu(item->atim);
req               492 drivers/staging/wlan-ng/prism2mgmt.c 	req->dschannel.status = P80211ENUM_msgitem_status_data_ok;
req               493 drivers/staging/wlan-ng/prism2mgmt.c 	req->dschannel.data = le16_to_cpu(item->chid);
req               497 drivers/staging/wlan-ng/prism2mgmt.c 	req->capinfo.status = P80211ENUM_msgitem_status_data_ok;
req               498 drivers/staging/wlan-ng/prism2mgmt.c 	req->capinfo.data = count;
req               501 drivers/staging/wlan-ng/prism2mgmt.c 	req->privacy.status = P80211ENUM_msgitem_status_data_ok;
req               502 drivers/staging/wlan-ng/prism2mgmt.c 	req->privacy.data = WLAN_GET_MGMT_CAP_INFO_PRIVACY(count);
req               505 drivers/staging/wlan-ng/prism2mgmt.c 	req->cfpollable.status = P80211ENUM_msgitem_status_data_ok;
req               506 drivers/staging/wlan-ng/prism2mgmt.c 	req->cfpollable.data = WLAN_GET_MGMT_CAP_INFO_CFPOLLABLE(count);
req               509 drivers/staging/wlan-ng/prism2mgmt.c 	req->cfpollreq.status = P80211ENUM_msgitem_status_data_ok;
req               510 drivers/staging/wlan-ng/prism2mgmt.c 	req->cfpollreq.data = WLAN_GET_MGMT_CAP_INFO_CFPOLLREQ(count);
req               513 drivers/staging/wlan-ng/prism2mgmt.c 	req->bsstype.status = P80211ENUM_msgitem_status_data_ok;
req               514 drivers/staging/wlan-ng/prism2mgmt.c 	req->bsstype.data = (WLAN_GET_MGMT_CAP_INFO_ESS(count)) ?
req               518 drivers/staging/wlan-ng/prism2mgmt.c 	req->resultcode.data = P80211ENUM_resultcode_success;
req               651 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct cpl_abort_req *req = cplhdr(skb);
req               654 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	req->cmd = CPL_ABORT_NO_RST;
req              1128 drivers/target/iscsi/cxgbit/cxgbit_cm.c cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
req              1155 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		      req->tcpopt.tstamp,
req              1181 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	if (req->tcpopt.tstamp)
req              1183 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	if (req->tcpopt.sack)
req              1188 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	hlen = ntohl(req->hdr_len);
req              1191 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		tcph = (struct tcphdr *)((u8 *)(req + 1) +
req              1194 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		tcph = (struct tcphdr *)((u8 *)(req + 1) +
req              1220 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct cpl_pass_accept_req *req = cplhdr(skb);
req              1221 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
req              1223 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	unsigned int tid = GET_TID(req);
req              1224 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	u16 peer_mss = ntohs(req->tcpopt.mss);
req              1256 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
req              1270 drivers/target/iscsi/cxgbit/cxgbit_cm.c 				      PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
req              1280 drivers/target/iscsi/cxgbit/cxgbit_cm.c 				       PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
req              1312 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		sizeof(struct tcphdr) +	(req->tcpopt.tstamp ? 12 : 0);
req              1319 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
req              1379 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	cxgbit_pass_accept_rpl(csk, req);
req              1491 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct cpl_set_tcb_field *req;
req              1494 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	unsigned int len = roundup(sizeof(*req), 16);
req              1502 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	req = __skb_put_zero(skb, len);
req              1504 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	INIT_TP_WR(req, csk->tid);
req              1505 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
req              1506 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
req              1507 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	req->word_cookie = htons(0);
req              1508 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	req->mask = cpu_to_be64(0x3 << 4);
req              1509 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
req              1530 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct cpl_set_tcb_field *req;
req              1531 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	unsigned int len = roundup(sizeof(*req), 16);
req              1538 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	req = __skb_put_zero(skb, len);
req              1540 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	INIT_TP_WR(req, csk->tid);
req              1541 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
req              1542 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
req              1543 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	req->word_cookie = htons(0);
req              1544 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	req->mask = cpu_to_be64(0x3 << 8);
req              1545 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	req->val = cpu_to_be64(pg_idx << 8);
req              1609 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct cpl_pass_establish *req = cplhdr(skb);
req              1611 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	unsigned int tid = GET_TID(req);
req              1614 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	u16 tcp_opt = be16_to_cpu(req->tcp_opt);
req              1615 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	u32 snd_isn = be32_to_cpu(req->snd_isn);
req              1616 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	u32 rcv_isn = be32_to_cpu(req->rcv_isn);
req                67 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	struct ulp_mem_io *req;
req                79 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	req = __skb_put(skb, wr_len);
req                80 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	INIT_ULPTX_WR(req, wr_len, 0, tid);
req                81 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
req                83 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
req                86 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
req                87 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
req                88 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
req                90 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	idata = (struct ulptx_idata *)(req + 1);
req               105 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	struct ulp_mem_io *req;
req               114 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	req = (struct ulp_mem_io *)skb->data;
req               115 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	idata = (struct ulptx_idata *)(req + 1);
req               163 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct fw_ofld_tx_data_wr *req;
req               167 drivers/target/iscsi/cxgbit/cxgbit_target.c 	u32 hdr_size = sizeof(*req);
req               183 drivers/target/iscsi/cxgbit/cxgbit_target.c 	req = __skb_push(skb, hdr_size);
req               184 drivers/target/iscsi/cxgbit/cxgbit_target.c 	req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) |
req               187 drivers/target/iscsi/cxgbit/cxgbit_target.c 	req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
req               189 drivers/target/iscsi/cxgbit/cxgbit_target.c 	req->plen = htonl(len);
req               193 drivers/target/iscsi/cxgbit/cxgbit_target.c 	req->tunnel_to_proxy = htonl((wr_ulp_mode) | force |
req               270 drivers/target/iscsi/cxgbit/cxgbit_target.c 			struct cpl_close_con_req *req =
req               272 drivers/target/iscsi/cxgbit/cxgbit_target.c 			req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
req              1331 drivers/target/iscsi/cxgbit/cxgbit_target.c 	login_req = (struct iscsi_login_req *)login->req;
req              1343 drivers/target/iscsi/cxgbit/cxgbit_target.c 		login_req = (struct iscsi_login_req *)login->req;
req              1036 drivers/target/iscsi/iscsi_target_login.c 	if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0)
req              1039 drivers/target/iscsi/iscsi_target_login.c 	login_req = (struct iscsi_login_req *)login->req;
req              1052 drivers/target/iscsi/iscsi_target_login.c 		login_req = (struct iscsi_login_req *)login->req;
req              1316 drivers/target/iscsi/iscsi_target_login.c 	buffer = &login->req[0];
req               175 drivers/target/iscsi/iscsi_target_nego.c 	login_req = (struct iscsi_login_req *) login->req;
req               752 drivers/target/iscsi/iscsi_target_nego.c 	login_req = (struct iscsi_login_req *) login->req;
req               811 drivers/target/iscsi/iscsi_target_nego.c 	login_req = (struct iscsi_login_req *) login->req;
req               899 drivers/target/iscsi/iscsi_target_nego.c 	login_req = (struct iscsi_login_req *) login->req;
req               960 drivers/target/iscsi/iscsi_target_nego.c 	login_req = (struct iscsi_login_req *) login->req;
req              1058 drivers/target/iscsi/iscsi_target_nego.c 	login_req = (struct iscsi_login_req *) login->req;
req                53 drivers/target/sbp/sbp_target.c static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
req                58 drivers/target/sbp/sbp_target.c 	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
req                59 drivers/target/sbp/sbp_target.c 			req->node_addr, req->generation, req->speed,
req                65 drivers/target/sbp/sbp_target.c 	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
req                66 drivers/target/sbp/sbp_target.c 			req->node_addr, req->generation, req->speed,
req               259 drivers/target/sbp/sbp_target.c 	struct sbp_management_agent *agent, struct sbp_management_request *req,
req               272 drivers/target/sbp/sbp_target.c 			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
req               275 drivers/target/sbp/sbp_target.c 			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
req               277 drivers/target/sbp/sbp_target.c 		req->status.status = cpu_to_be32(
req               283 drivers/target/sbp/sbp_target.c 	ret = read_peer_guid(&guid, req);
req               287 drivers/target/sbp/sbp_target.c 		req->status.status = cpu_to_be32(
req               325 drivers/target/sbp/sbp_target.c 	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
req               329 drivers/target/sbp/sbp_target.c 		req->status.status = cpu_to_be32(
req               342 drivers/target/sbp/sbp_target.c 		req->status.status = cpu_to_be32(
req               356 drivers/target/sbp/sbp_target.c 		req->status.status = cpu_to_be32(
req               374 drivers/target/sbp/sbp_target.c 			req->status.status = cpu_to_be32(
req               381 drivers/target/sbp/sbp_target.c 		sess->node_id = req->node_addr;
req               382 drivers/target/sbp/sbp_target.c 		sess->card = fw_card_get(req->card);
req               383 drivers/target/sbp/sbp_target.c 		sess->generation = req->generation;
req               384 drivers/target/sbp/sbp_target.c 		sess->speed = req->speed;
req               392 drivers/target/sbp/sbp_target.c 		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
req               401 drivers/target/sbp/sbp_target.c 		req->status.status = cpu_to_be32(
req               409 drivers/target/sbp/sbp_target.c 	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
req               410 drivers/target/sbp/sbp_target.c 	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
req               421 drivers/target/sbp/sbp_target.c 		req->status.status = cpu_to_be32(
req               438 drivers/target/sbp/sbp_target.c 		req->status.status = cpu_to_be32(
req               445 drivers/target/sbp/sbp_target.c 			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
req               456 drivers/target/sbp/sbp_target.c 		sbp2_pointer_to_addr(&req->orb.ptr2), response,
req               464 drivers/target/sbp/sbp_target.c 		req->status.status = cpu_to_be32(
req               472 drivers/target/sbp/sbp_target.c 	req->status.status = cpu_to_be32(
req               478 drivers/target/sbp/sbp_target.c 	struct sbp_management_agent *agent, struct sbp_management_request *req,
req               484 drivers/target/sbp/sbp_target.c 	req->status.status = cpu_to_be32(
req               490 drivers/target/sbp/sbp_target.c 	struct sbp_management_agent *agent, struct sbp_management_request *req,
req               499 drivers/target/sbp/sbp_target.c 	ret = read_peer_guid(&guid, req);
req               503 drivers/target/sbp/sbp_target.c 		req->status.status = cpu_to_be32(
req               512 drivers/target/sbp/sbp_target.c 		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));
req               517 drivers/target/sbp/sbp_target.c 		req->status.status = cpu_to_be32(
req               526 drivers/target/sbp/sbp_target.c 		req->status.status = cpu_to_be32(
req               537 drivers/target/sbp/sbp_target.c 	login->sess->generation = req->generation;
req               538 drivers/target/sbp/sbp_target.c 	login->sess->node_id = req->node_addr;
req               539 drivers/target/sbp/sbp_target.c 	login->sess->card = fw_card_get(req->card);
req               540 drivers/target/sbp/sbp_target.c 	login->sess->speed = req->speed;
req               543 drivers/target/sbp/sbp_target.c 	req->status.status = cpu_to_be32(
req               549 drivers/target/sbp/sbp_target.c 	struct sbp_management_agent *agent, struct sbp_management_request *req,
req               557 drivers/target/sbp/sbp_target.c 	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
req               563 drivers/target/sbp/sbp_target.c 		req->status.status = cpu_to_be32(
req               572 drivers/target/sbp/sbp_target.c 	if (req->node_addr != login->sess->node_id) {
req               575 drivers/target/sbp/sbp_target.c 		req->status.status = cpu_to_be32(
req               583 drivers/target/sbp/sbp_target.c 	req->status.status = cpu_to_be32(
req               856 drivers/target/sbp/sbp_target.c 	struct sbp_target_request *req =
req               860 drivers/target/sbp/sbp_target.c 			req->orb_pointer,
req               861 drivers/target/sbp/sbp_target.c 			sbp2_pointer_to_addr(&req->orb.next_orb),
req               862 drivers/target/sbp/sbp_target.c 			sbp2_pointer_to_addr(&req->orb.data_descriptor),
req               863 drivers/target/sbp/sbp_target.c 			be32_to_cpu(req->orb.misc));
req               865 drivers/target/sbp/sbp_target.c 	if (req->orb_pointer >> 32)
req               868 drivers/target/sbp/sbp_target.c 	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
req               870 drivers/target/sbp/sbp_target.c 			sbp_handle_command(req);
req               874 drivers/target/sbp/sbp_target.c 			req->status.status |= cpu_to_be32(
req               881 drivers/target/sbp/sbp_target.c 			sbp_send_status(req);
req               884 drivers/target/sbp/sbp_target.c 			req->status.status |= cpu_to_be32(
req               891 drivers/target/sbp/sbp_target.c 			sbp_send_status(req);
req               914 drivers/target/sbp/sbp_target.c 	struct sbp_target_request *req;
req               921 drivers/target/sbp/sbp_target.c 	req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
req               922 drivers/target/sbp/sbp_target.c 	memset(req, 0, sizeof(*req));
req               923 drivers/target/sbp/sbp_target.c 	req->se_cmd.map_tag = tag;
req               924 drivers/target/sbp/sbp_target.c 	req->se_cmd.map_cpu = cpu;
req               925 drivers/target/sbp/sbp_target.c 	req->se_cmd.tag = next_orb;
req               927 drivers/target/sbp/sbp_target.c 	return req;
req               935 drivers/target/sbp/sbp_target.c 	struct sbp_target_request *req;
req               941 drivers/target/sbp/sbp_target.c 		req = sbp_mgt_get_req(sess, sess->card, next_orb);
req               942 drivers/target/sbp/sbp_target.c 		if (IS_ERR(req)) {
req               949 drivers/target/sbp/sbp_target.c 		req->login = agent->login;
req               950 drivers/target/sbp/sbp_target.c 		req->orb_pointer = next_orb;
req               952 drivers/target/sbp/sbp_target.c 		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
req               953 drivers/target/sbp/sbp_target.c 					req->orb_pointer >> 32));
req               954 drivers/target/sbp/sbp_target.c 		req->status.orb_low = cpu_to_be32(
req               955 drivers/target/sbp/sbp_target.c 				req->orb_pointer & 0xfffffffc);
req               960 drivers/target/sbp/sbp_target.c 				req->orb_pointer, &req->orb, sizeof(req->orb));
req               963 drivers/target/sbp/sbp_target.c 			req->status.status |= cpu_to_be32(
req               976 drivers/target/sbp/sbp_target.c 			sbp_send_status(req);
req               981 drivers/target/sbp/sbp_target.c 		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
req               983 drivers/target/sbp/sbp_target.c 			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
req               986 drivers/target/sbp/sbp_target.c 			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
req               987 drivers/target/sbp/sbp_target.c 			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
req               992 drivers/target/sbp/sbp_target.c 			INIT_WORK(&req->work, tgt_agent_process_work);
req               993 drivers/target/sbp/sbp_target.c 			queue_work(system_unbound_wq, &req->work);
req               996 drivers/target/sbp/sbp_target.c 			sbp_free_request(req);
req              1085 drivers/target/sbp/sbp_target.c static int sbp_run_request_transaction(struct sbp_target_request *req,
req              1089 drivers/target/sbp/sbp_target.c 	struct sbp_login_descriptor *login = req->login;
req              1109 drivers/target/sbp/sbp_target.c static int sbp_fetch_command(struct sbp_target_request *req)
req              1113 drivers/target/sbp/sbp_target.c 	cmd_len = scsi_command_size(req->orb.command_block);
req              1115 drivers/target/sbp/sbp_target.c 	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
req              1116 drivers/target/sbp/sbp_target.c 	if (!req->cmd_buf)
req              1119 drivers/target/sbp/sbp_target.c 	memcpy(req->cmd_buf, req->orb.command_block,
req              1120 drivers/target/sbp/sbp_target.c 		min_t(int, cmd_len, sizeof(req->orb.command_block)));
req              1122 drivers/target/sbp/sbp_target.c 	if (cmd_len > sizeof(req->orb.command_block)) {
req              1124 drivers/target/sbp/sbp_target.c 		copy_len = cmd_len - sizeof(req->orb.command_block);
req              1126 drivers/target/sbp/sbp_target.c 		ret = sbp_run_request_transaction(req,
req              1128 drivers/target/sbp/sbp_target.c 				req->orb_pointer + sizeof(req->orb),
req              1129 drivers/target/sbp/sbp_target.c 				req->cmd_buf + sizeof(req->orb.command_block),
req              1138 drivers/target/sbp/sbp_target.c static int sbp_fetch_page_table(struct sbp_target_request *req)
req              1143 drivers/target/sbp/sbp_target.c 	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
req              1146 drivers/target/sbp/sbp_target.c 	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
req              1153 drivers/target/sbp/sbp_target.c 	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
req              1154 drivers/target/sbp/sbp_target.c 			sbp2_pointer_to_addr(&req->orb.data_descriptor),
req              1161 drivers/target/sbp/sbp_target.c 	req->pg_tbl = pg_tbl;
req              1165 drivers/target/sbp/sbp_target.c static void sbp_calc_data_length_direction(struct sbp_target_request *req,
req              1170 drivers/target/sbp/sbp_target.c 	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
req              1171 drivers/target/sbp/sbp_target.c 	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));
req              1181 drivers/target/sbp/sbp_target.c 	if (req->pg_tbl) {
req              1185 drivers/target/sbp/sbp_target.c 					req->pg_tbl[idx].segment_length);
req              1192 drivers/target/sbp/sbp_target.c static void sbp_handle_command(struct sbp_target_request *req)
req              1194 drivers/target/sbp/sbp_target.c 	struct sbp_login_descriptor *login = req->login;
req              1200 drivers/target/sbp/sbp_target.c 	ret = sbp_fetch_command(req);
req              1206 drivers/target/sbp/sbp_target.c 	ret = sbp_fetch_page_table(req);
req              1213 drivers/target/sbp/sbp_target.c 	unpacked_lun = req->login->login_lun;
req              1214 drivers/target/sbp/sbp_target.c 	sbp_calc_data_length_direction(req, &data_length, &data_dir);
req              1217 drivers/target/sbp/sbp_target.c 			req->orb_pointer, unpacked_lun, data_length, data_dir);
req              1220 drivers/target/sbp/sbp_target.c 	req->se_cmd.tag = req->orb_pointer;
req              1221 drivers/target/sbp/sbp_target.c 	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
req              1222 drivers/target/sbp/sbp_target.c 			      req->sense_buf, unpacked_lun, data_length,
req              1229 drivers/target/sbp/sbp_target.c 	req->status.status |= cpu_to_be32(
req              1234 drivers/target/sbp/sbp_target.c 	sbp_send_status(req);
req              1241 drivers/target/sbp/sbp_target.c static int sbp_rw_data(struct sbp_target_request *req)
req              1243 drivers/target/sbp/sbp_target.c 	struct sbp_session *sess = req->login->sess;
req              1252 drivers/target/sbp/sbp_target.c 	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
req              1260 drivers/target/sbp/sbp_target.c 	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
req              1261 drivers/target/sbp/sbp_target.c 	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));
req              1263 drivers/target/sbp/sbp_target.c 	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
req              1275 drivers/target/sbp/sbp_target.c 	if (req->pg_tbl) {
req              1276 drivers/target/sbp/sbp_target.c 		pte = req->pg_tbl;
req              1277 drivers/target/sbp/sbp_target.c 		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
req              1285 drivers/target/sbp/sbp_target.c 		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
req              1286 drivers/target/sbp/sbp_target.c 		length = req->se_cmd.data_length;
req              1289 drivers/target/sbp/sbp_target.c 	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
req              1331 drivers/target/sbp/sbp_target.c static int sbp_send_status(struct sbp_target_request *req)
req              1334 drivers/target/sbp/sbp_target.c 	struct sbp_login_descriptor *login = req->login;
req              1336 drivers/target/sbp/sbp_target.c 	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
req              1338 drivers/target/sbp/sbp_target.c 	rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
req              1339 drivers/target/sbp/sbp_target.c 			login->status_fifo_addr, &req->status, length);
req              1347 drivers/target/sbp/sbp_target.c 			req->orb_pointer);
req              1354 drivers/target/sbp/sbp_target.c 	target_put_sess_cmd(&req->se_cmd);
req              1358 drivers/target/sbp/sbp_target.c static void sbp_sense_mangle(struct sbp_target_request *req)
req              1360 drivers/target/sbp/sbp_target.c 	struct se_cmd *se_cmd = &req->se_cmd;
req              1361 drivers/target/sbp/sbp_target.c 	u8 *sense = req->sense_buf;
req              1362 drivers/target/sbp/sbp_target.c 	u8 *status = req->status.data;
req              1382 drivers/target/sbp/sbp_target.c 		req->status.status |= cpu_to_be32(
req              1418 drivers/target/sbp/sbp_target.c 	req->status.status |= cpu_to_be32(
req              1425 drivers/target/sbp/sbp_target.c static int sbp_send_sense(struct sbp_target_request *req)
req              1427 drivers/target/sbp/sbp_target.c 	struct se_cmd *se_cmd = &req->se_cmd;
req              1430 drivers/target/sbp/sbp_target.c 		sbp_sense_mangle(req);
req              1432 drivers/target/sbp/sbp_target.c 		req->status.status |= cpu_to_be32(
req              1439 drivers/target/sbp/sbp_target.c 	return sbp_send_status(req);
req              1442 drivers/target/sbp/sbp_target.c static void sbp_free_request(struct sbp_target_request *req)
req              1444 drivers/target/sbp/sbp_target.c 	struct se_cmd *se_cmd = &req->se_cmd;
req              1447 drivers/target/sbp/sbp_target.c 	kfree(req->pg_tbl);
req              1448 drivers/target/sbp/sbp_target.c 	kfree(req->cmd_buf);
req              1457 drivers/target/sbp/sbp_target.c 	struct sbp_management_request *req = agent->request;
req              1462 drivers/target/sbp/sbp_target.c 	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
req              1463 drivers/target/sbp/sbp_target.c 		req->node_addr, req->generation, req->speed,
req              1464 drivers/target/sbp/sbp_target.c 		agent->orb_offset, &req->orb, sizeof(req->orb));
req              1471 drivers/target/sbp/sbp_target.c 		sbp2_pointer_to_addr(&req->orb.ptr1),
req              1472 drivers/target/sbp/sbp_target.c 		sbp2_pointer_to_addr(&req->orb.ptr2),
req              1473 drivers/target/sbp/sbp_target.c 		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
req              1474 drivers/target/sbp/sbp_target.c 		sbp2_pointer_to_addr(&req->orb.status_fifo));
req              1476 drivers/target/sbp/sbp_target.c 	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
req              1477 drivers/target/sbp/sbp_target.c 		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
req              1482 drivers/target/sbp/sbp_target.c 	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
req              1484 drivers/target/sbp/sbp_target.c 		sbp_management_request_login(agent, req, &status_data_len);
req              1488 drivers/target/sbp/sbp_target.c 		sbp_management_request_query_logins(agent, req,
req              1493 drivers/target/sbp/sbp_target.c 		sbp_management_request_reconnect(agent, req, &status_data_len);
req              1499 drivers/target/sbp/sbp_target.c 		req->status.status = cpu_to_be32(
req              1506 drivers/target/sbp/sbp_target.c 		sbp_management_request_logout(agent, req, &status_data_len);
req              1512 drivers/target/sbp/sbp_target.c 		req->status.status = cpu_to_be32(
req              1521 drivers/target/sbp/sbp_target.c 		req->status.status = cpu_to_be32(
req              1530 drivers/target/sbp/sbp_target.c 		req->status.status = cpu_to_be32(
req              1539 drivers/target/sbp/sbp_target.c 		req->status.status = cpu_to_be32(
req              1547 drivers/target/sbp/sbp_target.c 			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));
req              1549 drivers/target/sbp/sbp_target.c 		req->status.status = cpu_to_be32(
req              1556 drivers/target/sbp/sbp_target.c 	req->status.status |= cpu_to_be32(
req              1560 drivers/target/sbp/sbp_target.c 	req->status.orb_low = cpu_to_be32(agent->orb_offset);
req              1563 drivers/target/sbp/sbp_target.c 	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
req              1564 drivers/target/sbp/sbp_target.c 		req->node_addr, req->generation, req->speed,
req              1565 drivers/target/sbp/sbp_target.c 		sbp2_pointer_to_addr(&req->orb.status_fifo),
req              1566 drivers/target/sbp/sbp_target.c 		&req->status, 8 + status_data_len);
req              1573 drivers/target/sbp/sbp_target.c 	fw_card_put(req->card);
req              1574 drivers/target/sbp/sbp_target.c 	kfree(req);
req              1597 drivers/target/sbp/sbp_target.c 		struct sbp_management_request *req;
req              1610 drivers/target/sbp/sbp_target.c 		req = kzalloc(sizeof(*req), GFP_ATOMIC);
req              1611 drivers/target/sbp/sbp_target.c 		if (!req) {
req              1616 drivers/target/sbp/sbp_target.c 		req->card = fw_card_get(card);
req              1617 drivers/target/sbp/sbp_target.c 		req->generation = generation;
req              1618 drivers/target/sbp/sbp_target.c 		req->node_addr = source;
req              1619 drivers/target/sbp/sbp_target.c 		req->speed = fw_get_request_speed(request);
req              1622 drivers/target/sbp/sbp_target.c 		agent->request = req;
req              1705 drivers/target/sbp/sbp_target.c 	struct sbp_target_request *req = container_of(se_cmd,
req              1708 drivers/target/sbp/sbp_target.c 	sbp_free_request(req);
req              1718 drivers/target/sbp/sbp_target.c 	struct sbp_target_request *req = container_of(se_cmd,
req              1722 drivers/target/sbp/sbp_target.c 	ret = sbp_rw_data(req);
req              1724 drivers/target/sbp/sbp_target.c 		req->status.status |= cpu_to_be32(
req              1731 drivers/target/sbp/sbp_target.c 		sbp_send_status(req);
req              1751 drivers/target/sbp/sbp_target.c 	struct sbp_target_request *req = container_of(se_cmd,
req              1755 drivers/target/sbp/sbp_target.c 	ret = sbp_rw_data(req);
req              1757 drivers/target/sbp/sbp_target.c 		req->status.status |= cpu_to_be32(
req              1762 drivers/target/sbp/sbp_target.c 		sbp_send_status(req);
req              1766 drivers/target/sbp/sbp_target.c 	return sbp_send_sense(req);
req              1775 drivers/target/sbp/sbp_target.c 	struct sbp_target_request *req = container_of(se_cmd,
req              1778 drivers/target/sbp/sbp_target.c 	return sbp_send_sense(req);
req              1792 drivers/target/sbp/sbp_target.c 	struct sbp_target_request *req = container_of(se_cmd,
req              1795 drivers/target/sbp/sbp_target.c 	return transport_generic_free_cmd(&req->se_cmd, 0);
req               847 drivers/target/target_core_pscsi.c 		struct request *req)
req               914 drivers/target/target_core_pscsi.c 				rc = blk_rq_append_bio(req, &bio);
req               933 drivers/target/target_core_pscsi.c 		rc = blk_rq_append_bio(req, &bio);
req               961 drivers/target/target_core_pscsi.c 	struct request *req;
req               977 drivers/target/target_core_pscsi.c 	req = blk_get_request(pdv->pdv_sd->request_queue,
req               980 drivers/target/target_core_pscsi.c 	if (IS_ERR(req)) {
req               987 drivers/target/target_core_pscsi.c 		ret = pscsi_map_sg(cmd, sgl, sgl_nents, req);
req               992 drivers/target/target_core_pscsi.c 	req->end_io = pscsi_req_done;
req               993 drivers/target/target_core_pscsi.c 	req->end_io_data = cmd;
req               994 drivers/target/target_core_pscsi.c 	scsi_req(req)->cmd_len = scsi_command_size(pt->pscsi_cdb);
req               995 drivers/target/target_core_pscsi.c 	scsi_req(req)->cmd = &pt->pscsi_cdb[0];
req               998 drivers/target/target_core_pscsi.c 		req->timeout = PS_TIMEOUT_DISK;
req              1000 drivers/target/target_core_pscsi.c 		req->timeout = PS_TIMEOUT_OTHER;
req              1001 drivers/target/target_core_pscsi.c 	scsi_req(req)->retries = PS_RETRY;
req              1003 drivers/target/target_core_pscsi.c 	blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req,
req              1010 drivers/target/target_core_pscsi.c 	blk_put_request(req);
req              1038 drivers/target/target_core_pscsi.c static void pscsi_req_done(struct request *req, blk_status_t status)
req              1040 drivers/target/target_core_pscsi.c 	struct se_cmd *cmd = req->end_io_data;
req              1042 drivers/target/target_core_pscsi.c 	int result = scsi_req(req)->result;
req              1051 drivers/target/target_core_pscsi.c 	pscsi_complete_cmd(cmd, scsi_status, scsi_req(req)->sense);
req              1056 drivers/target/target_core_pscsi.c 			cmd->data_length - scsi_req(req)->resid_len);
req              1066 drivers/target/target_core_pscsi.c 	blk_put_request(req);
req               866 drivers/target/target_core_user.c 	return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
req              1044 drivers/target/target_core_user.c 	iov = &entry->req.iov[0];
req              1051 drivers/target/target_core_user.c 	entry->req.iov_cnt = iov_cnt;
req              1061 drivers/target/target_core_user.c 	entry->req.iov_bidi_cnt = iov_cnt;
req              1077 drivers/target/target_core_user.c 	base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
req              1078 drivers/target/target_core_user.c 						       entry->req.iov_bidi_cnt);
req              1086 drivers/target/target_core_user.c 	entry->req.cdb_off = cdb_off;
req                41 drivers/tee/optee/supp.c 	struct optee_supp_req *req;
req                47 drivers/tee/optee/supp.c 	idr_for_each_entry(&supp->idr, req, id) {
req                49 drivers/tee/optee/supp.c 		req->ret = TEEC_ERROR_COMMUNICATION;
req                50 drivers/tee/optee/supp.c 		complete(&req->c);
req                54 drivers/tee/optee/supp.c 	list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) {
req                55 drivers/tee/optee/supp.c 		list_del(&req->link);
req                56 drivers/tee/optee/supp.c 		req->in_queue = false;
req                57 drivers/tee/optee/supp.c 		req->ret = TEEC_ERROR_COMMUNICATION;
req                58 drivers/tee/optee/supp.c 		complete(&req->c);
req                82 drivers/tee/optee/supp.c 	struct optee_supp_req *req;
req                93 drivers/tee/optee/supp.c 	req = kzalloc(sizeof(*req), GFP_KERNEL);
req                94 drivers/tee/optee/supp.c 	if (!req)
req                97 drivers/tee/optee/supp.c 	init_completion(&req->c);
req                98 drivers/tee/optee/supp.c 	req->func = func;
req                99 drivers/tee/optee/supp.c 	req->num_params = num_params;
req               100 drivers/tee/optee/supp.c 	req->param = param;
req               104 drivers/tee/optee/supp.c 	list_add_tail(&req->link, &supp->reqs);
req               105 drivers/tee/optee/supp.c 	req->in_queue = true;
req               116 drivers/tee/optee/supp.c 	while (wait_for_completion_interruptible(&req->c)) {
req               133 drivers/tee/optee/supp.c 			if (req->in_queue) {
req               134 drivers/tee/optee/supp.c 				list_del(&req->link);
req               135 drivers/tee/optee/supp.c 				req->in_queue = false;
req               141 drivers/tee/optee/supp.c 			req->ret = TEEC_ERROR_COMMUNICATION;
req               146 drivers/tee/optee/supp.c 	ret = req->ret;
req               147 drivers/tee/optee/supp.c 	kfree(req);
req               155 drivers/tee/optee/supp.c 	struct optee_supp_req *req;
req               168 drivers/tee/optee/supp.c 	req = list_first_entry(&supp->reqs, struct optee_supp_req, link);
req               170 drivers/tee/optee/supp.c 	if (num_params < req->num_params) {
req               175 drivers/tee/optee/supp.c 	*id = idr_alloc(&supp->idr, req, 1, 0, GFP_KERNEL);
req               179 drivers/tee/optee/supp.c 	list_del(&req->link);
req               180 drivers/tee/optee/supp.c 	req->in_queue = false;
req               182 drivers/tee/optee/supp.c 	return req;
req               235 drivers/tee/optee/supp.c 	struct optee_supp_req *req = NULL;
req               246 drivers/tee/optee/supp.c 		req = supp_pop_entry(supp, *num_params - num_meta, &id);
req               249 drivers/tee/optee/supp.c 		if (req) {
req               250 drivers/tee/optee/supp.c 			if (IS_ERR(req))
req               251 drivers/tee/optee/supp.c 				return PTR_ERR(req);
req               283 drivers/tee/optee/supp.c 	*func = req->func;
req               284 drivers/tee/optee/supp.c 	*num_params = req->num_params + num_meta;
req               285 drivers/tee/optee/supp.c 	memcpy(param + num_meta, req->param,
req               286 drivers/tee/optee/supp.c 	       sizeof(struct tee_param) * req->num_params);
req               296 drivers/tee/optee/supp.c 	struct optee_supp_req *req;
req               315 drivers/tee/optee/supp.c 	req = idr_find(&supp->idr, id);
req               316 drivers/tee/optee/supp.c 	if (!req)
req               319 drivers/tee/optee/supp.c 	if ((num_params - nm) != req->num_params)
req               326 drivers/tee/optee/supp.c 	return req;
req               344 drivers/tee/optee/supp.c 	struct optee_supp_req *req;
req               349 drivers/tee/optee/supp.c 	req = supp_pop_req(supp, num_params, param, &num_meta);
req               352 drivers/tee/optee/supp.c 	if (IS_ERR(req)) {
req               354 drivers/tee/optee/supp.c 		return PTR_ERR(req);
req               358 drivers/tee/optee/supp.c 	for (n = 0; n < req->num_params; n++) {
req               359 drivers/tee/optee/supp.c 		struct tee_param *p = req->param + n;
req               376 drivers/tee/optee/supp.c 	req->ret = ret;
req               379 drivers/tee/optee/supp.c 	complete(&req->c);
req                36 drivers/thermal/tegra/tegra-bpmp-thermal.c 	struct mrq_thermal_host_to_bpmp_request req;
req                41 drivers/thermal/tegra/tegra-bpmp-thermal.c 	memset(&req, 0, sizeof(req));
req                42 drivers/thermal/tegra/tegra-bpmp-thermal.c 	req.type = CMD_THERMAL_GET_TEMP;
req                43 drivers/thermal/tegra/tegra-bpmp-thermal.c 	req.get_temp.zone = zone->idx;
req                47 drivers/thermal/tegra/tegra-bpmp-thermal.c 	msg.tx.data = &req;
req                48 drivers/thermal/tegra/tegra-bpmp-thermal.c 	msg.tx.size = sizeof(req);
req                64 drivers/thermal/tegra/tegra-bpmp-thermal.c 	struct mrq_thermal_host_to_bpmp_request req;
req                67 drivers/thermal/tegra/tegra-bpmp-thermal.c 	memset(&req, 0, sizeof(req));
req                68 drivers/thermal/tegra/tegra-bpmp-thermal.c 	req.type = CMD_THERMAL_SET_TRIP;
req                69 drivers/thermal/tegra/tegra-bpmp-thermal.c 	req.set_trip.zone = zone->idx;
req                70 drivers/thermal/tegra/tegra-bpmp-thermal.c 	req.set_trip.enabled = true;
req                71 drivers/thermal/tegra/tegra-bpmp-thermal.c 	req.set_trip.low = low;
req                72 drivers/thermal/tegra/tegra-bpmp-thermal.c 	req.set_trip.high = high;
req                76 drivers/thermal/tegra/tegra-bpmp-thermal.c 	msg.tx.data = &req;
req                77 drivers/thermal/tegra/tegra-bpmp-thermal.c 	msg.tx.size = sizeof(req);
req                95 drivers/thermal/tegra/tegra-bpmp-thermal.c 	struct mrq_thermal_bpmp_to_host_request *req;
req                99 drivers/thermal/tegra/tegra-bpmp-thermal.c 	req = (struct mrq_thermal_bpmp_to_host_request *)ch->ib->data;
req               101 drivers/thermal/tegra/tegra-bpmp-thermal.c 	if (req->type != CMD_THERMAL_HOST_TRIP_REACHED) {
req               103 drivers/thermal/tegra/tegra-bpmp-thermal.c 			__func__, req->type);
req               109 drivers/thermal/tegra/tegra-bpmp-thermal.c 		if (tegra->zones[i]->idx != req->host_trip_reached.zone)
req               118 drivers/thermal/tegra/tegra-bpmp-thermal.c 		req->host_trip_reached.zone);
req               125 drivers/thermal/tegra/tegra-bpmp-thermal.c 	struct mrq_thermal_host_to_bpmp_request req;
req               130 drivers/thermal/tegra/tegra-bpmp-thermal.c 	memset(&req, 0, sizeof(req));
req               131 drivers/thermal/tegra/tegra-bpmp-thermal.c 	req.type = CMD_THERMAL_GET_NUM_ZONES;
req               135 drivers/thermal/tegra/tegra-bpmp-thermal.c 	msg.tx.data = &req;
req               136 drivers/thermal/tegra/tegra-bpmp-thermal.c 	msg.tx.size = sizeof(req);
req                68 drivers/thunderbolt/ctl.c 	struct tb_cfg_request *req;
req                70 drivers/thunderbolt/ctl.c 	req = kzalloc(sizeof(*req), GFP_KERNEL);
req                71 drivers/thunderbolt/ctl.c 	if (!req)
req                74 drivers/thunderbolt/ctl.c 	kref_init(&req->kref);
req                76 drivers/thunderbolt/ctl.c 	return req;
req                83 drivers/thunderbolt/ctl.c void tb_cfg_request_get(struct tb_cfg_request *req)
req                86 drivers/thunderbolt/ctl.c 	kref_get(&req->kref);
req                92 drivers/thunderbolt/ctl.c 	struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);
req                94 drivers/thunderbolt/ctl.c 	kfree(req);
req               104 drivers/thunderbolt/ctl.c void tb_cfg_request_put(struct tb_cfg_request *req)
req               107 drivers/thunderbolt/ctl.c 	kref_put(&req->kref, tb_cfg_request_destroy);
req               112 drivers/thunderbolt/ctl.c 				  struct tb_cfg_request *req)
req               114 drivers/thunderbolt/ctl.c 	WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
req               115 drivers/thunderbolt/ctl.c 	WARN_ON(req->ctl);
req               122 drivers/thunderbolt/ctl.c 	req->ctl = ctl;
req               123 drivers/thunderbolt/ctl.c 	list_add_tail(&req->list, &ctl->request_queue);
req               124 drivers/thunderbolt/ctl.c 	set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
req               129 drivers/thunderbolt/ctl.c static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
req               131 drivers/thunderbolt/ctl.c 	struct tb_ctl *ctl = req->ctl;
req               134 drivers/thunderbolt/ctl.c 	list_del(&req->list);
req               135 drivers/thunderbolt/ctl.c 	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
req               136 drivers/thunderbolt/ctl.c 	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
req               141 drivers/thunderbolt/ctl.c static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
req               143 drivers/thunderbolt/ctl.c 	return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
req               149 drivers/thunderbolt/ctl.c 	struct tb_cfg_request *req;
req               153 drivers/thunderbolt/ctl.c 	list_for_each_entry(req, &pkg->ctl->request_queue, list) {
req               154 drivers/thunderbolt/ctl.c 		tb_cfg_request_get(req);
req               155 drivers/thunderbolt/ctl.c 		if (req->match(req, pkg)) {
req               159 drivers/thunderbolt/ctl.c 		tb_cfg_request_put(req);
req               163 drivers/thunderbolt/ctl.c 	return found ? req : NULL;
req               411 drivers/thunderbolt/ctl.c 	struct tb_cfg_request *req;
req               472 drivers/thunderbolt/ctl.c 	req = tb_cfg_request_find(pkg->ctl, pkg);
req               473 drivers/thunderbolt/ctl.c 	if (req) {
req               474 drivers/thunderbolt/ctl.c 		if (req->copy(req, pkg))
req               475 drivers/thunderbolt/ctl.c 			schedule_work(&req->work);
req               476 drivers/thunderbolt/ctl.c 		tb_cfg_request_put(req);
req               485 drivers/thunderbolt/ctl.c 	struct tb_cfg_request *req = container_of(work, typeof(*req), work);
req               487 drivers/thunderbolt/ctl.c 	if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
req               488 drivers/thunderbolt/ctl.c 		req->callback(req->callback_data);
req               490 drivers/thunderbolt/ctl.c 	tb_cfg_request_dequeue(req);
req               491 drivers/thunderbolt/ctl.c 	tb_cfg_request_put(req);
req               504 drivers/thunderbolt/ctl.c int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
req               509 drivers/thunderbolt/ctl.c 	req->flags = 0;
req               510 drivers/thunderbolt/ctl.c 	req->callback = callback;
req               511 drivers/thunderbolt/ctl.c 	req->callback_data = callback_data;
req               512 drivers/thunderbolt/ctl.c 	INIT_WORK(&req->work, tb_cfg_request_work);
req               513 drivers/thunderbolt/ctl.c 	INIT_LIST_HEAD(&req->list);
req               515 drivers/thunderbolt/ctl.c 	tb_cfg_request_get(req);
req               516 drivers/thunderbolt/ctl.c 	ret = tb_cfg_request_enqueue(ctl, req);
req               520 drivers/thunderbolt/ctl.c 	ret = tb_ctl_tx(ctl, req->request, req->request_size,
req               521 drivers/thunderbolt/ctl.c 			req->request_type);
req               525 drivers/thunderbolt/ctl.c 	if (!req->response)
req               526 drivers/thunderbolt/ctl.c 		schedule_work(&req->work);
req               531 drivers/thunderbolt/ctl.c 	tb_cfg_request_dequeue(req);
req               533 drivers/thunderbolt/ctl.c 	tb_cfg_request_put(req);
req               546 drivers/thunderbolt/ctl.c void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
req               548 drivers/thunderbolt/ctl.c 	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
req               549 drivers/thunderbolt/ctl.c 	schedule_work(&req->work);
req               550 drivers/thunderbolt/ctl.c 	wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
req               551 drivers/thunderbolt/ctl.c 	req->result.err = err;
req               571 drivers/thunderbolt/ctl.c 					 struct tb_cfg_request *req,
req               579 drivers/thunderbolt/ctl.c 	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
req               586 drivers/thunderbolt/ctl.c 		tb_cfg_request_cancel(req, -ETIMEDOUT);
req               588 drivers/thunderbolt/ctl.c 	flush_work(&req->work);
req               590 drivers/thunderbolt/ctl.c 	return req->result;
req               727 drivers/thunderbolt/ctl.c static bool tb_cfg_match(const struct tb_cfg_request *req,
req               735 drivers/thunderbolt/ctl.c 	if (pkg->frame.eof != req->response_type)
req               737 drivers/thunderbolt/ctl.c 	if (route != tb_cfg_get_route(req->request))
req               739 drivers/thunderbolt/ctl.c 	if (pkg->frame.size != req->response_size)
req               744 drivers/thunderbolt/ctl.c 		const struct cfg_read_pkg *req_hdr = req->request;
req               754 drivers/thunderbolt/ctl.c static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
req               759 drivers/thunderbolt/ctl.c 	res = parse_header(pkg, req->response_size, req->response_type,
req               760 drivers/thunderbolt/ctl.c 			   tb_cfg_get_route(req->request));
req               762 drivers/thunderbolt/ctl.c 		memcpy(req->response, pkg->buffer, req->response_size);
req               764 drivers/thunderbolt/ctl.c 	req->result = res;
req               783 drivers/thunderbolt/ctl.c 	struct tb_cfg_request *req;
req               785 drivers/thunderbolt/ctl.c 	req = tb_cfg_request_alloc();
req               786 drivers/thunderbolt/ctl.c 	if (!req) {
req               791 drivers/thunderbolt/ctl.c 	req->match = tb_cfg_match;
req               792 drivers/thunderbolt/ctl.c 	req->copy = tb_cfg_copy;
req               793 drivers/thunderbolt/ctl.c 	req->request = &request;
req               794 drivers/thunderbolt/ctl.c 	req->request_size = sizeof(request);
req               795 drivers/thunderbolt/ctl.c 	req->request_type = TB_CFG_PKG_RESET;
req               796 drivers/thunderbolt/ctl.c 	req->response = &reply;
req               797 drivers/thunderbolt/ctl.c 	req->response_size = sizeof(reply);
req               798 drivers/thunderbolt/ctl.c 	req->response_type = TB_CFG_PKG_RESET;
req               800 drivers/thunderbolt/ctl.c 	res = tb_cfg_request_sync(ctl, req, timeout_msec);
req               802 drivers/thunderbolt/ctl.c 	tb_cfg_request_put(req);
req               830 drivers/thunderbolt/ctl.c 		struct tb_cfg_request *req;
req               832 drivers/thunderbolt/ctl.c 		req = tb_cfg_request_alloc();
req               833 drivers/thunderbolt/ctl.c 		if (!req) {
req               840 drivers/thunderbolt/ctl.c 		req->match = tb_cfg_match;
req               841 drivers/thunderbolt/ctl.c 		req->copy = tb_cfg_copy;
req               842 drivers/thunderbolt/ctl.c 		req->request = &request;
req               843 drivers/thunderbolt/ctl.c 		req->request_size = sizeof(request);
req               844 drivers/thunderbolt/ctl.c 		req->request_type = TB_CFG_PKG_READ;
req               845 drivers/thunderbolt/ctl.c 		req->response = &reply;
req               846 drivers/thunderbolt/ctl.c 		req->response_size = 12 + 4 * length;
req               847 drivers/thunderbolt/ctl.c 		req->response_type = TB_CFG_PKG_READ;
req               849 drivers/thunderbolt/ctl.c 		res = tb_cfg_request_sync(ctl, req, timeout_msec);
req               851 drivers/thunderbolt/ctl.c 		tb_cfg_request_put(req);
req               895 drivers/thunderbolt/ctl.c 		struct tb_cfg_request *req;
req               897 drivers/thunderbolt/ctl.c 		req = tb_cfg_request_alloc();
req               898 drivers/thunderbolt/ctl.c 		if (!req) {
req               905 drivers/thunderbolt/ctl.c 		req->match = tb_cfg_match;
req               906 drivers/thunderbolt/ctl.c 		req->copy = tb_cfg_copy;
req               907 drivers/thunderbolt/ctl.c 		req->request = &request;
req               908 drivers/thunderbolt/ctl.c 		req->request_size = 12 + 4 * length;
req               909 drivers/thunderbolt/ctl.c 		req->request_type = TB_CFG_PKG_WRITE;
req               910 drivers/thunderbolt/ctl.c 		req->response = &reply;
req               911 drivers/thunderbolt/ctl.c 		req->response_size = sizeof(reply);
req               912 drivers/thunderbolt/ctl.c 		req->response_type = TB_CFG_PKG_WRITE;
req               914 drivers/thunderbolt/ctl.c 		res = tb_cfg_request_sync(ctl, req, timeout_msec);
req               916 drivers/thunderbolt/ctl.c 		tb_cfg_request_put(req);
req                87 drivers/thunderbolt/ctl.h 	bool (*match)(const struct tb_cfg_request *req,
req                89 drivers/thunderbolt/ctl.h 	bool (*copy)(struct tb_cfg_request *req, const struct ctl_pkg *pkg);
req               102 drivers/thunderbolt/ctl.h void tb_cfg_request_get(struct tb_cfg_request *req);
req               103 drivers/thunderbolt/ctl.h void tb_cfg_request_put(struct tb_cfg_request *req);
req               104 drivers/thunderbolt/ctl.h int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
req               106 drivers/thunderbolt/ctl.h void tb_cfg_request_cancel(struct tb_cfg_request *req, int err);
req               108 drivers/thunderbolt/ctl.h 			struct tb_cfg_request *req, int timeout_msec);
req                65 drivers/thunderbolt/dma_port.c static bool dma_port_match(const struct tb_cfg_request *req,
req                72 drivers/thunderbolt/dma_port.c 	if (pkg->frame.eof != req->response_type)
req                74 drivers/thunderbolt/dma_port.c 	if (route != tb_cfg_get_route(req->request))
req                76 drivers/thunderbolt/dma_port.c 	if (pkg->frame.size != req->response_size)
req                82 drivers/thunderbolt/dma_port.c static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
req                84 drivers/thunderbolt/dma_port.c 	memcpy(req->response, pkg->buffer, req->response_size);
req               101 drivers/thunderbolt/dma_port.c 	struct tb_cfg_request *req;
req               105 drivers/thunderbolt/dma_port.c 	req = tb_cfg_request_alloc();
req               106 drivers/thunderbolt/dma_port.c 	if (!req)
req               109 drivers/thunderbolt/dma_port.c 	req->match = dma_port_match;
req               110 drivers/thunderbolt/dma_port.c 	req->copy = dma_port_copy;
req               111 drivers/thunderbolt/dma_port.c 	req->request = &request;
req               112 drivers/thunderbolt/dma_port.c 	req->request_size = sizeof(request);
req               113 drivers/thunderbolt/dma_port.c 	req->request_type = TB_CFG_PKG_READ;
req               114 drivers/thunderbolt/dma_port.c 	req->response = &reply;
req               115 drivers/thunderbolt/dma_port.c 	req->response_size = 12 + 4 * length;
req               116 drivers/thunderbolt/dma_port.c 	req->response_type = TB_CFG_PKG_READ;
req               118 drivers/thunderbolt/dma_port.c 	res = tb_cfg_request_sync(ctl, req, timeout_msec);
req               120 drivers/thunderbolt/dma_port.c 	tb_cfg_request_put(req);
req               142 drivers/thunderbolt/dma_port.c 	struct tb_cfg_request *req;
req               148 drivers/thunderbolt/dma_port.c 	req = tb_cfg_request_alloc();
req               149 drivers/thunderbolt/dma_port.c 	if (!req)
req               152 drivers/thunderbolt/dma_port.c 	req->match = dma_port_match;
req               153 drivers/thunderbolt/dma_port.c 	req->copy = dma_port_copy;
req               154 drivers/thunderbolt/dma_port.c 	req->request = &request;
req               155 drivers/thunderbolt/dma_port.c 	req->request_size = 12 + 4 * length;
req               156 drivers/thunderbolt/dma_port.c 	req->request_type = TB_CFG_PKG_WRITE;
req               157 drivers/thunderbolt/dma_port.c 	req->response = &reply;
req               158 drivers/thunderbolt/dma_port.c 	req->response_size = sizeof(reply);
req               159 drivers/thunderbolt/dma_port.c 	req->response_type = TB_CFG_PKG_WRITE;
req               161 drivers/thunderbolt/dma_port.c 	res = tb_cfg_request_sync(ctl, req, timeout_msec);
req               163 drivers/thunderbolt/dma_port.c 	tb_cfg_request_put(req);
req               238 drivers/thunderbolt/icm.c static bool icm_match(const struct tb_cfg_request *req,
req               242 drivers/thunderbolt/icm.c 	const struct icm_pkg_header *req_hdr = req->request;
req               244 drivers/thunderbolt/icm.c 	if (pkg->frame.eof != req->response_type)
req               252 drivers/thunderbolt/icm.c static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
req               256 drivers/thunderbolt/icm.c 	if (hdr->packet_id < req->npackets) {
req               257 drivers/thunderbolt/icm.c 		size_t offset = hdr->packet_id * req->response_size;
req               259 drivers/thunderbolt/icm.c 		memcpy(req->response + offset, pkg->buffer, req->response_size);
req               273 drivers/thunderbolt/icm.c 		struct tb_cfg_request *req;
req               276 drivers/thunderbolt/icm.c 		req = tb_cfg_request_alloc();
req               277 drivers/thunderbolt/icm.c 		if (!req)
req               280 drivers/thunderbolt/icm.c 		req->match = icm_match;
req               281 drivers/thunderbolt/icm.c 		req->copy = icm_copy;
req               282 drivers/thunderbolt/icm.c 		req->request = request;
req               283 drivers/thunderbolt/icm.c 		req->request_size = request_size;
req               284 drivers/thunderbolt/icm.c 		req->request_type = TB_CFG_PKG_ICM_CMD;
req               285 drivers/thunderbolt/icm.c 		req->response = response;
req               286 drivers/thunderbolt/icm.c 		req->npackets = npackets;
req               287 drivers/thunderbolt/icm.c 		req->response_size = response_size;
req               288 drivers/thunderbolt/icm.c 		req->response_type = TB_CFG_PKG_ICM_RESP;
req               291 drivers/thunderbolt/icm.c 		res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
req               294 drivers/thunderbolt/icm.c 		tb_cfg_request_put(req);
req                48 drivers/thunderbolt/xdomain.c static bool tb_xdomain_match(const struct tb_cfg_request *req,
req                57 drivers/thunderbolt/xdomain.c 		const struct tb_xdp_header *req_hdr = req->request;
req                59 drivers/thunderbolt/xdomain.c 		if (pkg->frame.size < req->response_size / 4)
req                81 drivers/thunderbolt/xdomain.c static bool tb_xdomain_copy(struct tb_cfg_request *req,
req                84 drivers/thunderbolt/xdomain.c 	memcpy(req->response, pkg->buffer, req->response_size);
req                85 drivers/thunderbolt/xdomain.c 	req->result.err = 0;
req                97 drivers/thunderbolt/xdomain.c 	struct tb_cfg_request *req;
req                99 drivers/thunderbolt/xdomain.c 	req = tb_cfg_request_alloc();
req               100 drivers/thunderbolt/xdomain.c 	if (!req)
req               103 drivers/thunderbolt/xdomain.c 	req->match = tb_xdomain_match;
req               104 drivers/thunderbolt/xdomain.c 	req->copy = tb_xdomain_copy;
req               105 drivers/thunderbolt/xdomain.c 	req->request = response;
req               106 drivers/thunderbolt/xdomain.c 	req->request_size = size;
req               107 drivers/thunderbolt/xdomain.c 	req->request_type = type;
req               109 drivers/thunderbolt/xdomain.c 	return tb_cfg_request(ctl, req, response_ready, req);
req               136 drivers/thunderbolt/xdomain.c 	struct tb_cfg_request *req;
req               139 drivers/thunderbolt/xdomain.c 	req = tb_cfg_request_alloc();
req               140 drivers/thunderbolt/xdomain.c 	if (!req)
req               143 drivers/thunderbolt/xdomain.c 	req->match = tb_xdomain_match;
req               144 drivers/thunderbolt/xdomain.c 	req->copy = tb_xdomain_copy;
req               145 drivers/thunderbolt/xdomain.c 	req->request = request;
req               146 drivers/thunderbolt/xdomain.c 	req->request_size = request_size;
req               147 drivers/thunderbolt/xdomain.c 	req->request_type = request_type;
req               148 drivers/thunderbolt/xdomain.c 	req->response = response;
req               149 drivers/thunderbolt/xdomain.c 	req->response_size = response_size;
req               150 drivers/thunderbolt/xdomain.c 	req->response_type = response_type;
req               152 drivers/thunderbolt/xdomain.c 	res = tb_cfg_request_sync(ctl, req, timeout_msec);
req               154 drivers/thunderbolt/xdomain.c 	tb_cfg_request_put(req);
req               230 drivers/thunderbolt/xdomain.c 	struct tb_xdp_uuid req;
req               233 drivers/thunderbolt/xdomain.c 	memset(&req, 0, sizeof(req));
req               234 drivers/thunderbolt/xdomain.c 	tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
req               235 drivers/thunderbolt/xdomain.c 			   sizeof(req));
req               238 drivers/thunderbolt/xdomain.c 	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
req               289 drivers/thunderbolt/xdomain.c 	struct tb_xdp_properties req;
req               300 drivers/thunderbolt/xdomain.c 	memset(&req, 0, sizeof(req));
req               301 drivers/thunderbolt/xdomain.c 	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
req               302 drivers/thunderbolt/xdomain.c 			   sizeof(req));
req               303 drivers/thunderbolt/xdomain.c 	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
req               304 drivers/thunderbolt/xdomain.c 	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));
req               310 drivers/thunderbolt/xdomain.c 		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
req               335 drivers/thunderbolt/xdomain.c 		if (res->offset != req.offset) {
req               358 drivers/thunderbolt/xdomain.c 		memcpy(data + req.offset, res->data, len * 4);
req               359 drivers/thunderbolt/xdomain.c 		req.offset += len;
req               360 drivers/thunderbolt/xdomain.c 	} while (!data_len || req.offset < data_len);
req               378 drivers/thunderbolt/xdomain.c 	const struct tb_xdp_properties *req)
req               390 drivers/thunderbolt/xdomain.c 	if (!uuid_equal(src_uuid, &req->dst_uuid)) {
req               398 drivers/thunderbolt/xdomain.c 	if (req->offset >= xdomain_property_block_len) {
req               403 drivers/thunderbolt/xdomain.c 	len = xdomain_property_block_len - req->offset;
req               417 drivers/thunderbolt/xdomain.c 	res->offset = req->offset;
req               419 drivers/thunderbolt/xdomain.c 	uuid_copy(&res->dst_uuid, &req->src_uuid);
req               420 drivers/thunderbolt/xdomain.c 	memcpy(res->data, &xdomain_property_block[req->offset], len * 4);
req               435 drivers/thunderbolt/xdomain.c 	struct tb_xdp_properties_changed req;
req               438 drivers/thunderbolt/xdomain.c 	memset(&req, 0, sizeof(req));
req               439 drivers/thunderbolt/xdomain.c 	tb_xdp_fill_header(&req.hdr, route, retry % 4,
req               440 drivers/thunderbolt/xdomain.c 			   PROPERTIES_CHANGED_REQUEST, sizeof(req));
req               441 drivers/thunderbolt/xdomain.c 	uuid_copy(&req.src_uuid, uuid);
req               444 drivers/thunderbolt/xdomain.c 	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
req               444 drivers/usb/cdns3/gadget.c 		struct usb_request *req;
req               446 drivers/usb/cdns3/gadget.c 		req = cdns3_next_request(&priv_ep->deferred_req_list);
req               450 drivers/usb/cdns3/gadget.c 		if (!req)
req               453 drivers/usb/cdns3/gadget.c 		cdns3_wa2_descmiss_copy_data(priv_ep, req);
req               455 drivers/usb/cdns3/gadget.c 		    req->length != req->actual) {
req               460 drivers/usb/cdns3/gadget.c 		if (req->status == -EINPROGRESS)
req               461 drivers/usb/cdns3/gadget.c 			req->status = 0;
req               463 drivers/usb/cdns3/gadget.c 		list_del_init(&req->list);
req               465 drivers/usb/cdns3/gadget.c 		return req;
req              2070 drivers/usb/cdns3/gadget.c 	struct usb_request *req, *req_temp;
req              2088 drivers/usb/cdns3/gadget.c 	list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list,
req              2090 drivers/usb/cdns3/gadget.c 		if (request == req) {
req              2096 drivers/usb/cdns3/gadget.c 	list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list,
req              2098 drivers/usb/cdns3/gadget.c 		if (request == req)
req               197 drivers/usb/cdns3/trace.h 	TP_PROTO(struct cdns3_request *req),
req               198 drivers/usb/cdns3/trace.h 	TP_ARGS(req),
req               200 drivers/usb/cdns3/trace.h 		__string(name, req->priv_ep->name)
req               201 drivers/usb/cdns3/trace.h 		__field(struct cdns3_request *, req)
req               215 drivers/usb/cdns3/trace.h 		__assign_str(name, req->priv_ep->name);
req               216 drivers/usb/cdns3/trace.h 		__entry->req = req;
req               217 drivers/usb/cdns3/trace.h 		__entry->buf = req->request.buf;
req               218 drivers/usb/cdns3/trace.h 		__entry->actual = req->request.actual;
req               219 drivers/usb/cdns3/trace.h 		__entry->length = req->request.length;
req               220 drivers/usb/cdns3/trace.h 		__entry->status = req->request.status;
req               221 drivers/usb/cdns3/trace.h 		__entry->zero = req->request.zero;
req               222 drivers/usb/cdns3/trace.h 		__entry->short_not_ok = req->request.short_not_ok;
req               223 drivers/usb/cdns3/trace.h 		__entry->no_interrupt = req->request.no_interrupt;
req               224 drivers/usb/cdns3/trace.h 		__entry->start_trb = req->start_trb;
req               225 drivers/usb/cdns3/trace.h 		__entry->end_trb = req->end_trb;
req               226 drivers/usb/cdns3/trace.h 		__entry->start_trb_addr = req->trb;
req               227 drivers/usb/cdns3/trace.h 		__entry->flags = req->flags;
req               231 drivers/usb/cdns3/trace.h 		__get_str(name), __entry->req, __entry->buf, __entry->actual,
req               245 drivers/usb/cdns3/trace.h 	TP_PROTO(struct cdns3_request *req),
req               246 drivers/usb/cdns3/trace.h 	TP_ARGS(req)
req               250 drivers/usb/cdns3/trace.h 	TP_PROTO(struct cdns3_request *req),
req               251 drivers/usb/cdns3/trace.h 	TP_ARGS(req)
req               255 drivers/usb/cdns3/trace.h 	TP_PROTO(struct cdns3_request *req),
req               256 drivers/usb/cdns3/trace.h 	TP_ARGS(req)
req               260 drivers/usb/cdns3/trace.h 	TP_PROTO(struct cdns3_request *req),
req               261 drivers/usb/cdns3/trace.h 	TP_ARGS(req)
req               265 drivers/usb/cdns3/trace.h 	TP_PROTO(struct cdns3_request *req),
req               266 drivers/usb/cdns3/trace.h 	TP_ARGS(req)
req               289 drivers/usb/cdns3/trace.h 		__field(struct usb_request *, req)
req               298 drivers/usb/cdns3/trace.h 		__entry->req = &priv_req->request;
req               306 drivers/usb/cdns3/trace.h 		__get_str(name), __entry->req, __entry->buf, &__entry->dma,
req               313 drivers/usb/cdns3/trace.h 	TP_PROTO(struct cdns3_request *req),
req               314 drivers/usb/cdns3/trace.h 	TP_ARGS(req)
req               318 drivers/usb/cdns3/trace.h 	TP_PROTO(struct cdns3_request *req),
req               319 drivers/usb/cdns3/trace.h 	TP_ARGS(req)
req               157 drivers/usb/chipidea/debug.c 	struct ci_hw_req *req = NULL;
req               168 drivers/usb/chipidea/debug.c 		list_for_each_entry(req, &ci->ci_hw_ep[i].qh.queue, queue) {
req               169 drivers/usb/chipidea/debug.c 			list_for_each_entry_safe(node, tmpnode, &req->tds, td) {
req               361 drivers/usb/chipidea/udc.c 		u32 mul = hwreq->req.length / hwep->ep.maxpacket;
req               363 drivers/usb/chipidea/udc.c 		if (hwreq->req.length == 0
req               364 drivers/usb/chipidea/udc.c 				|| hwreq->req.length % hwep->ep.maxpacket)
req               369 drivers/usb/chipidea/udc.c 	temp = (u32) (hwreq->req.dma + hwreq->req.actual);
req               379 drivers/usb/chipidea/udc.c 	hwreq->req.actual += length;
req               414 drivers/usb/chipidea/udc.c 	unsigned rest = hwreq->req.length;
req               419 drivers/usb/chipidea/udc.c 	if (hwreq->req.status == -EALREADY)
req               422 drivers/usb/chipidea/udc.c 	hwreq->req.status = -EALREADY;
req               425 drivers/usb/chipidea/udc.c 					    &hwreq->req, hwep->dir);
req               433 drivers/usb/chipidea/udc.c 	if (hwreq->req.dma % PAGE_SIZE)
req               443 drivers/usb/chipidea/udc.c 		unsigned count = min(hwreq->req.length - hwreq->req.actual,
req               452 drivers/usb/chipidea/udc.c 	if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX
req               453 drivers/usb/chipidea/udc.c 	    && (hwreq->req.length % hwep->ep.maxpacket == 0)) {
req               465 drivers/usb/chipidea/udc.c 	if (!hwreq->req.no_interrupt)
req               469 drivers/usb/chipidea/udc.c 	hwreq->req.actual = 0;
req               501 drivers/usb/chipidea/udc.c 		u32 mul = hwreq->req.length / hwep->ep.maxpacket;
req               503 drivers/usb/chipidea/udc.c 		if (hwreq->req.length == 0
req               504 drivers/usb/chipidea/udc.c 				|| hwreq->req.length % hwep->ep.maxpacket)
req               551 drivers/usb/chipidea/udc.c 	unsigned actual = hwreq->req.length;
req               554 drivers/usb/chipidea/udc.c 	if (hwreq->req.status != -EALREADY)
req               557 drivers/usb/chipidea/udc.c 	hwreq->req.status = 0;
req               567 drivers/usb/chipidea/udc.c 			hwreq->req.status = -EALREADY;
req               575 drivers/usb/chipidea/udc.c 		hwreq->req.status = tmptoken & TD_STATUS;
req               576 drivers/usb/chipidea/udc.c 		if ((TD_STATUS_HALTED & hwreq->req.status)) {
req               577 drivers/usb/chipidea/udc.c 			hwreq->req.status = -EPIPE;
req               579 drivers/usb/chipidea/udc.c 		} else if ((TD_STATUS_DT_ERR & hwreq->req.status)) {
req               580 drivers/usb/chipidea/udc.c 			hwreq->req.status = -EPROTO;
req               582 drivers/usb/chipidea/udc.c 		} else if ((TD_STATUS_TR_ERR & hwreq->req.status)) {
req               583 drivers/usb/chipidea/udc.c 			hwreq->req.status = -EILSEQ;
req               589 drivers/usb/chipidea/udc.c 				hwreq->req.status = -EPROTO;
req               606 drivers/usb/chipidea/udc.c 					&hwreq->req, hwep->dir);
req               608 drivers/usb/chipidea/udc.c 	hwreq->req.actual += actual;
req               610 drivers/usb/chipidea/udc.c 	if (hwreq->req.status)
req               611 drivers/usb/chipidea/udc.c 		return hwreq->req.status;
req               613 drivers/usb/chipidea/udc.c 	return hwreq->req.actual;
req               647 drivers/usb/chipidea/udc.c 		hwreq->req.status = -ESHUTDOWN;
req               649 drivers/usb/chipidea/udc.c 		if (hwreq->req.complete != NULL) {
req               651 drivers/usb/chipidea/udc.c 			usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
req               783 drivers/usb/chipidea/udc.c static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
req               785 drivers/usb/chipidea/udc.c 	if (ep == NULL || req == NULL)
req               788 drivers/usb/chipidea/udc.c 	kfree(req->buf);
req               789 drivers/usb/chipidea/udc.c 	usb_ep_free_request(ep, req);
req               801 drivers/usb/chipidea/udc.c static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
req               805 drivers/usb/chipidea/udc.c 	struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
req               809 drivers/usb/chipidea/udc.c 	if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
req               813 drivers/usb/chipidea/udc.c 		if (req->length)
req               824 drivers/usb/chipidea/udc.c 	    hwreq->req.length > hwep->ep.mult * hwep->ep.maxpacket) {
req               836 drivers/usb/chipidea/udc.c 	hwreq->req.status = -EINPROGRESS;
req               837 drivers/usb/chipidea/udc.c 	hwreq->req.actual = 0;
req               862 drivers/usb/chipidea/udc.c 	struct usb_request *req = NULL;
req               870 drivers/usb/chipidea/udc.c 	req = usb_ep_alloc_request(&hwep->ep, gfp_flags);
req               872 drivers/usb/chipidea/udc.c 	if (req == NULL)
req               875 drivers/usb/chipidea/udc.c 	req->complete = isr_get_status_complete;
req               876 drivers/usb/chipidea/udc.c 	req->length   = 2;
req               877 drivers/usb/chipidea/udc.c 	req->buf      = kzalloc(req->length, gfp_flags);
req               878 drivers/usb/chipidea/udc.c 	if (req->buf == NULL) {
req               884 drivers/usb/chipidea/udc.c 		*(u16 *)req->buf = (ci->remote_wakeup << 1) |
req               891 drivers/usb/chipidea/udc.c 		*(u16 *)req->buf = hw_ep_get_halt(ci, num, dir);
req               895 drivers/usb/chipidea/udc.c 	retval = _ep_queue(&hwep->ep, req, gfp_flags);
req               902 drivers/usb/chipidea/udc.c 	kfree(req->buf);
req               905 drivers/usb/chipidea/udc.c 	usb_ep_free_request(&hwep->ep, req);
req               919 drivers/usb/chipidea/udc.c isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
req               921 drivers/usb/chipidea/udc.c 	struct ci_hdrc *ci = req->context;
req               984 drivers/usb/chipidea/udc.c 		if (hwreq->req.complete != NULL) {
req               987 drivers/usb/chipidea/udc.c 					hwreq->req.length)
req               989 drivers/usb/chipidea/udc.c 			usb_gadget_giveback_request(&hweptemp->ep, &hwreq->req);
req              1018 drivers/usb/chipidea/udc.c 	struct usb_ctrlrequest req;
req              1032 drivers/usb/chipidea/udc.c 		memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
req              1035 drivers/usb/chipidea/udc.c 	type = req.bRequestType;
req              1039 drivers/usb/chipidea/udc.c 	switch (req.bRequest) {
req              1042 drivers/usb/chipidea/udc.c 				le16_to_cpu(req.wValue) ==
req              1044 drivers/usb/chipidea/udc.c 			if (req.wLength != 0)
req              1046 drivers/usb/chipidea/udc.c 			num  = le16_to_cpu(req.wIndex);
req              1061 drivers/usb/chipidea/udc.c 				le16_to_cpu(req.wValue) ==
req              1063 drivers/usb/chipidea/udc.c 			if (req.wLength != 0)
req              1073 drivers/usb/chipidea/udc.c 			le16_to_cpu(req.wIndex) == OTG_STS_SELECTOR) &&
req              1077 drivers/usb/chipidea/udc.c 		if (le16_to_cpu(req.wLength) != 2 ||
req              1078 drivers/usb/chipidea/udc.c 		    le16_to_cpu(req.wValue)  != 0)
req              1080 drivers/usb/chipidea/udc.c 		err = isr_get_status_response(ci, &req);
req              1085 drivers/usb/chipidea/udc.c 		if (le16_to_cpu(req.wLength) != 0 ||
req              1086 drivers/usb/chipidea/udc.c 		    le16_to_cpu(req.wIndex)  != 0)
req              1088 drivers/usb/chipidea/udc.c 		ci->address = (u8)le16_to_cpu(req.wValue);
req              1094 drivers/usb/chipidea/udc.c 				le16_to_cpu(req.wValue) ==
req              1096 drivers/usb/chipidea/udc.c 			if (req.wLength != 0)
req              1098 drivers/usb/chipidea/udc.c 			num  = le16_to_cpu(req.wIndex);
req              1110 drivers/usb/chipidea/udc.c 			if (req.wLength != 0)
req              1112 drivers/usb/chipidea/udc.c 			switch (le16_to_cpu(req.wValue)) {
req              1118 drivers/usb/chipidea/udc.c 				tmode = le16_to_cpu(req.wIndex) >> 8;
req              1160 drivers/usb/chipidea/udc.c 		if (req.wLength == 0)   /* no data phase */
req              1164 drivers/usb/chipidea/udc.c 		err = ci->driver->setup(&ci->gadget, &req);
req              1347 drivers/usb/chipidea/udc.c 	return (hwreq == NULL) ? NULL : &hwreq->req;
req              1355 drivers/usb/chipidea/udc.c static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
req              1358 drivers/usb/chipidea/udc.c 	struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
req              1362 drivers/usb/chipidea/udc.c 	if (ep == NULL || req == NULL) {
req              1388 drivers/usb/chipidea/udc.c static int ep_queue(struct usb_ep *ep, struct usb_request *req,
req              1395 drivers/usb/chipidea/udc.c 	if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
req              1403 drivers/usb/chipidea/udc.c 	retval = _ep_queue(ep, req, gfp_flags);
req              1413 drivers/usb/chipidea/udc.c static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
req              1416 drivers/usb/chipidea/udc.c 	struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
req              1420 drivers/usb/chipidea/udc.c 	if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
req              1438 drivers/usb/chipidea/udc.c 	usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);
req              1440 drivers/usb/chipidea/udc.c 	req->status = -ECONNRESET;
req              1442 drivers/usb/chipidea/udc.c 	if (hwreq->req.complete != NULL) {
req              1444 drivers/usb/chipidea/udc.c 		usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
req                76 drivers/usb/chipidea/udc.h 	struct usb_request	req;
req               353 drivers/usb/class/cdc-wdm.c 	struct usb_ctrlrequest *req;
req               401 drivers/usb/class/cdc-wdm.c 	req = desc->orq;
req               407 drivers/usb/class/cdc-wdm.c 		(unsigned char *)req,
req               414 drivers/usb/class/cdc-wdm.c 	req->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS |
req               416 drivers/usb/class/cdc-wdm.c 	req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
req               417 drivers/usb/class/cdc-wdm.c 	req->wValue = 0;
req               418 drivers/usb/class/cdc-wdm.c 	req->wIndex = desc->inum; /* already converted */
req               419 drivers/usb/class/cdc-wdm.c 	req->wLength = cpu_to_le16(count);
req               432 drivers/usb/class/cdc-wdm.c 			le16_to_cpu(req->wIndex));
req              1898 drivers/usb/class/usbtmc.c 	if (request.req.wLength > USBTMC_BUFSIZE)
req              1901 drivers/usb/class/usbtmc.c 	if (request.req.wLength) {
req              1902 drivers/usb/class/usbtmc.c 		buffer = kmalloc(request.req.wLength, GFP_KERNEL);
req              1906 drivers/usb/class/usbtmc.c 		if ((request.req.bRequestType & USB_DIR_IN) == 0) {
req              1909 drivers/usb/class/usbtmc.c 					     request.req.wLength);
req              1919 drivers/usb/class/usbtmc.c 			request.req.bRequest,
req              1920 drivers/usb/class/usbtmc.c 			request.req.bRequestType,
req              1921 drivers/usb/class/usbtmc.c 			request.req.wValue,
req              1922 drivers/usb/class/usbtmc.c 			request.req.wIndex,
req              1923 drivers/usb/class/usbtmc.c 			buffer, request.req.wLength, USB_CTRL_GET_TIMEOUT);
req              1930 drivers/usb/class/usbtmc.c 	if (rv && (request.req.bRequestType & USB_DIR_IN)) {
req                98 drivers/usb/core/hub.h 	struct dev_pm_qos_request *req;
req              2057 drivers/usb/core/message.c 	struct set_config_request *req =
req              2059 drivers/usb/core/message.c 	struct usb_device *udev = req->udev;
req              2063 drivers/usb/core/message.c 	list_del(&req->node);
req              2066 drivers/usb/core/message.c 	if (req->config >= -1)		/* Is req still valid? */
req              2067 drivers/usb/core/message.c 		usb_set_configuration(udev, req->config);
req              2070 drivers/usb/core/message.c 	kfree(req);
req              2078 drivers/usb/core/message.c 	struct set_config_request *req;
req              2081 drivers/usb/core/message.c 	list_for_each_entry(req, &set_config_list, node) {
req              2082 drivers/usb/core/message.c 		if (req->udev == udev)
req              2083 drivers/usb/core/message.c 			req->config = -999;	/* Mark as cancelled */
req              2110 drivers/usb/core/message.c 	struct set_config_request *req;
req              2112 drivers/usb/core/message.c 	req = kmalloc(sizeof(*req), GFP_KERNEL);
req              2113 drivers/usb/core/message.c 	if (!req)
req              2115 drivers/usb/core/message.c 	req->udev = udev;
req              2116 drivers/usb/core/message.c 	req->config = config;
req              2117 drivers/usb/core/message.c 	INIT_WORK(&req->work, driver_set_config_work);
req              2120 drivers/usb/core/message.c 	list_add(&req->node, &set_config_list);
req              2124 drivers/usb/core/message.c 	schedule_work(&req->work);
req               186 drivers/usb/core/port.c 	kfree(port_dev->req);
req               541 drivers/usb/core/port.c 	port_dev->req = kzalloc(sizeof(*(port_dev->req)), GFP_KERNEL);
req               542 drivers/usb/core/port.c 	if (!port_dev->req) {
req               571 drivers/usb/core/port.c 	retval = dev_pm_qos_add_request(&port_dev->dev, port_dev->req,
req               606 drivers/usb/core/port.c 	retval = dev_pm_qos_remove_request(port_dev->req);
req               608 drivers/usb/core/port.c 		kfree(port_dev->req);
req               609 drivers/usb/core/port.c 		port_dev->req = NULL;
req               153 drivers/usb/dwc2/core.h 	struct dwc2_hsotg_req    *req;
req               193 drivers/usb/dwc2/core.h 	struct usb_request      req;
req               228 drivers/usb/dwc2/debugfs.c 	struct dwc2_hsotg_req *req;
req               263 drivers/usb/dwc2/debugfs.c 	list_for_each_entry(req, &ep->queue, queue) {
req               270 drivers/usb/dwc2/debugfs.c 			   req == ep->req ? '*' : ' ',
req               271 drivers/usb/dwc2/debugfs.c 			   req, req->req.length, req->req.buf);
req               273 drivers/usb/dwc2/debugfs.c 			   req->req.actual, req->req.status);
req                37 drivers/usb/dwc2/gadget.c static inline struct dwc2_hsotg_req *our_req(struct usb_request *req)
req                39 drivers/usb/dwc2/gadget.c 	return container_of(req, struct dwc2_hsotg_req, req);
req               386 drivers/usb/dwc2/gadget.c 	struct dwc2_hsotg_req *req;
req               388 drivers/usb/dwc2/gadget.c 	req = kzalloc(sizeof(*req), flags);
req               389 drivers/usb/dwc2/gadget.c 	if (!req)
req               392 drivers/usb/dwc2/gadget.c 	INIT_LIST_HEAD(&req->queue);
req               394 drivers/usb/dwc2/gadget.c 	return &req->req;
req               422 drivers/usb/dwc2/gadget.c 	struct usb_request *req = &hs_req->req;
req               424 drivers/usb/dwc2/gadget.c 	usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
req               497 drivers/usb/dwc2/gadget.c 	int buf_pos = hs_req->req.actual;
req               629 drivers/usb/dwc2/gadget.c 		to_write, hs_req->req.length, can_write, buf_pos);
req               634 drivers/usb/dwc2/gadget.c 	hs_req->req.actual = buf_pos + to_write;
req               641 drivers/usb/dwc2/gadget.c 	data = hs_req->req.buf + buf_pos;
req               847 drivers/usb/dwc2/gadget.c 	if (hs_ep->req)
req               848 drivers/usb/dwc2/gadget.c 		ureq = &hs_ep->req->req;
req               979 drivers/usb/dwc2/gadget.c 		dma_addr_t dma_addr = hs_req->req.dma;
req               981 drivers/usb/dwc2/gadget.c 		if (hs_req->req.num_sgs) {
req               982 drivers/usb/dwc2/gadget.c 			WARN_ON(hs_req->req.num_sgs > 1);
req               983 drivers/usb/dwc2/gadget.c 			dma_addr = sg_dma_address(hs_req->req.sg);
req               986 drivers/usb/dwc2/gadget.c 						 hs_req->req.length);
req              1018 drivers/usb/dwc2/gadget.c 	struct usb_request *ureq = &hs_req->req;
req              1031 drivers/usb/dwc2/gadget.c 		if (hs_ep->req && !continuing) {
req              1035 drivers/usb/dwc2/gadget.c 		} else if (hs_ep->req != hs_req && continuing) {
req              1112 drivers/usb/dwc2/gadget.c 	hs_ep->req = hs_req;
req              1229 drivers/usb/dwc2/gadget.c 			     struct usb_request *req)
req              1233 drivers/usb/dwc2/gadget.c 	ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
req              1241 drivers/usb/dwc2/gadget.c 		__func__, req->buf, req->length);
req              1250 drivers/usb/dwc2/gadget.c 	void *req_buf = hs_req->req.buf;
req              1259 drivers/usb/dwc2/gadget.c 		hs_ep->ep.name, req_buf, hs_req->req.length);
req              1261 drivers/usb/dwc2/gadget.c 	hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC);
req              1262 drivers/usb/dwc2/gadget.c 	if (!hs_req->req.buf) {
req              1263 drivers/usb/dwc2/gadget.c 		hs_req->req.buf = req_buf;
req              1274 drivers/usb/dwc2/gadget.c 		memcpy(hs_req->req.buf, req_buf, hs_req->req.length);
req              1288 drivers/usb/dwc2/gadget.c 		hs_ep->ep.name, hs_req->req.status, hs_req->req.actual);
req              1291 drivers/usb/dwc2/gadget.c 	if (!hs_ep->dir_in && !hs_req->req.status)
req              1292 drivers/usb/dwc2/gadget.c 		memcpy(hs_req->saved_req_buf, hs_req->req.buf,
req              1293 drivers/usb/dwc2/gadget.c 		       hs_req->req.actual);
req              1296 drivers/usb/dwc2/gadget.c 	kfree(hs_req->req.buf);
req              1298 drivers/usb/dwc2/gadget.c 	hs_req->req.buf = hs_req->saved_req_buf;
req              1361 drivers/usb/dwc2/gadget.c static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
req              1364 drivers/usb/dwc2/gadget.c 	struct dwc2_hsotg_req *hs_req = our_req(req);
req              1374 drivers/usb/dwc2/gadget.c 		ep->name, req, req->length, req->buf, req->no_interrupt,
req              1375 drivers/usb/dwc2/gadget.c 		req->zero, req->short_not_ok);
req              1386 drivers/usb/dwc2/gadget.c 	req->actual = 0;
req              1387 drivers/usb/dwc2/gadget.c 	req->status = -EINPROGRESS;
req              1391 drivers/usb/dwc2/gadget.c 	    req->length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
req              1401 drivers/usb/dwc2/gadget.c 		if (hs_ep->dir_in && req->length > maxsize) {
req              1403 drivers/usb/dwc2/gadget.c 				req->length, maxsize);
req              1407 drivers/usb/dwc2/gadget.c 		if (!hs_ep->dir_in && req->length > hs_ep->ep.maxpacket) {
req              1409 drivers/usb/dwc2/gadget.c 				req->length, hs_ep->ep.maxpacket);
req              1420 drivers/usb/dwc2/gadget.c 		ret = dwc2_hsotg_map_dma(hs, hs_ep, req);
req              1442 drivers/usb/dwc2/gadget.c 			dma_addr_t dma_addr = hs_req->req.dma;
req              1444 drivers/usb/dwc2/gadget.c 			if (hs_req->req.num_sgs) {
req              1445 drivers/usb/dwc2/gadget.c 				WARN_ON(hs_req->req.num_sgs > 1);
req              1446 drivers/usb/dwc2/gadget.c 				dma_addr = sg_dma_address(hs_req->req.sg);
req              1449 drivers/usb/dwc2/gadget.c 						   hs_req->req.length);
req              1455 drivers/usb/dwc2/gadget.c 	if (!hs_ep->index && !req->length && !hs_ep->dir_in &&
req              1481 drivers/usb/dwc2/gadget.c static int dwc2_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req,
req              1490 drivers/usb/dwc2/gadget.c 	ret = dwc2_hsotg_ep_queue(ep, req, gfp_flags);
req              1497 drivers/usb/dwc2/gadget.c 				       struct usb_request *req)
req              1499 drivers/usb/dwc2/gadget.c 	struct dwc2_hsotg_req *hs_req = our_req(req);
req              1513 drivers/usb/dwc2/gadget.c 					 struct usb_request *req)
req              1518 drivers/usb/dwc2/gadget.c 	dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
req              1520 drivers/usb/dwc2/gadget.c 	dwc2_hsotg_ep_free_request(ep, req);
req              1593 drivers/usb/dwc2/gadget.c 	struct usb_request *req;
req              1598 drivers/usb/dwc2/gadget.c 	req = dwc2_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
req              1599 drivers/usb/dwc2/gadget.c 	hsotg->ep0_reply = req;
req              1600 drivers/usb/dwc2/gadget.c 	if (!req) {
req              1605 drivers/usb/dwc2/gadget.c 	req->buf = hsotg->ep0_buff;
req              1606 drivers/usb/dwc2/gadget.c 	req->length = length;
req              1611 drivers/usb/dwc2/gadget.c 	req->zero = 0;
req              1612 drivers/usb/dwc2/gadget.c 	req->complete = dwc2_hsotg_complete_oursetup;
req              1615 drivers/usb/dwc2/gadget.c 		memcpy(req->buf, buff, length);
req              1617 drivers/usb/dwc2/gadget.c 	ret = dwc2_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
req              1820 drivers/usb/dwc2/gadget.c 				if (ep->req) {
req              1821 drivers/usb/dwc2/gadget.c 					hs_req = ep->req;
req              1822 drivers/usb/dwc2/gadget.c 					ep->req = NULL;
req              1824 drivers/usb/dwc2/gadget.c 					if (hs_req->req.complete) {
req              1827 drivers/usb/dwc2/gadget.c 							&ep->ep, &hs_req->req);
req              1833 drivers/usb/dwc2/gadget.c 				if (!ep->req)
req              1977 drivers/usb/dwc2/gadget.c 				      struct usb_request *req)
req              1982 drivers/usb/dwc2/gadget.c 	if (req->status < 0) {
req              1983 drivers/usb/dwc2/gadget.c 		dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
req              1988 drivers/usb/dwc2/gadget.c 	if (req->actual == 0)
req              1991 drivers/usb/dwc2/gadget.c 		dwc2_hsotg_process_control(hsotg, req->buf);
req              2004 drivers/usb/dwc2/gadget.c 	struct usb_request *req = hsotg->ctrl_req;
req              2005 drivers/usb/dwc2/gadget.c 	struct dwc2_hsotg_req *hs_req = our_req(req);
req              2010 drivers/usb/dwc2/gadget.c 	req->zero = 0;
req              2011 drivers/usb/dwc2/gadget.c 	req->length = 8;
req              2012 drivers/usb/dwc2/gadget.c 	req->buf = hsotg->ctrl_buff;
req              2013 drivers/usb/dwc2/gadget.c 	req->complete = dwc2_hsotg_complete_setup;
req              2024 drivers/usb/dwc2/gadget.c 	ret = dwc2_hsotg_ep_queue(&hsotg->eps_out[0]->ep, req, GFP_ATOMIC);
req              2093 drivers/usb/dwc2/gadget.c 		hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);
req              2100 drivers/usb/dwc2/gadget.c 	if (hs_req->req.status == -EINPROGRESS)
req              2101 drivers/usb/dwc2/gadget.c 		hs_req->req.status = result;
req              2108 drivers/usb/dwc2/gadget.c 	hs_ep->req = NULL;
req              2116 drivers/usb/dwc2/gadget.c 	if (hs_req->req.complete) {
req              2118 drivers/usb/dwc2/gadget.c 		usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req);
req              2132 drivers/usb/dwc2/gadget.c 	if (!hs_ep->req && result >= 0)
req              2164 drivers/usb/dwc2/gadget.c 		ureq = &hs_req->req;
req              2230 drivers/usb/dwc2/gadget.c 	struct dwc2_hsotg_req *hs_req = hs_ep->req;
req              2251 drivers/usb/dwc2/gadget.c 	read_ptr = hs_req->req.actual;
req              2252 drivers/usb/dwc2/gadget.c 	max_req = hs_req->req.length - read_ptr;
req              2255 drivers/usb/dwc2/gadget.c 		__func__, to_read, max_req, read_ptr, hs_req->req.length);
req              2268 drivers/usb/dwc2/gadget.c 	hs_req->req.actual += to_read;
req              2276 drivers/usb/dwc2/gadget.c 		       hs_req->req.buf + read_ptr, to_read);
req              2357 drivers/usb/dwc2/gadget.c 	struct dwc2_hsotg_req *hs_req = hs_ep->req;
req              2358 drivers/usb/dwc2/gadget.c 	struct usb_request *req = &hs_req->req;
req              2392 drivers/usb/dwc2/gadget.c 		req->actual = size_done;
req              2396 drivers/usb/dwc2/gadget.c 	if (req->actual < req->length && size_left == 0) {
req              2401 drivers/usb/dwc2/gadget.c 	if (req->actual < req->length && req->short_not_ok) {
req              2403 drivers/usb/dwc2/gadget.c 			__func__, req->actual, req->length);
req              2432 drivers/usb/dwc2/gadget.c 		req->frame_number = hsotg->frame_number;
req              2627 drivers/usb/dwc2/gadget.c 	struct dwc2_hsotg_req *hs_req = hs_ep->req;
req              2640 drivers/usb/dwc2/gadget.c 	if (hs_req->req.actual < hs_req->req.length) {
req              2660 drivers/usb/dwc2/gadget.c 	struct dwc2_hsotg_req *hs_req = hs_ep->req;
req              2716 drivers/usb/dwc2/gadget.c 	if (hs_req->req.actual != size_done)
req              2718 drivers/usb/dwc2/gadget.c 			__func__, hs_req->req.actual, size_done);
req              2720 drivers/usb/dwc2/gadget.c 	hs_req->req.actual = size_done;
req              2722 drivers/usb/dwc2/gadget.c 		hs_req->req.length, hs_req->req.actual, hs_req->req.zero);
req              2724 drivers/usb/dwc2/gadget.c 	if (!size_left && hs_req->req.actual < hs_req->req.length) {
req              3035 drivers/usb/dwc2/gadget.c 			if (idx == 0 && !hs_ep->req)
req              3235 drivers/usb/dwc2/gadget.c 	ep->req = NULL;
req              3238 drivers/usb/dwc2/gadget.c 		struct dwc2_hsotg_req *req = get_ep_head(ep);
req              3240 drivers/usb/dwc2/gadget.c 		dwc2_hsotg_complete_request(hsotg, ep, req, result);
req              4217 drivers/usb/dwc2/gadget.c 	struct dwc2_hsotg_req *req, *treq;
req              4219 drivers/usb/dwc2/gadget.c 	list_for_each_entry_safe(req, treq, &ep->queue, queue) {
req              4220 drivers/usb/dwc2/gadget.c 		if (req == test)
req              4232 drivers/usb/dwc2/gadget.c static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
req              4234 drivers/usb/dwc2/gadget.c 	struct dwc2_hsotg_req *hs_req = our_req(req);
req              4239 drivers/usb/dwc2/gadget.c 	dev_dbg(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
req              4249 drivers/usb/dwc2/gadget.c 	if (req == &hs_ep->req->req)
req                32 drivers/usb/dwc3/ep0.c 		struct dwc3_ep *dep, struct dwc3_request *req);
req                88 drivers/usb/dwc3/ep0.c 		struct dwc3_request *req)
req                92 drivers/usb/dwc3/ep0.c 	req->request.actual	= 0;
req                93 drivers/usb/dwc3/ep0.c 	req->request.status	= -EINPROGRESS;
req                94 drivers/usb/dwc3/ep0.c 	req->epnum		= dep->number;
req                96 drivers/usb/dwc3/ep0.c 	list_add_tail(&req->list, &dep->pending_list);
req               117 drivers/usb/dwc3/ep0.c 		__dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);
req               180 drivers/usb/dwc3/ep0.c 		__dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);
req               191 drivers/usb/dwc3/ep0.c 	struct dwc3_request		*req = to_dwc3_request(request);
req               213 drivers/usb/dwc3/ep0.c 	ret = __dwc3_gadget_ep0_queue(dep, req);
req               236 drivers/usb/dwc3/ep0.c 		struct dwc3_request	*req;
req               238 drivers/usb/dwc3/ep0.c 		req = next_request(&dep->pending_list);
req               239 drivers/usb/dwc3/ep0.c 		dwc3_gadget_giveback(dep, req, -ECONNRESET);
req               301 drivers/usb/dwc3/ep0.c static void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req)
req               653 drivers/usb/dwc3/ep0.c static void dwc3_ep0_set_sel_cmpl(struct usb_ep *ep, struct usb_request *req)
req               670 drivers/usb/dwc3/ep0.c 	memcpy(&timing, req->buf, sizeof(timing));
req               943 drivers/usb/dwc3/ep0.c 		struct dwc3_ep *dep, struct dwc3_request *req)
req               947 drivers/usb/dwc3/ep0.c 	req->direction = !!dep->number;
req               949 drivers/usb/dwc3/ep0.c 	if (req->request.length == 0) {
req               953 drivers/usb/dwc3/ep0.c 	} else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket)
req               959 drivers/usb/dwc3/ep0.c 				&req->request, dep->number);
req               964 drivers/usb/dwc3/ep0.c 		rem = req->request.length % maxpacket;
req               968 drivers/usb/dwc3/ep0.c 		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
req               969 drivers/usb/dwc3/ep0.c 					 req->request.length,
req               973 drivers/usb/dwc3/ep0.c 		req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];
req               981 drivers/usb/dwc3/ep0.c 	} else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
req               982 drivers/usb/dwc3/ep0.c 		   req->request.length && req->request.zero) {
req               985 drivers/usb/dwc3/ep0.c 				&req->request, dep->number);
req               990 drivers/usb/dwc3/ep0.c 		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
req               991 drivers/usb/dwc3/ep0.c 					 req->request.length,
req               995 drivers/usb/dwc3/ep0.c 		req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];
req              1004 drivers/usb/dwc3/ep0.c 				&req->request, dep->number);
req              1008 drivers/usb/dwc3/ep0.c 		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
req              1009 drivers/usb/dwc3/ep0.c 				req->request.length, DWC3_TRBCTL_CONTROL_DATA,
req              1012 drivers/usb/dwc3/ep0.c 		req->trb = &dwc->ep0_trb[dep->trb_enqueue];
req               173 drivers/usb/dwc3/gadget.c 		struct dwc3_request *req, int status)
req               177 drivers/usb/dwc3/gadget.c 	list_del(&req->list);
req               178 drivers/usb/dwc3/gadget.c 	req->remaining = 0;
req               179 drivers/usb/dwc3/gadget.c 	req->needs_extra_trb = false;
req               181 drivers/usb/dwc3/gadget.c 	if (req->request.status == -EINPROGRESS)
req               182 drivers/usb/dwc3/gadget.c 		req->request.status = status;
req               184 drivers/usb/dwc3/gadget.c 	if (req->trb)
req               186 drivers/usb/dwc3/gadget.c 				&req->request, req->direction);
req               188 drivers/usb/dwc3/gadget.c 	req->trb = NULL;
req               189 drivers/usb/dwc3/gadget.c 	trace_dwc3_gadget_giveback(req);
req               205 drivers/usb/dwc3/gadget.c void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
req               210 drivers/usb/dwc3/gadget.c 	dwc3_gadget_del_and_unmap_request(dep, req, status);
req               211 drivers/usb/dwc3/gadget.c 	req->status = DWC3_REQUEST_STATUS_COMPLETED;
req               214 drivers/usb/dwc3/gadget.c 	usb_gadget_giveback_request(&dep->endpoint, &req->request);
req               694 drivers/usb/dwc3/gadget.c 	struct dwc3_request		*req;
req               700 drivers/usb/dwc3/gadget.c 		req = next_request(&dep->started_list);
req               702 drivers/usb/dwc3/gadget.c 		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
req               706 drivers/usb/dwc3/gadget.c 		req = next_request(&dep->pending_list);
req               708 drivers/usb/dwc3/gadget.c 		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
req               712 drivers/usb/dwc3/gadget.c 		req = next_request(&dep->cancelled_list);
req               714 drivers/usb/dwc3/gadget.c 		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
req               836 drivers/usb/dwc3/gadget.c 	struct dwc3_request		*req;
req               839 drivers/usb/dwc3/gadget.c 	req = kzalloc(sizeof(*req), gfp_flags);
req               840 drivers/usb/dwc3/gadget.c 	if (!req)
req               843 drivers/usb/dwc3/gadget.c 	req->direction	= dep->direction;
req               844 drivers/usb/dwc3/gadget.c 	req->epnum	= dep->number;
req               845 drivers/usb/dwc3/gadget.c 	req->dep	= dep;
req               846 drivers/usb/dwc3/gadget.c 	req->status	= DWC3_REQUEST_STATUS_UNKNOWN;
req               848 drivers/usb/dwc3/gadget.c 	trace_dwc3_alloc_request(req);
req               850 drivers/usb/dwc3/gadget.c 	return &req->request;
req               856 drivers/usb/dwc3/gadget.c 	struct dwc3_request		*req = to_dwc3_request(request);
req               858 drivers/usb/dwc3/gadget.c 	trace_dwc3_free_request(req);
req               859 drivers/usb/dwc3/gadget.c 	kfree(req);
req              1024 drivers/usb/dwc3/gadget.c 		struct dwc3_request *req, unsigned chain, unsigned node)
req              1029 drivers/usb/dwc3/gadget.c 	unsigned		stream_id = req->request.stream_id;
req              1030 drivers/usb/dwc3/gadget.c 	unsigned		short_not_ok = req->request.short_not_ok;
req              1031 drivers/usb/dwc3/gadget.c 	unsigned		no_interrupt = req->request.no_interrupt;
req              1033 drivers/usb/dwc3/gadget.c 	if (req->request.num_sgs > 0) {
req              1034 drivers/usb/dwc3/gadget.c 		length = sg_dma_len(req->start_sg);
req              1035 drivers/usb/dwc3/gadget.c 		dma = sg_dma_address(req->start_sg);
req              1037 drivers/usb/dwc3/gadget.c 		length = req->request.length;
req              1038 drivers/usb/dwc3/gadget.c 		dma = req->request.dma;
req              1043 drivers/usb/dwc3/gadget.c 	if (!req->trb) {
req              1044 drivers/usb/dwc3/gadget.c 		dwc3_gadget_move_started_request(req);
req              1045 drivers/usb/dwc3/gadget.c 		req->trb = trb;
req              1046 drivers/usb/dwc3/gadget.c 		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
req              1049 drivers/usb/dwc3/gadget.c 	req->num_trbs++;
req              1056 drivers/usb/dwc3/gadget.c 		struct dwc3_request *req)
req              1058 drivers/usb/dwc3/gadget.c 	struct scatterlist *sg = req->start_sg;
req              1062 drivers/usb/dwc3/gadget.c 	unsigned int remaining = req->request.num_mapped_sgs
req              1063 drivers/usb/dwc3/gadget.c 		- req->num_queued_sgs;
req              1066 drivers/usb/dwc3/gadget.c 		unsigned int length = req->request.length;
req              1085 drivers/usb/dwc3/gadget.c 			req->needs_extra_trb = true;
req              1088 drivers/usb/dwc3/gadget.c 			dwc3_prepare_one_trb(dep, req, true, i);
req              1092 drivers/usb/dwc3/gadget.c 			req->num_trbs++;
req              1095 drivers/usb/dwc3/gadget.c 					req->request.stream_id,
req              1096 drivers/usb/dwc3/gadget.c 					req->request.short_not_ok,
req              1097 drivers/usb/dwc3/gadget.c 					req->request.no_interrupt);
req              1099 drivers/usb/dwc3/gadget.c 			dwc3_prepare_one_trb(dep, req, chain, i);
req              1110 drivers/usb/dwc3/gadget.c 			req->start_sg = sg_next(s);
req              1112 drivers/usb/dwc3/gadget.c 		req->num_queued_sgs++;
req              1120 drivers/usb/dwc3/gadget.c 		struct dwc3_request *req)
req              1122 drivers/usb/dwc3/gadget.c 	unsigned int length = req->request.length;
req              1130 drivers/usb/dwc3/gadget.c 		req->needs_extra_trb = true;
req              1133 drivers/usb/dwc3/gadget.c 		dwc3_prepare_one_trb(dep, req, true, 0);
req              1137 drivers/usb/dwc3/gadget.c 		req->num_trbs++;
req              1139 drivers/usb/dwc3/gadget.c 				false, 1, req->request.stream_id,
req              1140 drivers/usb/dwc3/gadget.c 				req->request.short_not_ok,
req              1141 drivers/usb/dwc3/gadget.c 				req->request.no_interrupt);
req              1142 drivers/usb/dwc3/gadget.c 	} else if (req->request.zero && req->request.length &&
req              1143 drivers/usb/dwc3/gadget.c 		   (IS_ALIGNED(req->request.length, maxp))) {
req              1147 drivers/usb/dwc3/gadget.c 		req->needs_extra_trb = true;
req              1150 drivers/usb/dwc3/gadget.c 		dwc3_prepare_one_trb(dep, req, true, 0);
req              1154 drivers/usb/dwc3/gadget.c 		req->num_trbs++;
req              1156 drivers/usb/dwc3/gadget.c 				false, 1, req->request.stream_id,
req              1157 drivers/usb/dwc3/gadget.c 				req->request.short_not_ok,
req              1158 drivers/usb/dwc3/gadget.c 				req->request.no_interrupt);
req              1160 drivers/usb/dwc3/gadget.c 		dwc3_prepare_one_trb(dep, req, false, 0);
req              1174 drivers/usb/dwc3/gadget.c 	struct dwc3_request	*req, *n;
req              1188 drivers/usb/dwc3/gadget.c 	list_for_each_entry(req, &dep->started_list, list) {
req              1189 drivers/usb/dwc3/gadget.c 		if (req->num_pending_sgs > 0)
req              1190 drivers/usb/dwc3/gadget.c 			dwc3_prepare_one_trb_sg(dep, req);
req              1196 drivers/usb/dwc3/gadget.c 	list_for_each_entry_safe(req, n, &dep->pending_list, list) {
req              1200 drivers/usb/dwc3/gadget.c 		ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
req              1205 drivers/usb/dwc3/gadget.c 		req->sg			= req->request.sg;
req              1206 drivers/usb/dwc3/gadget.c 		req->start_sg		= req->sg;
req              1207 drivers/usb/dwc3/gadget.c 		req->num_queued_sgs	= 0;
req              1208 drivers/usb/dwc3/gadget.c 		req->num_pending_sgs	= req->request.num_mapped_sgs;
req              1210 drivers/usb/dwc3/gadget.c 		if (req->num_pending_sgs > 0)
req              1211 drivers/usb/dwc3/gadget.c 			dwc3_prepare_one_trb_sg(dep, req);
req              1213 drivers/usb/dwc3/gadget.c 			dwc3_prepare_one_trb_linear(dep, req);
req              1223 drivers/usb/dwc3/gadget.c 	struct dwc3_request		*req;
req              1234 drivers/usb/dwc3/gadget.c 	req = next_request(&dep->started_list);
req              1235 drivers/usb/dwc3/gadget.c 	if (!req) {
req              1243 drivers/usb/dwc3/gadget.c 		params.param0 = upper_32_bits(req->trb_dma);
req              1244 drivers/usb/dwc3/gadget.c 		params.param1 = lower_32_bits(req->trb_dma);
req              1248 drivers/usb/dwc3/gadget.c 			cmd |= DWC3_DEPCMD_PARAM(req->request.stream_id);
req              1264 drivers/usb/dwc3/gadget.c 		if (req->trb)
req              1265 drivers/usb/dwc3/gadget.c 			memset(req->trb, 0, sizeof(struct dwc3_trb));
req              1266 drivers/usb/dwc3/gadget.c 		dwc3_gadget_del_and_unmap_request(dep, req, ret);
req              1428 drivers/usb/dwc3/gadget.c static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
req              1438 drivers/usb/dwc3/gadget.c 	if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
req              1439 drivers/usb/dwc3/gadget.c 				&req->request, req->dep->name))
req              1442 drivers/usb/dwc3/gadget.c 	if (WARN(req->status < DWC3_REQUEST_STATUS_COMPLETED,
req              1444 drivers/usb/dwc3/gadget.c 				dep->name, &req->request))
req              1449 drivers/usb/dwc3/gadget.c 	req->request.actual	= 0;
req              1450 drivers/usb/dwc3/gadget.c 	req->request.status	= -EINPROGRESS;
req              1452 drivers/usb/dwc3/gadget.c 	trace_dwc3_ep_queue(req);
req              1454 drivers/usb/dwc3/gadget.c 	list_add_tail(&req->list, &dep->pending_list);
req              1455 drivers/usb/dwc3/gadget.c 	req->status = DWC3_REQUEST_STATUS_QUEUED;
req              1489 drivers/usb/dwc3/gadget.c 	struct dwc3_request		*req = to_dwc3_request(request);
req              1498 drivers/usb/dwc3/gadget.c 	ret = __dwc3_gadget_ep_queue(dep, req);
req              1504 drivers/usb/dwc3/gadget.c static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *req)
req              1518 drivers/usb/dwc3/gadget.c 	for (i = 0; i < req->num_trbs; i++) {
req              1526 drivers/usb/dwc3/gadget.c 	req->num_trbs = 0;
req              1531 drivers/usb/dwc3/gadget.c 	struct dwc3_request		*req;
req              1534 drivers/usb/dwc3/gadget.c 	list_for_each_entry_safe(req, tmp, &dep->cancelled_list, list) {
req              1535 drivers/usb/dwc3/gadget.c 		dwc3_gadget_ep_skip_trbs(dep, req);
req              1536 drivers/usb/dwc3/gadget.c 		dwc3_gadget_giveback(dep, req, -ECONNRESET);
req              1543 drivers/usb/dwc3/gadget.c 	struct dwc3_request		*req = to_dwc3_request(request);
req              1552 drivers/usb/dwc3/gadget.c 	trace_dwc3_ep_dequeue(req);
req              1557 drivers/usb/dwc3/gadget.c 		if (r == req)
req              1561 drivers/usb/dwc3/gadget.c 	if (r != req) {
req              1563 drivers/usb/dwc3/gadget.c 			if (r == req)
req              1566 drivers/usb/dwc3/gadget.c 		if (r == req) {
req              1573 drivers/usb/dwc3/gadget.c 			dwc3_gadget_move_cancelled_request(req);
req              1586 drivers/usb/dwc3/gadget.c 	dwc3_gadget_giveback(dep, req, -ECONNRESET);
req              2406 drivers/usb/dwc3/gadget.c 		struct dwc3_request *req, struct dwc3_trb *trb,
req              2414 drivers/usb/dwc3/gadget.c 	req->num_trbs--;
req              2439 drivers/usb/dwc3/gadget.c 		req->request.frame_number = frame_number;
req              2448 drivers/usb/dwc3/gadget.c 	if (req->needs_extra_trb && !(trb->ctrl & DWC3_TRB_CTRL_CHN)) {
req              2454 drivers/usb/dwc3/gadget.c 	req->remaining += count;
req              2470 drivers/usb/dwc3/gadget.c 		struct dwc3_request *req, const struct dwc3_event_depevt *event,
req              2474 drivers/usb/dwc3/gadget.c 	struct scatterlist *sg = req->sg;
req              2476 drivers/usb/dwc3/gadget.c 	unsigned int pending = req->num_pending_sgs;
req              2483 drivers/usb/dwc3/gadget.c 		req->sg = sg_next(s);
req              2484 drivers/usb/dwc3/gadget.c 		req->num_pending_sgs--;
req              2486 drivers/usb/dwc3/gadget.c 		ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req,
req              2496 drivers/usb/dwc3/gadget.c 		struct dwc3_request *req, const struct dwc3_event_depevt *event,
req              2501 drivers/usb/dwc3/gadget.c 	return dwc3_gadget_ep_reclaim_completed_trb(dep, req, trb,
req              2505 drivers/usb/dwc3/gadget.c static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
req              2507 drivers/usb/dwc3/gadget.c 	return req->num_pending_sgs == 0;
req              2512 drivers/usb/dwc3/gadget.c 		struct dwc3_request *req, int status)
req              2516 drivers/usb/dwc3/gadget.c 	if (req->num_pending_sgs)
req              2517 drivers/usb/dwc3/gadget.c 		ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event,
req              2520 drivers/usb/dwc3/gadget.c 		ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
req              2523 drivers/usb/dwc3/gadget.c 	if (req->needs_extra_trb) {
req              2524 drivers/usb/dwc3/gadget.c 		ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
req              2526 drivers/usb/dwc3/gadget.c 		req->needs_extra_trb = false;
req              2529 drivers/usb/dwc3/gadget.c 	req->request.actual = req->request.length - req->remaining;
req              2531 drivers/usb/dwc3/gadget.c 	if (!dwc3_gadget_ep_request_completed(req)) {
req              2536 drivers/usb/dwc3/gadget.c 	dwc3_gadget_giveback(dep, req, status);
req              2545 drivers/usb/dwc3/gadget.c 	struct dwc3_request	*req;
req              2548 drivers/usb/dwc3/gadget.c 	list_for_each_entry_safe(req, tmp, &dep->started_list, list) {
req              2552 drivers/usb/dwc3/gadget.c 				req, status);
req                80 drivers/usb/dwc3/gadget.h static inline void dwc3_gadget_move_started_request(struct dwc3_request *req)
req                82 drivers/usb/dwc3/gadget.h 	struct dwc3_ep		*dep = req->dep;
req                84 drivers/usb/dwc3/gadget.h 	req->status = DWC3_REQUEST_STATUS_STARTED;
req                85 drivers/usb/dwc3/gadget.h 	list_move_tail(&req->list, &dep->started_list);
req                95 drivers/usb/dwc3/gadget.h static inline void dwc3_gadget_move_cancelled_request(struct dwc3_request *req)
req                97 drivers/usb/dwc3/gadget.h 	struct dwc3_ep		*dep = req->dep;
req                99 drivers/usb/dwc3/gadget.h 	req->status = DWC3_REQUEST_STATUS_CANCELLED;
req               100 drivers/usb/dwc3/gadget.h 	list_move_tail(&req->list, &dep->cancelled_list);
req               103 drivers/usb/dwc3/gadget.h void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
req               102 drivers/usb/dwc3/trace.h 	TP_PROTO(struct dwc3_request *req),
req               103 drivers/usb/dwc3/trace.h 	TP_ARGS(req),
req               105 drivers/usb/dwc3/trace.h 		__string(name, req->dep->name)
req               106 drivers/usb/dwc3/trace.h 		__field(struct dwc3_request *, req)
req               115 drivers/usb/dwc3/trace.h 		__assign_str(name, req->dep->name);
req               116 drivers/usb/dwc3/trace.h 		__entry->req = req;
req               117 drivers/usb/dwc3/trace.h 		__entry->actual = req->request.actual;
req               118 drivers/usb/dwc3/trace.h 		__entry->length = req->request.length;
req               119 drivers/usb/dwc3/trace.h 		__entry->status = req->request.status;
req               120 drivers/usb/dwc3/trace.h 		__entry->zero = req->request.zero;
req               121 drivers/usb/dwc3/trace.h 		__entry->short_not_ok = req->request.short_not_ok;
req               122 drivers/usb/dwc3/trace.h 		__entry->no_interrupt = req->request.no_interrupt;
req               125 drivers/usb/dwc3/trace.h 		__get_str(name), __entry->req, __entry->actual, __entry->length,
req               134 drivers/usb/dwc3/trace.h 	TP_PROTO(struct dwc3_request *req),
req               135 drivers/usb/dwc3/trace.h 	TP_ARGS(req)
req               139 drivers/usb/dwc3/trace.h 	TP_PROTO(struct dwc3_request *req),
req               140 drivers/usb/dwc3/trace.h 	TP_ARGS(req)
req               144 drivers/usb/dwc3/trace.h 	TP_PROTO(struct dwc3_request *req),
req               145 drivers/usb/dwc3/trace.h 	TP_ARGS(req)
req               149 drivers/usb/dwc3/trace.h 	TP_PROTO(struct dwc3_request *req),
req               150 drivers/usb/dwc3/trace.h 	TP_ARGS(req)
req               154 drivers/usb/dwc3/trace.h 	TP_PROTO(struct dwc3_request *req),
req               155 drivers/usb/dwc3/trace.h 	TP_ARGS(req)
req               333 drivers/usb/early/ehci-dbgp.c 	struct usb_ctrlrequest req;
req               342 drivers/usb/early/ehci-dbgp.c 	req.bRequestType = requesttype;
req               343 drivers/usb/early/ehci-dbgp.c 	req.bRequest = request;
req               344 drivers/usb/early/ehci-dbgp.c 	req.wValue = cpu_to_le16(value);
req               345 drivers/usb/early/ehci-dbgp.c 	req.wIndex = cpu_to_le16(index);
req               346 drivers/usb/early/ehci-dbgp.c 	req.wLength = cpu_to_le16(size);
req               352 drivers/usb/early/ehci-dbgp.c 	ctrl = dbgp_len_update(ctrl, sizeof(req));
req               357 drivers/usb/early/ehci-dbgp.c 	dbgp_set_data(&req, sizeof(req));
req               558 drivers/usb/gadget/composite.c 			return config_buf(c, speed, cdev->req->buf, type);
req               616 drivers/usb/gadget/composite.c 	struct usb_bos_descriptor	*bos = cdev->req->buf;
req               652 drivers/usb/gadget/composite.c 	usb_ext = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
req               668 drivers/usb/gadget/composite.c 		ss_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
req               688 drivers/usb/gadget/composite.c 		ssp_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
req               735 drivers/usb/gadget/composite.c 	struct usb_qualifier_descriptor	*qual = cdev->req->buf;
req              1379 drivers/usb/gadget/composite.c static void composite_setup_complete(struct usb_ep *ep, struct usb_request *req)
req              1383 drivers/usb/gadget/composite.c 	if (req->status || req->actual != req->length)
req              1386 drivers/usb/gadget/composite.c 				req->status, req->actual, req->length);
req              1395 drivers/usb/gadget/composite.c 	if (!req->context)
req              1398 drivers/usb/gadget/composite.c 	cdev = req->context;
req              1400 drivers/usb/gadget/composite.c 	if (cdev->req == req)
req              1402 drivers/usb/gadget/composite.c 	else if (cdev->os_desc_req == req)
req              1405 drivers/usb/gadget/composite.c 		WARN(1, "unknown request %p\n", req);
req              1409 drivers/usb/gadget/composite.c 		struct usb_request *req, gfp_t gfp_flags)
req              1413 drivers/usb/gadget/composite.c 	ret = usb_ep_queue(cdev->gadget->ep0, req, gfp_flags);
req              1415 drivers/usb/gadget/composite.c 		if (cdev->req == req)
req              1417 drivers/usb/gadget/composite.c 		else if (cdev->os_desc_req == req)
req              1420 drivers/usb/gadget/composite.c 			WARN(1, "unknown request %p\n", req);
req              1587 drivers/usb/gadget/composite.c 	struct usb_request		*req = cdev->req;
req              1601 drivers/usb/gadget/composite.c 	req->zero = 0;
req              1602 drivers/usb/gadget/composite.c 	req->context = cdev;
req              1603 drivers/usb/gadget/composite.c 	req->complete = composite_setup_complete;
req              1604 drivers/usb/gadget/composite.c 	req->length = 0;
req              1642 drivers/usb/gadget/composite.c 			memcpy(req->buf, &cdev->desc, value);
req              1663 drivers/usb/gadget/composite.c 			value = get_string(cdev, req->buf,
req              1698 drivers/usb/gadget/composite.c 				memcpy(req->buf, config->descriptors[0], value);
req              1724 drivers/usb/gadget/composite.c 			*(u8 *)req->buf = cdev->config->bConfigurationValue;
req              1726 drivers/usb/gadget/composite.c 			*(u8 *)req->buf = 0;
req              1772 drivers/usb/gadget/composite.c 		*((u8 *)req->buf) = value;
req              1781 drivers/usb/gadget/composite.c 			*((u8 *)req->buf) = gadget->host_request_flag;
req              1798 drivers/usb/gadget/composite.c 		put_unaligned_le16(0, req->buf);
req              1807 drivers/usb/gadget/composite.c 		put_unaligned_le16(status & 0x0000ffff, req->buf);
req              1852 drivers/usb/gadget/composite.c 			req = cdev->os_desc_req;
req              1853 drivers/usb/gadget/composite.c 			req->context = cdev;
req              1854 drivers/usb/gadget/composite.c 			req->complete = composite_setup_complete;
req              1855 drivers/usb/gadget/composite.c 			buf = req->buf;
req              1975 drivers/usb/gadget/composite.c 		req->length = value;
req              1976 drivers/usb/gadget/composite.c 		req->context = cdev;
req              1977 drivers/usb/gadget/composite.c 		req->zero = value < w_length;
req              1978 drivers/usb/gadget/composite.c 		value = composite_ep0_queue(cdev, req, GFP_ATOMIC);
req              1981 drivers/usb/gadget/composite.c 			req->status = 0;
req              1982 drivers/usb/gadget/composite.c 			composite_setup_complete(gadget->ep0, req);
req              2106 drivers/usb/gadget/composite.c 	cdev->req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
req              2107 drivers/usb/gadget/composite.c 	if (!cdev->req)
req              2110 drivers/usb/gadget/composite.c 	cdev->req->buf = kmalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
req              2111 drivers/usb/gadget/composite.c 	if (!cdev->req->buf)
req              2118 drivers/usb/gadget/composite.c 	cdev->req->complete = composite_setup_complete;
req              2119 drivers/usb/gadget/composite.c 	cdev->req->context = cdev;
req              2139 drivers/usb/gadget/composite.c 	kfree(cdev->req->buf);
req              2141 drivers/usb/gadget/composite.c 	usb_ep_free_request(gadget->ep0, cdev->req);
req              2142 drivers/usb/gadget/composite.c 	cdev->req = NULL;
req              2188 drivers/usb/gadget/composite.c 	if (cdev->req) {
req              2190 drivers/usb/gadget/composite.c 			usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
req              2192 drivers/usb/gadget/composite.c 		kfree(cdev->req->buf);
req              2193 drivers/usb/gadget/composite.c 		cdev->req->buf = NULL;
req              2194 drivers/usb/gadget/composite.c 		usb_ep_free_request(cdev->gadget->ep0, cdev->req);
req              2195 drivers/usb/gadget/composite.c 		cdev->req = NULL;
req              2406 drivers/usb/gadget/composite.c 	struct usb_request	*req = cdev->req;
req              2417 drivers/usb/gadget/composite.c 		req->length = 0;
req              2418 drivers/usb/gadget/composite.c 		req->context = cdev;
req              2419 drivers/usb/gadget/composite.c 		value = composite_ep0_queue(cdev, req, GFP_ATOMIC);
req              2422 drivers/usb/gadget/composite.c 			req->status = 0;
req              2423 drivers/usb/gadget/composite.c 			composite_setup_complete(cdev->gadget->ep0, req);
req               306 drivers/usb/gadget/function/f_acm.c 		struct usb_request *req)
req               311 drivers/usb/gadget/function/f_acm.c 	if (req->status != 0) {
req               313 drivers/usb/gadget/function/f_acm.c 			acm->port_num, req->status);
req               318 drivers/usb/gadget/function/f_acm.c 	if (req->actual != sizeof(acm->port_line_coding)) {
req               320 drivers/usb/gadget/function/f_acm.c 			acm->port_num, req->actual);
req               323 drivers/usb/gadget/function/f_acm.c 		struct usb_cdc_line_coding	*value = req->buf;
req               340 drivers/usb/gadget/function/f_acm.c 	struct usb_request	*req = cdev->req;
req               365 drivers/usb/gadget/function/f_acm.c 		req->complete = acm_complete_set_line_coding;
req               376 drivers/usb/gadget/function/f_acm.c 		memcpy(req->buf, &acm->port_line_coding, value);
req               408 drivers/usb/gadget/function/f_acm.c 		req->zero = 0;
req               409 drivers/usb/gadget/function/f_acm.c 		req->length = value;
req               410 drivers/usb/gadget/function/f_acm.c 		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
req               494 drivers/usb/gadget/function/f_acm.c 	struct usb_request		*req;
req               500 drivers/usb/gadget/function/f_acm.c 	req = acm->notify_req;
req               504 drivers/usb/gadget/function/f_acm.c 	req->length = len;
req               505 drivers/usb/gadget/function/f_acm.c 	notify = req->buf;
req               518 drivers/usb/gadget/function/f_acm.c 	status = usb_ep_queue(ep, req, GFP_ATOMIC);
req               525 drivers/usb/gadget/function/f_acm.c 		acm->notify_req = req;
req               552 drivers/usb/gadget/function/f_acm.c static void acm_cdc_notify_complete(struct usb_ep *ep, struct usb_request *req)
req               554 drivers/usb/gadget/function/f_acm.c 	struct f_acm		*acm = req->context;
req               561 drivers/usb/gadget/function/f_acm.c 	if (req->status != -ESHUTDOWN)
req               563 drivers/usb/gadget/function/f_acm.c 	acm->notify_req = req;
req               377 drivers/usb/gadget/function/f_ecm.c 	struct usb_request		*req = ecm->notify_req;
req               387 drivers/usb/gadget/function/f_ecm.c 	event = req->buf;
req               399 drivers/usb/gadget/function/f_ecm.c 		req->length = sizeof *event;
req               410 drivers/usb/gadget/function/f_ecm.c 		req->length = ECM_STATUS_BYTECOUNT;
req               413 drivers/usb/gadget/function/f_ecm.c 		data = req->buf + sizeof *event;
req               425 drivers/usb/gadget/function/f_ecm.c 	status = usb_ep_queue(ecm->notify, req, GFP_ATOMIC);
req               443 drivers/usb/gadget/function/f_ecm.c static void ecm_notify_complete(struct usb_ep *ep, struct usb_request *req)
req               445 drivers/usb/gadget/function/f_ecm.c 	struct f_ecm			*ecm = req->context;
req               447 drivers/usb/gadget/function/f_ecm.c 	struct usb_cdc_notification	*event = req->buf;
req               449 drivers/usb/gadget/function/f_ecm.c 	switch (req->status) {
req               461 drivers/usb/gadget/function/f_ecm.c 			event->bNotificationType, req->status);
req               472 drivers/usb/gadget/function/f_ecm.c 	struct usb_request	*req = cdev->req;
req               519 drivers/usb/gadget/function/f_ecm.c 		req->zero = 0;
req               520 drivers/usb/gadget/function/f_ecm.c 		req->length = value;
req               521 drivers/usb/gadget/function/f_ecm.c 		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
req               323 drivers/usb/gadget/function/f_eem.c static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req)
req               325 drivers/usb/gadget/function/f_eem.c 	struct sk_buff *skb = (struct sk_buff *)req->context;
req               415 drivers/usb/gadget/function/f_eem.c 			struct usb_request	*req = cdev->req;
req               444 drivers/usb/gadget/function/f_eem.c 				skb_copy_bits(skb2, 0, req->buf, skb2->len);
req               445 drivers/usb/gadget/function/f_eem.c 				req->length = skb2->len;
req               446 drivers/usb/gadget/function/f_eem.c 				req->complete = eem_cmd_complete;
req               447 drivers/usb/gadget/function/f_eem.c 				req->zero = 1;
req               448 drivers/usb/gadget/function/f_eem.c 				req->context = skb2;
req               449 drivers/usb/gadget/function/f_eem.c 				if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC))
req               119 drivers/usb/gadget/function/f_fs.c 	struct usb_request		*req;	/* P: epfile->mutex */
req               225 drivers/usb/gadget/function/f_fs.c 	struct usb_request *req;
req               268 drivers/usb/gadget/function/f_fs.c static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
req               270 drivers/usb/gadget/function/f_fs.c 	struct ffs_data *ffs = req->context;
req               278 drivers/usb/gadget/function/f_fs.c 	struct usb_request *req = ffs->ep0req;
req               281 drivers/usb/gadget/function/f_fs.c 	req->zero     = len < le16_to_cpu(ffs->ev.setup.wLength);
req               285 drivers/usb/gadget/function/f_fs.c 	req->buf      = data;
req               286 drivers/usb/gadget/function/f_fs.c 	req->length   = len;
req               293 drivers/usb/gadget/function/f_fs.c 	if (req->buf == NULL)
req               294 drivers/usb/gadget/function/f_fs.c 		req->buf = (void *)0xDEADBABE;
req               298 drivers/usb/gadget/function/f_fs.c 	ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
req               304 drivers/usb/gadget/function/f_fs.c 		usb_ep_dequeue(ffs->gadget->ep0, req);
req               309 drivers/usb/gadget/function/f_fs.c 	return req->status ? req->status : req->actual;
req               706 drivers/usb/gadget/function/f_fs.c static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
req               709 drivers/usb/gadget/function/f_fs.c 	if (likely(req->context)) {
req               711 drivers/usb/gadget/function/f_fs.c 		ep->status = req->status ? req->status : req->actual;
req               712 drivers/usb/gadget/function/f_fs.c 		complete(req->context);
req               822 drivers/usb/gadget/function/f_fs.c 	int ret = io_data->req->status ? io_data->req->status :
req               823 drivers/usb/gadget/function/f_fs.c 					 io_data->req->actual;
req               841 drivers/usb/gadget/function/f_fs.c 	usb_ep_free_request(io_data->ep, io_data->req);
req               850 drivers/usb/gadget/function/f_fs.c 					 struct usb_request *req)
req               852 drivers/usb/gadget/function/f_fs.c 	struct ffs_io_data *io_data = req->context;
req               946 drivers/usb/gadget/function/f_fs.c 	struct usb_request *req;
req              1058 drivers/usb/gadget/function/f_fs.c 		req = ep->req;
req              1060 drivers/usb/gadget/function/f_fs.c 			req->buf = NULL;
req              1061 drivers/usb/gadget/function/f_fs.c 			req->sg	= io_data->sgt.sgl;
req              1062 drivers/usb/gadget/function/f_fs.c 			req->num_sgs = io_data->sgt.nents;
req              1064 drivers/usb/gadget/function/f_fs.c 			req->buf = data;
req              1065 drivers/usb/gadget/function/f_fs.c 			req->num_sgs = 0;
req              1067 drivers/usb/gadget/function/f_fs.c 		req->length = data_len;
req              1071 drivers/usb/gadget/function/f_fs.c 		req->context  = &done;
req              1072 drivers/usb/gadget/function/f_fs.c 		req->complete = ffs_epfile_io_complete;
req              1074 drivers/usb/gadget/function/f_fs.c 		ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
req              1087 drivers/usb/gadget/function/f_fs.c 			usb_ep_dequeue(ep->ep, req);
req              1100 drivers/usb/gadget/function/f_fs.c 	} else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
req              1104 drivers/usb/gadget/function/f_fs.c 			req->buf = NULL;
req              1105 drivers/usb/gadget/function/f_fs.c 			req->sg	= io_data->sgt.sgl;
req              1106 drivers/usb/gadget/function/f_fs.c 			req->num_sgs = io_data->sgt.nents;
req              1108 drivers/usb/gadget/function/f_fs.c 			req->buf = data;
req              1109 drivers/usb/gadget/function/f_fs.c 			req->num_sgs = 0;
req              1111 drivers/usb/gadget/function/f_fs.c 		req->length = data_len;
req              1115 drivers/usb/gadget/function/f_fs.c 		io_data->req = req;
req              1118 drivers/usb/gadget/function/f_fs.c 		req->context  = io_data;
req              1119 drivers/usb/gadget/function/f_fs.c 		req->complete = ffs_epfile_async_io_complete;
req              1121 drivers/usb/gadget/function/f_fs.c 		ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
req              1123 drivers/usb/gadget/function/f_fs.c 			io_data->req = NULL;
req              1124 drivers/usb/gadget/function/f_fs.c 			usb_ep_free_request(ep->ep, req);
req              1173 drivers/usb/gadget/function/f_fs.c 	if (likely(io_data && io_data->ep && io_data->req))
req              1174 drivers/usb/gadget/function/f_fs.c 		value = usb_ep_dequeue(io_data->ep, io_data->req);
req              2863 drivers/usb/gadget/function/f_fs.c 		struct usb_request *req;
req              2884 drivers/usb/gadget/function/f_fs.c 		req = usb_ep_alloc_request(ep, GFP_KERNEL);
req              2885 drivers/usb/gadget/function/f_fs.c 		if (unlikely(!req))
req              2889 drivers/usb/gadget/function/f_fs.c 		ffs_ep->req = req;
req              3589 drivers/usb/gadget/function/f_fs.c 		if (ep->ep && ep->req)
req              3590 drivers/usb/gadget/function/f_fs.c 			usb_ep_free_request(ep->ep, ep->req);
req              3591 drivers/usb/gadget/function/f_fs.c 		ep->req = NULL;
req                34 drivers/usb/gadget/function/f_hid.c 	struct usb_request	*req;
req                58 drivers/usb/gadget/function/f_hid.c 	struct usb_request		*req;
req               248 drivers/usb/gadget/function/f_hid.c 	struct usb_request *req;
req               284 drivers/usb/gadget/function/f_hid.c 	req = list->req;
req               285 drivers/usb/gadget/function/f_hid.c 	count = min_t(unsigned int, count, req->actual - list->pos);
req               289 drivers/usb/gadget/function/f_hid.c 	count -= copy_to_user(buffer, req->buf + list->pos, count);
req               298 drivers/usb/gadget/function/f_hid.c 	if (list->pos == req->actual) {
req               301 drivers/usb/gadget/function/f_hid.c 		req->length = hidg->report_length;
req               302 drivers/usb/gadget/function/f_hid.c 		ret = usb_ep_queue(hidg->out_ep, req, GFP_KERNEL);
req               304 drivers/usb/gadget/function/f_hid.c 			free_ep_req(hidg->out_ep, req);
req               318 drivers/usb/gadget/function/f_hid.c static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req)
req               323 drivers/usb/gadget/function/f_hid.c 	if (req->status != 0) {
req               325 drivers/usb/gadget/function/f_hid.c 			"End Point Request ERROR: %d\n", req->status);
req               338 drivers/usb/gadget/function/f_hid.c 	struct usb_request *req;
req               363 drivers/usb/gadget/function/f_hid.c 	req = hidg->req;
req               367 drivers/usb/gadget/function/f_hid.c 	status = copy_from_user(req->buf, buffer, count);
req               379 drivers/usb/gadget/function/f_hid.c 	if (!hidg->req) {
req               380 drivers/usb/gadget/function/f_hid.c 		free_ep_req(hidg->in_ep, req);
req               388 drivers/usb/gadget/function/f_hid.c 	req->status   = 0;
req               389 drivers/usb/gadget/function/f_hid.c 	req->zero     = 0;
req               390 drivers/usb/gadget/function/f_hid.c 	req->length   = count;
req               391 drivers/usb/gadget/function/f_hid.c 	req->complete = f_hidg_req_complete;
req               392 drivers/usb/gadget/function/f_hid.c 	req->context  = hidg;
req               396 drivers/usb/gadget/function/f_hid.c 	status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
req               461 drivers/usb/gadget/function/f_hid.c static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req)
req               463 drivers/usb/gadget/function/f_hid.c 	struct f_hidg *hidg = (struct f_hidg *) req->context;
req               468 drivers/usb/gadget/function/f_hid.c 	switch (req->status) {
req               476 drivers/usb/gadget/function/f_hid.c 		req_list->req = req;
req               485 drivers/usb/gadget/function/f_hid.c 		ERROR(cdev, "Set report failed %d\n", req->status);
req               491 drivers/usb/gadget/function/f_hid.c 		free_ep_req(ep, req);
req               501 drivers/usb/gadget/function/f_hid.c 	struct usb_request		*req  = cdev->req;
req               519 drivers/usb/gadget/function/f_hid.c 		memset(req->buf, 0x0, length);
req               528 drivers/usb/gadget/function/f_hid.c 		((u8 *) req->buf)[0] = hidg->protocol;
req               569 drivers/usb/gadget/function/f_hid.c 			memcpy(req->buf, &hidg_desc_copy, length);
req               577 drivers/usb/gadget/function/f_hid.c 			memcpy(req->buf, hidg->report_desc, length);
req               600 drivers/usb/gadget/function/f_hid.c 	req->zero = 0;
req               601 drivers/usb/gadget/function/f_hid.c 	req->length = length;
req               602 drivers/usb/gadget/function/f_hid.c 	status = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
req               619 drivers/usb/gadget/function/f_hid.c 		free_ep_req(hidg->out_ep, list->req);
req               627 drivers/usb/gadget/function/f_hid.c 		free_ep_req(hidg->in_ep, hidg->req);
req               631 drivers/usb/gadget/function/f_hid.c 	hidg->req = NULL;
req               691 drivers/usb/gadget/function/f_hid.c 			struct usb_request *req =
req               694 drivers/usb/gadget/function/f_hid.c 			if (req) {
req               695 drivers/usb/gadget/function/f_hid.c 				req->complete = hidg_set_report_complete;
req               696 drivers/usb/gadget/function/f_hid.c 				req->context  = hidg;
req               697 drivers/usb/gadget/function/f_hid.c 				status = usb_ep_queue(hidg->out_ep, req,
req               702 drivers/usb/gadget/function/f_hid.c 					free_ep_req(hidg->out_ep, req);
req               713 drivers/usb/gadget/function/f_hid.c 		hidg->req = req_in;
req               817 drivers/usb/gadget/function/f_hid.c 	hidg->req = NULL;
req               844 drivers/usb/gadget/function/f_hid.c 	if (hidg->req != NULL)
req               845 drivers/usb/gadget/function/f_hid.c 		free_ep_req(hidg->in_ep, hidg->req);
req               235 drivers/usb/gadget/function/f_loopback.c static void loopback_complete(struct usb_ep *ep, struct usb_request *req)
req               239 drivers/usb/gadget/function/f_loopback.c 	int			status = req->status;
req               248 drivers/usb/gadget/function/f_loopback.c 			struct usb_request *in_req = req->context;
req               250 drivers/usb/gadget/function/f_loopback.c 			in_req->zero = (req->actual < req->length);
req               251 drivers/usb/gadget/function/f_loopback.c 			in_req->length = req->actual;
req               253 drivers/usb/gadget/function/f_loopback.c 			req = in_req;
req               259 drivers/usb/gadget/function/f_loopback.c 			req = req->context;
req               264 drivers/usb/gadget/function/f_loopback.c 		status = usb_ep_queue(ep, req, GFP_ATOMIC);
req               276 drivers/usb/gadget/function/f_loopback.c 				status, req->actual, req->length);
req               290 drivers/usb/gadget/function/f_loopback.c 				    req->context);
req               291 drivers/usb/gadget/function/f_loopback.c 		free_ep_req(ep, req);
req               443 drivers/usb/gadget/function/f_mass_storage.c static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
req               446 drivers/usb/gadget/function/f_mass_storage.c 	struct fsg_buffhd	*bh = req->context;
req               448 drivers/usb/gadget/function/f_mass_storage.c 	if (req->status || req->actual != req->length)
req               450 drivers/usb/gadget/function/f_mass_storage.c 		    req->status, req->actual, req->length);
req               451 drivers/usb/gadget/function/f_mass_storage.c 	if (req->status == -ECONNRESET)		/* Request was cancelled */
req               459 drivers/usb/gadget/function/f_mass_storage.c static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
req               462 drivers/usb/gadget/function/f_mass_storage.c 	struct fsg_buffhd	*bh = req->context;
req               464 drivers/usb/gadget/function/f_mass_storage.c 	dump_msg(common, "bulk-out", req->buf, req->actual);
req               465 drivers/usb/gadget/function/f_mass_storage.c 	if (req->status || req->actual != bh->bulk_out_intended_length)
req               467 drivers/usb/gadget/function/f_mass_storage.c 		    req->status, req->actual, bh->bulk_out_intended_length);
req               468 drivers/usb/gadget/function/f_mass_storage.c 	if (req->status == -ECONNRESET)		/* Request was cancelled */
req               490 drivers/usb/gadget/function/f_mass_storage.c 	struct usb_request	*req = fsg->common->ep0req;
req               499 drivers/usb/gadget/function/f_mass_storage.c 	req->context = NULL;
req               500 drivers/usb/gadget/function/f_mass_storage.c 	req->length = 0;
req               529 drivers/usb/gadget/function/f_mass_storage.c 		*(u8 *)req->buf = _fsg_common_get_max_lun(fsg->common);
req               532 drivers/usb/gadget/function/f_mass_storage.c 		req->length = min((u16)1, w_length);
req               550 drivers/usb/gadget/function/f_mass_storage.c 			   struct usb_request *req)
req               555 drivers/usb/gadget/function/f_mass_storage.c 		dump_msg(fsg, "bulk-in", req->buf, req->length);
req               557 drivers/usb/gadget/function/f_mass_storage.c 	rc = usb_ep_queue(ep, req, GFP_KERNEL);
req               561 drivers/usb/gadget/function/f_mass_storage.c 		req->status = rc;
req               568 drivers/usb/gadget/function/f_mass_storage.c 				!(rc == -EOPNOTSUPP && req->length == 0))
req              2078 drivers/usb/gadget/function/f_mass_storage.c 	struct usb_request	*req = bh->outreq;
req              2079 drivers/usb/gadget/function/f_mass_storage.c 	struct bulk_cb_wrap	*cbw = req->buf;
req              2083 drivers/usb/gadget/function/f_mass_storage.c 	if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
req              2087 drivers/usb/gadget/function/f_mass_storage.c 	if (req->actual != US_BULK_CB_WRAP_LEN ||
req              2091 drivers/usb/gadget/function/f_mass_storage.c 				req->actual,
req              2686 drivers/usb/gadget/function/f_mass_storage.c 	common->ep0req = cdev->req;
req               256 drivers/usb/gadget/function/f_midi.c static void f_midi_handle_out_data(struct usb_ep *ep, struct usb_request *req)
req               259 drivers/usb/gadget/function/f_midi.c 	u8 *buf = req->buf;
req               261 drivers/usb/gadget/function/f_midi.c 	for (i = 0; i + 3 < req->actual; i += 4)
req               270 drivers/usb/gadget/function/f_midi.c f_midi_complete(struct usb_ep *ep, struct usb_request *req)
req               274 drivers/usb/gadget/function/f_midi.c 	int status = req->status;
req               280 drivers/usb/gadget/function/f_midi.c 			f_midi_handle_out_data(ep, req);
req               284 drivers/usb/gadget/function/f_midi.c 			req->length = 0;
req               295 drivers/usb/gadget/function/f_midi.c 				req->actual, req->length);
req               297 drivers/usb/gadget/function/f_midi.c 			f_midi_handle_out_data(ep, req);
req               300 drivers/usb/gadget/function/f_midi.c 			free_ep_req(ep, req);
req               309 drivers/usb/gadget/function/f_midi.c 				status, req->actual, req->length);
req               315 drivers/usb/gadget/function/f_midi.c 	status = usb_ep_queue(ep, req, GFP_ATOMIC);
req               318 drivers/usb/gadget/function/f_midi.c 				ep->name, req->length, status);
req               383 drivers/usb/gadget/function/f_midi.c 		struct usb_request *req =
req               386 drivers/usb/gadget/function/f_midi.c 		if (req == NULL)
req               389 drivers/usb/gadget/function/f_midi.c 		req->length = 0;
req               390 drivers/usb/gadget/function/f_midi.c 		req->complete = f_midi_complete;
req               392 drivers/usb/gadget/function/f_midi.c 		kfifo_put(&midi->in_req_fifo, req);
req               397 drivers/usb/gadget/function/f_midi.c 		struct usb_request *req =
req               400 drivers/usb/gadget/function/f_midi.c 		if (req == NULL)
req               403 drivers/usb/gadget/function/f_midi.c 		req->complete = f_midi_complete;
req               404 drivers/usb/gadget/function/f_midi.c 		err = usb_ep_queue(midi->out_ep, req, GFP_ATOMIC);
req               408 drivers/usb/gadget/function/f_midi.c 			if (req->buf != NULL)
req               409 drivers/usb/gadget/function/f_midi.c 				free_ep_req(midi->out_ep, req);
req               421 drivers/usb/gadget/function/f_midi.c 	struct usb_request *req = NULL;
req               433 drivers/usb/gadget/function/f_midi.c 	while (kfifo_get(&midi->in_req_fifo, &req))
req               434 drivers/usb/gadget/function/f_midi.c 		free_ep_req(midi->in_ep, req);
req               447 drivers/usb/gadget/function/f_midi.c static void f_midi_transmit_byte(struct usb_request *req,
req               591 drivers/usb/gadget/function/f_midi.c 		unsigned int length = req->length;
req               592 drivers/usb/gadget/function/f_midi.c 		u8 *buf = (u8 *)req->buf + length;
req               595 drivers/usb/gadget/function/f_midi.c 		req->length = length + sizeof(p);
req               608 drivers/usb/gadget/function/f_midi.c 	struct usb_request *req = NULL;
req               617 drivers/usb/gadget/function/f_midi.c 	len = kfifo_peek(&midi->in_req_fifo, &req);
req               629 drivers/usb/gadget/function/f_midi.c 	if (req->length > 0)
req               639 drivers/usb/gadget/function/f_midi.c 		while (req->length + 3 < midi->buflen) {
req               646 drivers/usb/gadget/function/f_midi.c 			f_midi_transmit_byte(req, port, b);
req               655 drivers/usb/gadget/function/f_midi.c 	if (req->length <= 0)
req               658 drivers/usb/gadget/function/f_midi.c 	err = usb_ep_queue(ep, req, GFP_ATOMIC);
req               662 drivers/usb/gadget/function/f_midi.c 		req->length = 0; /* Re-use request next time. */
req               666 drivers/usb/gadget/function/f_midi.c 		kfifo_put(&midi->in_req_fifo, req);
req               544 drivers/usb/gadget/function/f_ncm.c 	struct usb_request		*req = ncm->notify_req;
req               554 drivers/usb/gadget/function/f_ncm.c 	event = req->buf;
req               566 drivers/usb/gadget/function/f_ncm.c 		req->length = sizeof *event;
req               577 drivers/usb/gadget/function/f_ncm.c 		req->length = NCM_STATUS_BYTECOUNT;
req               580 drivers/usb/gadget/function/f_ncm.c 		data = req->buf + sizeof *event;
req               599 drivers/usb/gadget/function/f_ncm.c 	status = usb_ep_queue(ncm->notify, req, GFP_ATOMIC);
req               626 drivers/usb/gadget/function/f_ncm.c static void ncm_notify_complete(struct usb_ep *ep, struct usb_request *req)
req               628 drivers/usb/gadget/function/f_ncm.c 	struct f_ncm			*ncm = req->context;
req               630 drivers/usb/gadget/function/f_ncm.c 	struct usb_cdc_notification	*event = req->buf;
req               633 drivers/usb/gadget/function/f_ncm.c 	switch (req->status) {
req               646 drivers/usb/gadget/function/f_ncm.c 			event->bNotificationType, req->status);
req               654 drivers/usb/gadget/function/f_ncm.c static void ncm_ep0out_complete(struct usb_ep *ep, struct usb_request *req)
req               658 drivers/usb/gadget/function/f_ncm.c 	struct usb_function	*f = req->context;
req               662 drivers/usb/gadget/function/f_ncm.c 	req->context = NULL;
req               663 drivers/usb/gadget/function/f_ncm.c 	if (req->status || req->actual != req->length) {
req               668 drivers/usb/gadget/function/f_ncm.c 	in_size = get_unaligned_le32(req->buf);
req               688 drivers/usb/gadget/function/f_ncm.c 	struct usb_request	*req = cdev->req;
req               733 drivers/usb/gadget/function/f_ncm.c 		memcpy(req->buf, &ntb_parameters, value);
req               742 drivers/usb/gadget/function/f_ncm.c 		put_unaligned_le32(ncm->port.fixed_in_len, req->buf);
req               753 drivers/usb/gadget/function/f_ncm.c 		req->complete = ncm_ep0out_complete;
req               754 drivers/usb/gadget/function/f_ncm.c 		req->length = w_length;
req               755 drivers/usb/gadget/function/f_ncm.c 		req->context = f;
req               757 drivers/usb/gadget/function/f_ncm.c 		value = req->length;
req               769 drivers/usb/gadget/function/f_ncm.c 		put_unaligned_le16(format, req->buf);
req               803 drivers/usb/gadget/function/f_ncm.c 		put_unaligned_le16(is_crc, req->buf);
req               850 drivers/usb/gadget/function/f_ncm.c 		req->zero = 0;
req               851 drivers/usb/gadget/function/f_ncm.c 		req->length = value;
req               852 drivers/usb/gadget/function/f_ncm.c 		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
req               200 drivers/usb/gadget/function/f_phonet.c static void pn_tx_complete(struct usb_ep *ep, struct usb_request *req)
req               204 drivers/usb/gadget/function/f_phonet.c 	struct sk_buff *skb = req->context;
req               206 drivers/usb/gadget/function/f_phonet.c 	switch (req->status) {
req               228 drivers/usb/gadget/function/f_phonet.c 	struct usb_request *req;
req               239 drivers/usb/gadget/function/f_phonet.c 	req = fp->in_req;
req               240 drivers/usb/gadget/function/f_phonet.c 	req->buf = skb->data;
req               241 drivers/usb/gadget/function/f_phonet.c 	req->length = skb->len;
req               242 drivers/usb/gadget/function/f_phonet.c 	req->complete = pn_tx_complete;
req               243 drivers/usb/gadget/function/f_phonet.c 	req->zero = 1;
req               244 drivers/usb/gadget/function/f_phonet.c 	req->context = skb;
req               246 drivers/usb/gadget/function/f_phonet.c 	if (unlikely(usb_ep_queue(fp->in_ep, req, GFP_ATOMIC)))
req               292 drivers/usb/gadget/function/f_phonet.c pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
req               301 drivers/usb/gadget/function/f_phonet.c 	req->buf = page_address(page);
req               302 drivers/usb/gadget/function/f_phonet.c 	req->length = PAGE_SIZE;
req               303 drivers/usb/gadget/function/f_phonet.c 	req->context = page;
req               305 drivers/usb/gadget/function/f_phonet.c 	err = usb_ep_queue(fp->out_ep, req, gfp_flags);
req               311 drivers/usb/gadget/function/f_phonet.c static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
req               315 drivers/usb/gadget/function/f_phonet.c 	struct page *page = req->context;
req               318 drivers/usb/gadget/function/f_phonet.c 	int status = req->status;
req               326 drivers/usb/gadget/function/f_phonet.c 		if (req->actual < req->length) /* Last fragment */
req               341 drivers/usb/gadget/function/f_phonet.c 				skb->len <= 1, req->actual, PAGE_SIZE);
req               344 drivers/usb/gadget/function/f_phonet.c 		if (req->actual < req->length) { /* Last fragment */
req               357 drivers/usb/gadget/function/f_phonet.c 		req = NULL;
req               371 drivers/usb/gadget/function/f_phonet.c 	if (req)
req               372 drivers/usb/gadget/function/f_phonet.c 		pn_rx_submit(fp, req, GFP_ATOMIC);
req               543 drivers/usb/gadget/function/f_phonet.c 		struct usb_request *req;
req               545 drivers/usb/gadget/function/f_phonet.c 		req = usb_ep_alloc_request(fp->out_ep, GFP_KERNEL);
req               546 drivers/usb/gadget/function/f_phonet.c 		if (!req)
req               549 drivers/usb/gadget/function/f_phonet.c 		req->complete = pn_rx_complete;
req               550 drivers/usb/gadget/function/f_phonet.c 		fp->out_reqv[i] = req;
req               224 drivers/usb/gadget/function/f_printer.c 	struct usb_request	*req;
req               226 drivers/usb/gadget/function/f_printer.c 	req = usb_ep_alloc_request(ep, gfp_flags);
req               228 drivers/usb/gadget/function/f_printer.c 	if (req != NULL) {
req               229 drivers/usb/gadget/function/f_printer.c 		req->length = len;
req               230 drivers/usb/gadget/function/f_printer.c 		req->buf = kmalloc(len, gfp_flags);
req               231 drivers/usb/gadget/function/f_printer.c 		if (req->buf == NULL) {
req               232 drivers/usb/gadget/function/f_printer.c 			usb_ep_free_request(ep, req);
req               237 drivers/usb/gadget/function/f_printer.c 	return req;
req               241 drivers/usb/gadget/function/f_printer.c printer_req_free(struct usb_ep *ep, struct usb_request *req)
req               243 drivers/usb/gadget/function/f_printer.c 	if (ep != NULL && req != NULL) {
req               244 drivers/usb/gadget/function/f_printer.c 		kfree(req->buf);
req               245 drivers/usb/gadget/function/f_printer.c 		usb_ep_free_request(ep, req);
req               251 drivers/usb/gadget/function/f_printer.c static void rx_complete(struct usb_ep *ep, struct usb_request *req)
req               254 drivers/usb/gadget/function/f_printer.c 	int			status = req->status;
req               259 drivers/usb/gadget/function/f_printer.c 	list_del_init(&req->list);	/* Remode from Active List */
req               265 drivers/usb/gadget/function/f_printer.c 		if (req->actual > 0) {
req               266 drivers/usb/gadget/function/f_printer.c 			list_add_tail(&req->list, &dev->rx_buffers);
req               267 drivers/usb/gadget/function/f_printer.c 			DBG(dev, "G_Printer : rx length %d\n", req->actual);
req               269 drivers/usb/gadget/function/f_printer.c 			list_add(&req->list, &dev->rx_reqs);
req               277 drivers/usb/gadget/function/f_printer.c 		list_add(&req->list, &dev->rx_reqs);
req               283 drivers/usb/gadget/function/f_printer.c 		list_add(&req->list, &dev->rx_reqs);
req               292 drivers/usb/gadget/function/f_printer.c 		list_add(&req->list, &dev->rx_reqs);
req               300 drivers/usb/gadget/function/f_printer.c static void tx_complete(struct usb_ep *ep, struct usb_request *req)
req               304 drivers/usb/gadget/function/f_printer.c 	switch (req->status) {
req               306 drivers/usb/gadget/function/f_printer.c 		VDBG(dev, "tx err %d\n", req->status);
req               319 drivers/usb/gadget/function/f_printer.c 	list_del_init(&req->list);
req               320 drivers/usb/gadget/function/f_printer.c 	list_add(&req->list, &dev->tx_reqs);
req               377 drivers/usb/gadget/function/f_printer.c 	struct usb_request              *req;
req               382 drivers/usb/gadget/function/f_printer.c 		req = container_of(dev->rx_reqs.next,
req               384 drivers/usb/gadget/function/f_printer.c 		list_del_init(&req->list);
req               392 drivers/usb/gadget/function/f_printer.c 		req->length = USB_BUFSIZE;
req               393 drivers/usb/gadget/function/f_printer.c 		req->complete = rx_complete;
req               397 drivers/usb/gadget/function/f_printer.c 		error = usb_ep_queue(dev->out_ep, req, GFP_ATOMIC);
req               401 drivers/usb/gadget/function/f_printer.c 			list_add(&req->list, &dev->rx_reqs);
req               405 drivers/usb/gadget/function/f_printer.c 		else if (list_empty(&req->list))
req               406 drivers/usb/gadget/function/f_printer.c 			list_add(&req->list, &dev->rx_reqs_active);
req               417 drivers/usb/gadget/function/f_printer.c 	struct usb_request		*req;
req               478 drivers/usb/gadget/function/f_printer.c 			req = container_of(dev->rx_buffers.next,
req               480 drivers/usb/gadget/function/f_printer.c 			list_del_init(&req->list);
req               482 drivers/usb/gadget/function/f_printer.c 			if (req->actual && req->buf) {
req               483 drivers/usb/gadget/function/f_printer.c 				current_rx_req = req;
req               484 drivers/usb/gadget/function/f_printer.c 				current_rx_bytes = req->actual;
req               485 drivers/usb/gadget/function/f_printer.c 				current_rx_buf = req->buf;
req               487 drivers/usb/gadget/function/f_printer.c 				list_add(&req->list, &dev->rx_reqs);
req               553 drivers/usb/gadget/function/f_printer.c 	struct usb_request	*req;
req               594 drivers/usb/gadget/function/f_printer.c 		req = container_of(dev->tx_reqs.next, struct usb_request,
req               596 drivers/usb/gadget/function/f_printer.c 		list_del_init(&req->list);
req               598 drivers/usb/gadget/function/f_printer.c 		req->complete = tx_complete;
req               599 drivers/usb/gadget/function/f_printer.c 		req->length = size;
req               604 drivers/usb/gadget/function/f_printer.c 			req->zero = 0;
req               609 drivers/usb/gadget/function/f_printer.c 			req->zero = ((len % dev->in_ep->maxpacket) == 0);
req               614 drivers/usb/gadget/function/f_printer.c 		if (copy_from_user(req->buf, buf, size)) {
req               615 drivers/usb/gadget/function/f_printer.c 			list_add(&req->list, &dev->tx_reqs);
req               628 drivers/usb/gadget/function/f_printer.c 			list_add(&req->list, &dev->tx_reqs);
req               634 drivers/usb/gadget/function/f_printer.c 		list_add(&req->list, &dev->tx_reqs_active);
req               638 drivers/usb/gadget/function/f_printer.c 		value = usb_ep_queue(dev->in_ep, req, GFP_ATOMIC);
req               641 drivers/usb/gadget/function/f_printer.c 			list_del(&req->list);
req               642 drivers/usb/gadget/function/f_printer.c 			list_add(&req->list, &dev->tx_reqs);
req               841 drivers/usb/gadget/function/f_printer.c 	struct usb_request	*req;
req               859 drivers/usb/gadget/function/f_printer.c 		req = container_of(dev->rx_buffers.next, struct usb_request,
req               861 drivers/usb/gadget/function/f_printer.c 		list_del_init(&req->list);
req               862 drivers/usb/gadget/function/f_printer.c 		list_add(&req->list, &dev->rx_reqs);
req               866 drivers/usb/gadget/function/f_printer.c 		req = container_of(dev->rx_buffers.next, struct usb_request,
req               868 drivers/usb/gadget/function/f_printer.c 		list_del_init(&req->list);
req               869 drivers/usb/gadget/function/f_printer.c 		list_add(&req->list, &dev->rx_reqs);
req               873 drivers/usb/gadget/function/f_printer.c 		req = container_of(dev->tx_reqs_active.next,
req               875 drivers/usb/gadget/function/f_printer.c 		list_del_init(&req->list);
req               876 drivers/usb/gadget/function/f_printer.c 		list_add(&req->list, &dev->tx_reqs);
req               938 drivers/usb/gadget/function/f_printer.c 	struct usb_request	*req = cdev->req;
req               939 drivers/usb/gadget/function/f_printer.c 	u8			*buf = req->buf;
req              1002 drivers/usb/gadget/function/f_printer.c 		req->length = value;
req              1003 drivers/usb/gadget/function/f_printer.c 		req->zero = value < wLength;
req              1004 drivers/usb/gadget/function/f_printer.c 		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
req              1007 drivers/usb/gadget/function/f_printer.c 			req->status = 0;
req              1022 drivers/usb/gadget/function/f_printer.c 	struct usb_request *req;
req              1065 drivers/usb/gadget/function/f_printer.c 		req = printer_req_alloc(dev->in_ep, USB_BUFSIZE, GFP_KERNEL);
req              1066 drivers/usb/gadget/function/f_printer.c 		if (!req)
req              1068 drivers/usb/gadget/function/f_printer.c 		list_add(&req->list, &dev->tx_reqs);
req              1072 drivers/usb/gadget/function/f_printer.c 		req = printer_req_alloc(dev->out_ep, USB_BUFSIZE, GFP_KERNEL);
req              1073 drivers/usb/gadget/function/f_printer.c 		if (!req)
req              1075 drivers/usb/gadget/function/f_printer.c 		list_add(&req->list, &dev->rx_reqs);
req              1107 drivers/usb/gadget/function/f_printer.c 		req = container_of(dev->rx_reqs.next, struct usb_request, list);
req              1108 drivers/usb/gadget/function/f_printer.c 		list_del(&req->list);
req              1109 drivers/usb/gadget/function/f_printer.c 		printer_req_free(dev->out_ep, req);
req              1114 drivers/usb/gadget/function/f_printer.c 		req = container_of(dev->tx_reqs.next, struct usb_request, list);
req              1115 drivers/usb/gadget/function/f_printer.c 		list_del(&req->list);
req              1116 drivers/usb/gadget/function/f_printer.c 		printer_req_free(dev->in_ep, req);
req              1363 drivers/usb/gadget/function/f_printer.c 	struct usb_request	*req;
req              1378 drivers/usb/gadget/function/f_printer.c 		req = container_of(dev->tx_reqs.next, struct usb_request,
req              1380 drivers/usb/gadget/function/f_printer.c 		list_del(&req->list);
req              1381 drivers/usb/gadget/function/f_printer.c 		printer_req_free(dev->in_ep, req);
req              1388 drivers/usb/gadget/function/f_printer.c 		req = container_of(dev->rx_reqs.next,
req              1390 drivers/usb/gadget/function/f_printer.c 		list_del(&req->list);
req              1391 drivers/usb/gadget/function/f_printer.c 		printer_req_free(dev->out_ep, req);
req              1395 drivers/usb/gadget/function/f_printer.c 		req = container_of(dev->rx_buffers.next,
req              1397 drivers/usb/gadget/function/f_printer.c 		list_del(&req->list);
req              1398 drivers/usb/gadget/function/f_printer.c 		printer_req_free(dev->out_ep, req);
req               386 drivers/usb/gadget/function/f_rndis.c 	struct usb_request		*req = rndis->notify_req;
req               388 drivers/usb/gadget/function/f_rndis.c 	__le32				*data = req->buf;
req               402 drivers/usb/gadget/function/f_rndis.c 	status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
req               409 drivers/usb/gadget/function/f_rndis.c static void rndis_response_complete(struct usb_ep *ep, struct usb_request *req)
req               411 drivers/usb/gadget/function/f_rndis.c 	struct f_rndis			*rndis = req->context;
req               413 drivers/usb/gadget/function/f_rndis.c 	int				status = req->status;
req               428 drivers/usb/gadget/function/f_rndis.c 			req->actual, req->length);
req               439 drivers/usb/gadget/function/f_rndis.c 		status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
req               448 drivers/usb/gadget/function/f_rndis.c static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
req               450 drivers/usb/gadget/function/f_rndis.c 	struct f_rndis			*rndis = req->context;
req               455 drivers/usb/gadget/function/f_rndis.c 	status = rndis_msg_parser(rndis->params, (u8 *) req->buf);
req               458 drivers/usb/gadget/function/f_rndis.c 			status, req->actual, req->length);
req               467 drivers/usb/gadget/function/f_rndis.c 	struct usb_request	*req = cdev->req;
req               487 drivers/usb/gadget/function/f_rndis.c 		req->complete = rndis_command_complete;
req               488 drivers/usb/gadget/function/f_rndis.c 		req->context = rndis;
req               503 drivers/usb/gadget/function/f_rndis.c 				memcpy(req->buf, buf, n);
req               504 drivers/usb/gadget/function/f_rndis.c 				req->complete = rndis_response_complete;
req               505 drivers/usb/gadget/function/f_rndis.c 				req->context = rndis;
req               525 drivers/usb/gadget/function/f_rndis.c 		req->zero = (value < w_length);
req               526 drivers/usb/gadget/function/f_rndis.c 		req->length = value;
req               527 drivers/usb/gadget/function/f_rndis.c 		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
req               463 drivers/usb/gadget/function/f_sourcesink.c static int check_read_data(struct f_sourcesink *ss, struct usb_request *req)
req               466 drivers/usb/gadget/function/f_sourcesink.c 	u8			*buf = req->buf;
req               473 drivers/usb/gadget/function/f_sourcesink.c 	for (i = 0; i < req->actual; i++, buf++) {
req               501 drivers/usb/gadget/function/f_sourcesink.c static void reinit_write_data(struct usb_ep *ep, struct usb_request *req)
req               504 drivers/usb/gadget/function/f_sourcesink.c 	u8		*buf = req->buf;
req               510 drivers/usb/gadget/function/f_sourcesink.c 		memset(req->buf, 0, req->length);
req               513 drivers/usb/gadget/function/f_sourcesink.c 		for  (i = 0; i < req->length; i++)
req               521 drivers/usb/gadget/function/f_sourcesink.c static void source_sink_complete(struct usb_ep *ep, struct usb_request *req)
req               525 drivers/usb/gadget/function/f_sourcesink.c 	int				status = req->status;
req               537 drivers/usb/gadget/function/f_sourcesink.c 			check_read_data(ss, req);
req               539 drivers/usb/gadget/function/f_sourcesink.c 				memset(req->buf, 0x55, req->length);
req               548 drivers/usb/gadget/function/f_sourcesink.c 				req->actual, req->length);
req               550 drivers/usb/gadget/function/f_sourcesink.c 			check_read_data(ss, req);
req               551 drivers/usb/gadget/function/f_sourcesink.c 		free_ep_req(ep, req);
req               561 drivers/usb/gadget/function/f_sourcesink.c 				status, req->actual, req->length);
req               567 drivers/usb/gadget/function/f_sourcesink.c 	status = usb_ep_queue(ep, req, GFP_ATOMIC);
req               570 drivers/usb/gadget/function/f_sourcesink.c 				ep->name, req->length, status);
req               580 drivers/usb/gadget/function/f_sourcesink.c 	struct usb_request	*req;
req               607 drivers/usb/gadget/function/f_sourcesink.c 		req = ss_alloc_ep_req(ep, size);
req               608 drivers/usb/gadget/function/f_sourcesink.c 		if (!req)
req               611 drivers/usb/gadget/function/f_sourcesink.c 		req->complete = source_sink_complete;
req               613 drivers/usb/gadget/function/f_sourcesink.c 			reinit_write_data(ep, req);
req               615 drivers/usb/gadget/function/f_sourcesink.c 			memset(req->buf, 0x55, req->length);
req               617 drivers/usb/gadget/function/f_sourcesink.c 		status = usb_ep_queue(ep, req, GFP_ATOMIC);
req               625 drivers/usb/gadget/function/f_sourcesink.c 			free_ep_req(ep, req);
req               765 drivers/usb/gadget/function/f_sourcesink.c 	struct usb_request	*req = c->cdev->req;
req               771 drivers/usb/gadget/function/f_sourcesink.c 	req->length = USB_COMP_EP0_BUFSIZ;
req               793 drivers/usb/gadget/function/f_sourcesink.c 		if (w_length > req->length)
req               803 drivers/usb/gadget/function/f_sourcesink.c 		if (w_length > req->length)
req               821 drivers/usb/gadget/function/f_sourcesink.c 		req->zero = 0;
req               822 drivers/usb/gadget/function/f_sourcesink.c 		req->length = value;
req               823 drivers/usb/gadget/function/f_sourcesink.c 		value = usb_ep_queue(c->cdev->gadget->ep0, req, GFP_ATOMIC);
req                53 drivers/usb/gadget/function/f_tcm.c 	ret = usb_ep_queue(fu->ep_out, fu->cmd.req, GFP_ATOMIC);
req                59 drivers/usb/gadget/function/f_tcm.c static void bot_status_complete(struct usb_ep *ep, struct usb_request *req)
req                61 drivers/usb/gadget/function/f_tcm.c 	struct usbg_cmd *cmd = req->context;
req                65 drivers/usb/gadget/function/f_tcm.c 	if (req->status < 0) {
req                83 drivers/usb/gadget/function/f_tcm.c 	fu->bot_status.req->context = cmd;
req                84 drivers/usb/gadget/function/f_tcm.c 	ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_ATOMIC);
req                89 drivers/usb/gadget/function/f_tcm.c static void bot_err_compl(struct usb_ep *ep, struct usb_request *req)
req                91 drivers/usb/gadget/function/f_tcm.c 	struct usbg_cmd *cmd = req->context;
req                94 drivers/usb/gadget/function/f_tcm.c 	if (req->status < 0)
req                99 drivers/usb/gadget/function/f_tcm.c 			req->length = ep->maxpacket;
req               102 drivers/usb/gadget/function/f_tcm.c 			req->length = cmd->data_len;
req               106 drivers/usb/gadget/function/f_tcm.c 		usb_ep_queue(ep, req, GFP_ATOMIC);
req               116 drivers/usb/gadget/function/f_tcm.c 	struct usb_request *req;
req               124 drivers/usb/gadget/function/f_tcm.c 			req = fu->bot_req_in;
req               127 drivers/usb/gadget/function/f_tcm.c 			req = fu->bot_req_out;
req               131 drivers/usb/gadget/function/f_tcm.c 			req->length = ep->maxpacket;
req               134 drivers/usb/gadget/function/f_tcm.c 			req->length = cmd->data_len;
req               137 drivers/usb/gadget/function/f_tcm.c 		req->complete = bot_err_compl;
req               138 drivers/usb/gadget/function/f_tcm.c 		req->context = cmd;
req               139 drivers/usb/gadget/function/f_tcm.c 		req->buf = fu->cmd.buf;
req               140 drivers/usb/gadget/function/f_tcm.c 		usb_ep_queue(ep, req, GFP_KERNEL);
req               166 drivers/usb/gadget/function/f_tcm.c 		fu->bot_status.req->context = cmd;
req               168 drivers/usb/gadget/function/f_tcm.c 		ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_KERNEL);
req               192 drivers/usb/gadget/function/f_tcm.c static void bot_read_compl(struct usb_ep *ep, struct usb_request *req)
req               194 drivers/usb/gadget/function/f_tcm.c 	struct usbg_cmd *cmd = req->context;
req               196 drivers/usb/gadget/function/f_tcm.c 	if (req->status < 0)
req               290 drivers/usb/gadget/function/f_tcm.c static void bot_cmd_complete(struct usb_ep *ep, struct usb_request *req)
req               292 drivers/usb/gadget/function/f_tcm.c 	struct f_uas *fu = req->context;
req               297 drivers/usb/gadget/function/f_tcm.c 	if (req->status < 0)
req               300 drivers/usb/gadget/function/f_tcm.c 	ret = bot_submit_command(fu, req->buf, req->actual);
req               317 drivers/usb/gadget/function/f_tcm.c 	fu->cmd.req = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
req               318 drivers/usb/gadget/function/f_tcm.c 	if (!fu->cmd.req)
req               321 drivers/usb/gadget/function/f_tcm.c 	fu->bot_status.req = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
req               322 drivers/usb/gadget/function/f_tcm.c 	if (!fu->bot_status.req)
req               325 drivers/usb/gadget/function/f_tcm.c 	fu->bot_status.req->buf = &fu->bot_status.csw;
req               326 drivers/usb/gadget/function/f_tcm.c 	fu->bot_status.req->length = US_BULK_CS_WRAP_LEN;
req               327 drivers/usb/gadget/function/f_tcm.c 	fu->bot_status.req->complete = bot_status_complete;
req               334 drivers/usb/gadget/function/f_tcm.c 	fu->cmd.req->complete = bot_cmd_complete;
req               335 drivers/usb/gadget/function/f_tcm.c 	fu->cmd.req->buf = fu->cmd.buf;
req               336 drivers/usb/gadget/function/f_tcm.c 	fu->cmd.req->length = fu->ep_out->maxpacket;
req               337 drivers/usb/gadget/function/f_tcm.c 	fu->cmd.req->context = fu;
req               347 drivers/usb/gadget/function/f_tcm.c 	usb_ep_free_request(fu->ep_in, fu->bot_status.req);
req               349 drivers/usb/gadget/function/f_tcm.c 	usb_ep_free_request(fu->ep_out, fu->cmd.req);
req               350 drivers/usb/gadget/function/f_tcm.c 	fu->cmd.req = NULL;
req               375 drivers/usb/gadget/function/f_tcm.c 	usb_ep_free_request(fu->ep_out, fu->cmd.req);
req               376 drivers/usb/gadget/function/f_tcm.c 	usb_ep_free_request(fu->ep_in, fu->bot_status.req);
req               382 drivers/usb/gadget/function/f_tcm.c 	fu->cmd.req = NULL;
req               383 drivers/usb/gadget/function/f_tcm.c 	fu->bot_status.req = NULL;
req               453 drivers/usb/gadget/function/f_tcm.c 		ret_lun = cdev->req->buf;
req               455 drivers/usb/gadget/function/f_tcm.c 		cdev->req->length = 1;
req               456 drivers/usb/gadget/function/f_tcm.c 		return usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
req               485 drivers/usb/gadget/function/f_tcm.c 	usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
req               487 drivers/usb/gadget/function/f_tcm.c 	fu->cmd.req = NULL;
req               508 drivers/usb/gadget/function/f_tcm.c static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req);
req               563 drivers/usb/gadget/function/f_tcm.c static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
req               565 drivers/usb/gadget/function/f_tcm.c 	struct usbg_cmd *cmd = req->context;
req               570 drivers/usb/gadget/function/f_tcm.c 	if (req->status < 0)
req               602 drivers/usb/gadget/function/f_tcm.c 		usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
req               719 drivers/usb/gadget/function/f_tcm.c static void uasp_cmd_complete(struct usb_ep *ep, struct usb_request *req)
req               721 drivers/usb/gadget/function/f_tcm.c 	struct f_uas *fu = req->context;
req               724 drivers/usb/gadget/function/f_tcm.c 	if (req->status < 0)
req               727 drivers/usb/gadget/function/f_tcm.c 	ret = usbg_submit_command(fu, req->buf, req->actual);
req               736 drivers/usb/gadget/function/f_tcm.c 	usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
req               766 drivers/usb/gadget/function/f_tcm.c 	fu->cmd.req = usb_ep_alloc_request(fu->ep_cmd, GFP_KERNEL);
req               767 drivers/usb/gadget/function/f_tcm.c 	if (!fu->cmd.req)
req               774 drivers/usb/gadget/function/f_tcm.c 	fu->cmd.req->complete = uasp_cmd_complete;
req               775 drivers/usb/gadget/function/f_tcm.c 	fu->cmd.req->buf = fu->cmd.buf;
req               776 drivers/usb/gadget/function/f_tcm.c 	fu->cmd.req->length = fu->ep_cmd->maxpacket;
req               777 drivers/usb/gadget/function/f_tcm.c 	fu->cmd.req->context = fu;
req               781 drivers/usb/gadget/function/f_tcm.c 	usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
req               821 drivers/usb/gadget/function/f_tcm.c 	ret = usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
req               952 drivers/usb/gadget/function/f_tcm.c static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
req               954 drivers/usb/gadget/function/f_tcm.c 	struct usbg_cmd *cmd = req->context;
req               957 drivers/usb/gadget/function/f_tcm.c 	if (req->status < 0) {
req               962 drivers/usb/gadget/function/f_tcm.c 	if (req->num_sgs == 0) {
req               976 drivers/usb/gadget/function/f_tcm.c static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
req               987 drivers/usb/gadget/function/f_tcm.c 		req->buf = cmd->data_buf;
req               989 drivers/usb/gadget/function/f_tcm.c 		req->buf = NULL;
req               990 drivers/usb/gadget/function/f_tcm.c 		req->num_sgs = se_cmd->t_data_nents;
req               991 drivers/usb/gadget/function/f_tcm.c 		req->sg = se_cmd->t_data_sg;
req               994 drivers/usb/gadget/function/f_tcm.c 	req->complete = usbg_data_write_cmpl;
req               995 drivers/usb/gadget/function/f_tcm.c 	req->length = se_cmd->data_length;
req               996 drivers/usb/gadget/function/f_tcm.c 	req->context = cmd;
req               389 drivers/usb/gadget/function/f_uac1.c 	struct usb_request	*req = cdev->req;
req               418 drivers/usb/gadget/function/f_uac1.c 		req->zero = 0;
req               419 drivers/usb/gadget/function/f_uac1.c 		req->length = value;
req               420 drivers/usb/gadget/function/f_uac1.c 		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
req               321 drivers/usb/gadget/function/f_uac1_legacy.c static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req)
req               323 drivers/usb/gadget/function/f_uac1_legacy.c 	struct f_audio *audio = req->context;
req               338 drivers/usb/gadget/function/f_uac1_legacy.c 	if (audio_buf_size - copy_buf->actual < req->actual) {
req               346 drivers/usb/gadget/function/f_uac1_legacy.c 	memcpy(copy_buf->buf + copy_buf->actual, req->buf, req->actual);
req               347 drivers/usb/gadget/function/f_uac1_legacy.c 	copy_buf->actual += req->actual;
req               350 drivers/usb/gadget/function/f_uac1_legacy.c 	err = usb_ep_queue(ep, req, GFP_ATOMIC);
req               358 drivers/usb/gadget/function/f_uac1_legacy.c static void f_audio_complete(struct usb_ep *ep, struct usb_request *req)
req               360 drivers/usb/gadget/function/f_uac1_legacy.c 	struct f_audio *audio = req->context;
req               361 drivers/usb/gadget/function/f_uac1_legacy.c 	int status = req->status;
req               369 drivers/usb/gadget/function/f_uac1_legacy.c 			f_audio_out_ep_complete(ep, req);
req               371 drivers/usb/gadget/function/f_uac1_legacy.c 			memcpy(&data, req->buf, req->length);
req               387 drivers/usb/gadget/function/f_uac1_legacy.c 	struct usb_request	*req = cdev->req;
req               412 drivers/usb/gadget/function/f_uac1_legacy.c 	req->context = audio;
req               413 drivers/usb/gadget/function/f_uac1_legacy.c 	req->complete = f_audio_complete;
req               423 drivers/usb/gadget/function/f_uac1_legacy.c 	struct usb_request	*req = cdev->req;
req               448 drivers/usb/gadget/function/f_uac1_legacy.c 	req->context = audio;
req               449 drivers/usb/gadget/function/f_uac1_legacy.c 	req->complete = f_audio_complete;
req               451 drivers/usb/gadget/function/f_uac1_legacy.c 	memcpy(req->buf, &value, len);
req               524 drivers/usb/gadget/function/f_uac1_legacy.c 	struct usb_request	*req = cdev->req;
req               561 drivers/usb/gadget/function/f_uac1_legacy.c 		req->zero = 0;
req               562 drivers/usb/gadget/function/f_uac1_legacy.c 		req->length = value;
req               563 drivers/usb/gadget/function/f_uac1_legacy.c 		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
req               577 drivers/usb/gadget/function/f_uac1_legacy.c 	struct usb_request *req;
req               618 drivers/usb/gadget/function/f_uac1_legacy.c 				req = usb_ep_alloc_request(out_ep, GFP_ATOMIC);
req               619 drivers/usb/gadget/function/f_uac1_legacy.c 				if (req) {
req               620 drivers/usb/gadget/function/f_uac1_legacy.c 					req->buf = kzalloc(req_buf_size,
req               622 drivers/usb/gadget/function/f_uac1_legacy.c 					if (req->buf) {
req               623 drivers/usb/gadget/function/f_uac1_legacy.c 						req->length = req_buf_size;
req               624 drivers/usb/gadget/function/f_uac1_legacy.c 						req->context = audio;
req               625 drivers/usb/gadget/function/f_uac1_legacy.c 						req->complete =
req               628 drivers/usb/gadget/function/f_uac1_legacy.c 							req, GFP_ATOMIC);
req               814 drivers/usb/gadget/function/f_uac2.c 	struct usb_request *req = fn->config->cdev->req;
req               839 drivers/usb/gadget/function/f_uac2.c 		memcpy(req->buf, &c, value);
req               841 drivers/usb/gadget/function/f_uac2.c 		*(u8 *)req->buf = 1;
req               855 drivers/usb/gadget/function/f_uac2.c 	struct usb_request *req = fn->config->cdev->req;
req               884 drivers/usb/gadget/function/f_uac2.c 		memcpy(req->buf, &r, value);
req               945 drivers/usb/gadget/function/f_uac2.c 	struct usb_request *req = cdev->req;
req               960 drivers/usb/gadget/function/f_uac2.c 		req->length = value;
req               961 drivers/usb/gadget/function/f_uac2.c 		req->zero = value < w_length;
req               962 drivers/usb/gadget/function/f_uac2.c 		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
req               966 drivers/usb/gadget/function/f_uac2.c 			req->status = 0;
req               205 drivers/usb/gadget/function/f_uvc.c uvc_function_ep0_complete(struct usb_ep *ep, struct usb_request *req)
req               207 drivers/usb/gadget/function/f_uvc.c 	struct uvc_device *uvc = req->context;
req               216 drivers/usb/gadget/function/f_uvc.c 		uvc_event->data.length = req->actual;
req               217 drivers/usb/gadget/function/f_uvc.c 		memcpy(&uvc_event->data.data, req->buf, req->actual);
req               246 drivers/usb/gadget/function/f_uvc.c 	memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req));
req                99 drivers/usb/gadget/function/tcm.h 	struct usb_request	*req;
req               104 drivers/usb/gadget/function/tcm.h 	struct usb_request	*req;
req                28 drivers/usb/gadget/function/u_audio.c 	struct usb_request *req;
req                79 drivers/usb/gadget/function/u_audio.c static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
req                84 drivers/usb/gadget/function/u_audio.c 	int status = req->status;
req                85 drivers/usb/gadget/function/u_audio.c 	struct uac_req *ur = req->context;
req                92 drivers/usb/gadget/function/u_audio.c 	if (!prm->ep_enabled || req->status == -ESHUTDOWN)
req               101 drivers/usb/gadget/function/u_audio.c 			__func__, status, req->actual, req->length);
req               126 drivers/usb/gadget/function/u_audio.c 		req->length = uac->p_pktsize;
req               135 drivers/usb/gadget/function/u_audio.c 			req->length += uac->p_framesize;
req               140 drivers/usb/gadget/function/u_audio.c 		req->actual = req->length;
req               151 drivers/usb/gadget/function/u_audio.c 		if (unlikely(pending < req->actual)) {
req               152 drivers/usb/gadget/function/u_audio.c 			memcpy(req->buf, runtime->dma_area + hw_ptr, pending);
req               153 drivers/usb/gadget/function/u_audio.c 			memcpy(req->buf + pending, runtime->dma_area,
req               154 drivers/usb/gadget/function/u_audio.c 			       req->actual - pending);
req               156 drivers/usb/gadget/function/u_audio.c 			memcpy(req->buf, runtime->dma_area + hw_ptr,
req               157 drivers/usb/gadget/function/u_audio.c 			       req->actual);
req               160 drivers/usb/gadget/function/u_audio.c 		if (unlikely(pending < req->actual)) {
req               161 drivers/usb/gadget/function/u_audio.c 			memcpy(runtime->dma_area + hw_ptr, req->buf, pending);
req               162 drivers/usb/gadget/function/u_audio.c 			memcpy(runtime->dma_area, req->buf + pending,
req               163 drivers/usb/gadget/function/u_audio.c 			       req->actual - pending);
req               165 drivers/usb/gadget/function/u_audio.c 			memcpy(runtime->dma_area + hw_ptr, req->buf,
req               166 drivers/usb/gadget/function/u_audio.c 			       req->actual);
req               172 drivers/usb/gadget/function/u_audio.c 	prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes;
req               177 drivers/usb/gadget/function/u_audio.c 	if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual)
req               181 drivers/usb/gadget/function/u_audio.c 	if (usb_ep_queue(ep, req, GFP_ATOMIC))
req               353 drivers/usb/gadget/function/u_audio.c 		if (prm->ureq[i].req) {
req               354 drivers/usb/gadget/function/u_audio.c 			usb_ep_dequeue(ep, prm->ureq[i].req);
req               355 drivers/usb/gadget/function/u_audio.c 			usb_ep_free_request(ep, prm->ureq[i].req);
req               356 drivers/usb/gadget/function/u_audio.c 			prm->ureq[i].req = NULL;
req               370 drivers/usb/gadget/function/u_audio.c 	struct usb_request *req;
req               385 drivers/usb/gadget/function/u_audio.c 		if (!prm->ureq[i].req) {
req               386 drivers/usb/gadget/function/u_audio.c 			req = usb_ep_alloc_request(ep, GFP_ATOMIC);
req               387 drivers/usb/gadget/function/u_audio.c 			if (req == NULL)
req               390 drivers/usb/gadget/function/u_audio.c 			prm->ureq[i].req = req;
req               393 drivers/usb/gadget/function/u_audio.c 			req->zero = 0;
req               394 drivers/usb/gadget/function/u_audio.c 			req->context = &prm->ureq[i];
req               395 drivers/usb/gadget/function/u_audio.c 			req->length = req_len;
req               396 drivers/usb/gadget/function/u_audio.c 			req->complete = u_audio_iso_complete;
req               397 drivers/usb/gadget/function/u_audio.c 			req->buf = prm->rbuf + i * prm->max_psize;
req               400 drivers/usb/gadget/function/u_audio.c 		if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC))
req               421 drivers/usb/gadget/function/u_audio.c 	struct usb_request *req;
req               461 drivers/usb/gadget/function/u_audio.c 		if (!prm->ureq[i].req) {
req               462 drivers/usb/gadget/function/u_audio.c 			req = usb_ep_alloc_request(ep, GFP_ATOMIC);
req               463 drivers/usb/gadget/function/u_audio.c 			if (req == NULL)
req               466 drivers/usb/gadget/function/u_audio.c 			prm->ureq[i].req = req;
req               469 drivers/usb/gadget/function/u_audio.c 			req->zero = 0;
req               470 drivers/usb/gadget/function/u_audio.c 			req->context = &prm->ureq[i];
req               471 drivers/usb/gadget/function/u_audio.c 			req->length = req_len;
req               472 drivers/usb/gadget/function/u_audio.c 			req->complete = u_audio_iso_complete;
req               473 drivers/usb/gadget/function/u_audio.c 			req->buf = prm->rbuf + i * prm->max_psize;
req               476 drivers/usb/gadget/function/u_audio.c 		if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC))
req               172 drivers/usb/gadget/function/u_ether.c static void rx_complete(struct usb_ep *ep, struct usb_request *req);
req               175 drivers/usb/gadget/function/u_ether.c rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
req               233 drivers/usb/gadget/function/u_ether.c 	req->buf = skb->data;
req               234 drivers/usb/gadget/function/u_ether.c 	req->length = size;
req               235 drivers/usb/gadget/function/u_ether.c 	req->complete = rx_complete;
req               236 drivers/usb/gadget/function/u_ether.c 	req->context = skb;
req               238 drivers/usb/gadget/function/u_ether.c 	retval = usb_ep_queue(out, req, gfp_flags);
req               247 drivers/usb/gadget/function/u_ether.c 		list_add(&req->list, &dev->rx_reqs);
req               253 drivers/usb/gadget/function/u_ether.c static void rx_complete(struct usb_ep *ep, struct usb_request *req)
req               255 drivers/usb/gadget/function/u_ether.c 	struct sk_buff	*skb = req->context, *skb2;
req               257 drivers/usb/gadget/function/u_ether.c 	int		status = req->status;
req               263 drivers/usb/gadget/function/u_ether.c 		skb_put(skb, req->actual);
req               337 drivers/usb/gadget/function/u_ether.c 		list_add(&req->list, &dev->rx_reqs);
req               339 drivers/usb/gadget/function/u_ether.c 		req = NULL;
req               341 drivers/usb/gadget/function/u_ether.c 	if (req)
req               342 drivers/usb/gadget/function/u_ether.c 		rx_submit(dev, req, GFP_ATOMIC);
req               348 drivers/usb/gadget/function/u_ether.c 	struct usb_request	*req;
req               355 drivers/usb/gadget/function/u_ether.c 	list_for_each_entry(req, list, list) {
req               360 drivers/usb/gadget/function/u_ether.c 		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
req               361 drivers/usb/gadget/function/u_ether.c 		if (!req)
req               363 drivers/usb/gadget/function/u_ether.c 		list_add(&req->list, list);
req               372 drivers/usb/gadget/function/u_ether.c 		next = req->list.next;
req               373 drivers/usb/gadget/function/u_ether.c 		list_del(&req->list);
req               374 drivers/usb/gadget/function/u_ether.c 		usb_ep_free_request(ep, req);
req               379 drivers/usb/gadget/function/u_ether.c 		req = container_of(next, struct usb_request, list);
req               405 drivers/usb/gadget/function/u_ether.c 	struct usb_request	*req;
req               411 drivers/usb/gadget/function/u_ether.c 		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
req               412 drivers/usb/gadget/function/u_ether.c 		list_del_init(&req->list);
req               415 drivers/usb/gadget/function/u_ether.c 		if (rx_submit(dev, req, gfp_flags) < 0) {
req               438 drivers/usb/gadget/function/u_ether.c static void tx_complete(struct usb_ep *ep, struct usb_request *req)
req               440 drivers/usb/gadget/function/u_ether.c 	struct sk_buff	*skb = req->context;
req               443 drivers/usb/gadget/function/u_ether.c 	switch (req->status) {
req               446 drivers/usb/gadget/function/u_ether.c 		VDBG(dev, "tx err %d\n", req->status);
req               459 drivers/usb/gadget/function/u_ether.c 	list_add(&req->list, &dev->tx_reqs);
req               478 drivers/usb/gadget/function/u_ether.c 	struct usb_request	*req = NULL;
req               531 drivers/usb/gadget/function/u_ether.c 	req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
req               532 drivers/usb/gadget/function/u_ether.c 	list_del(&req->list);
req               562 drivers/usb/gadget/function/u_ether.c 	req->buf = skb->data;
req               563 drivers/usb/gadget/function/u_ether.c 	req->context = skb;
req               564 drivers/usb/gadget/function/u_ether.c 	req->complete = tx_complete;
req               571 drivers/usb/gadget/function/u_ether.c 		req->zero = 0;
req               573 drivers/usb/gadget/function/u_ether.c 		req->zero = 1;
req               579 drivers/usb/gadget/function/u_ether.c 	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
req               582 drivers/usb/gadget/function/u_ether.c 	req->length = length;
req               584 drivers/usb/gadget/function/u_ether.c 	retval = usb_ep_queue(in, req, GFP_ATOMIC);
req               602 drivers/usb/gadget/function/u_ether.c 		list_add(&req->list, &dev->tx_reqs);
req              1129 drivers/usb/gadget/function/u_ether.c 	struct usb_request	*req;
req              1147 drivers/usb/gadget/function/u_ether.c 		req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
req              1148 drivers/usb/gadget/function/u_ether.c 		list_del(&req->list);
req              1151 drivers/usb/gadget/function/u_ether.c 		usb_ep_free_request(link->in_ep, req);
req              1160 drivers/usb/gadget/function/u_ether.c 		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
req              1161 drivers/usb/gadget/function/u_ether.c 		list_del(&req->list);
req              1164 drivers/usb/gadget/function/u_ether.c 		usb_ep_free_request(link->out_ep, req);
req               161 drivers/usb/gadget/function/u_serial.c 	struct usb_request *req;
req               163 drivers/usb/gadget/function/u_serial.c 	req = usb_ep_alloc_request(ep, kmalloc_flags);
req               165 drivers/usb/gadget/function/u_serial.c 	if (req != NULL) {
req               166 drivers/usb/gadget/function/u_serial.c 		req->length = len;
req               167 drivers/usb/gadget/function/u_serial.c 		req->buf = kmalloc(len, kmalloc_flags);
req               168 drivers/usb/gadget/function/u_serial.c 		if (req->buf == NULL) {
req               169 drivers/usb/gadget/function/u_serial.c 			usb_ep_free_request(ep, req);
req               174 drivers/usb/gadget/function/u_serial.c 	return req;
req               183 drivers/usb/gadget/function/u_serial.c void gs_free_req(struct usb_ep *ep, struct usb_request *req)
req               185 drivers/usb/gadget/function/u_serial.c 	kfree(req->buf);
req               186 drivers/usb/gadget/function/u_serial.c 	usb_ep_free_request(ep, req);
req               240 drivers/usb/gadget/function/u_serial.c 		struct usb_request	*req;
req               246 drivers/usb/gadget/function/u_serial.c 		req = list_entry(pool->next, struct usb_request, list);
req               247 drivers/usb/gadget/function/u_serial.c 		len = gs_send_packet(port, req->buf, in->maxpacket);
req               254 drivers/usb/gadget/function/u_serial.c 		req->length = len;
req               255 drivers/usb/gadget/function/u_serial.c 		list_del(&req->list);
req               256 drivers/usb/gadget/function/u_serial.c 		req->zero = kfifo_is_empty(&port->port_write_buf);
req               259 drivers/usb/gadget/function/u_serial.c 			  port->port_num, len, *((u8 *)req->buf),
req               260 drivers/usb/gadget/function/u_serial.c 			  *((u8 *)req->buf+1), *((u8 *)req->buf+2));
req               271 drivers/usb/gadget/function/u_serial.c 		status = usb_ep_queue(in, req, GFP_ATOMIC);
req               278 drivers/usb/gadget/function/u_serial.c 			list_add(&req->list, pool);
req               307 drivers/usb/gadget/function/u_serial.c 		struct usb_request	*req;
req               319 drivers/usb/gadget/function/u_serial.c 		req = list_entry(pool->next, struct usb_request, list);
req               320 drivers/usb/gadget/function/u_serial.c 		list_del(&req->list);
req               321 drivers/usb/gadget/function/u_serial.c 		req->length = out->maxpacket;
req               327 drivers/usb/gadget/function/u_serial.c 		status = usb_ep_queue(out, req, GFP_ATOMIC);
req               333 drivers/usb/gadget/function/u_serial.c 			list_add(&req->list, pool);
req               368 drivers/usb/gadget/function/u_serial.c 		struct usb_request	*req;
req               370 drivers/usb/gadget/function/u_serial.c 		req = list_first_entry(queue, struct usb_request, list);
req               376 drivers/usb/gadget/function/u_serial.c 		switch (req->status) {
req               385 drivers/usb/gadget/function/u_serial.c 				port->port_num, req->status);
req               393 drivers/usb/gadget/function/u_serial.c 		if (req->actual && tty) {
req               394 drivers/usb/gadget/function/u_serial.c 			char		*packet = req->buf;
req               395 drivers/usb/gadget/function/u_serial.c 			unsigned	size = req->actual;
req               414 drivers/usb/gadget/function/u_serial.c 					  port->port_num, count, req->actual);
req               420 drivers/usb/gadget/function/u_serial.c 		list_move(&req->list, &port->read_pool);
req               448 drivers/usb/gadget/function/u_serial.c static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
req               454 drivers/usb/gadget/function/u_serial.c 	list_add_tail(&req->list, &port->read_queue);
req               459 drivers/usb/gadget/function/u_serial.c static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
req               464 drivers/usb/gadget/function/u_serial.c 	list_add(&req->list, &port->write_pool);
req               467 drivers/usb/gadget/function/u_serial.c 	switch (req->status) {
req               471 drivers/usb/gadget/function/u_serial.c 			__func__, ep->name, req->status);
req               490 drivers/usb/gadget/function/u_serial.c 	struct usb_request	*req;
req               493 drivers/usb/gadget/function/u_serial.c 		req = list_entry(head->next, struct usb_request, list);
req               494 drivers/usb/gadget/function/u_serial.c 		list_del(&req->list);
req               495 drivers/usb/gadget/function/u_serial.c 		gs_free_req(ep, req);
req               506 drivers/usb/gadget/function/u_serial.c 	struct usb_request	*req;
req               514 drivers/usb/gadget/function/u_serial.c 		req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
req               515 drivers/usb/gadget/function/u_serial.c 		if (!req)
req               517 drivers/usb/gadget/function/u_serial.c 		req->complete = fn;
req               518 drivers/usb/gadget/function/u_serial.c 		list_add_tail(&req->list, head);
req               899 drivers/usb/gadget/function/u_serial.c 	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
req               900 drivers/usb/gadget/function/u_serial.c 	if (!req)
req               903 drivers/usb/gadget/function/u_serial.c 	req->buf = kmalloc(ep->maxpacket, GFP_ATOMIC);
req               904 drivers/usb/gadget/function/u_serial.c 	if (!req->buf) {
req               905 drivers/usb/gadget/function/u_serial.c 		usb_ep_free_request(ep, req);
req               909 drivers/usb/gadget/function/u_serial.c 	return req;
req               912 drivers/usb/gadget/function/u_serial.c static void gs_request_free(struct usb_request *req, struct usb_ep *ep)
req               914 drivers/usb/gadget/function/u_serial.c 	if (!req)
req               917 drivers/usb/gadget/function/u_serial.c 	kfree(req->buf);
req               918 drivers/usb/gadget/function/u_serial.c 	usb_ep_free_request(ep, req);
req               921 drivers/usb/gadget/function/u_serial.c static void gs_complete_out(struct usb_ep *ep, struct usb_request *req)
req               925 drivers/usb/gadget/function/u_serial.c 	switch (req->status) {
req               928 drivers/usb/gadget/function/u_serial.c 			__func__, ep->name, req->status);
req               977 drivers/usb/gadget/function/u_serial.c 	struct usb_request *req = info->console_req;
req               979 drivers/usb/gadget/function/u_serial.c 	gs_request_free(req, ep);
req               987 drivers/usb/gadget/function/u_serial.c 	struct usb_request *req;
req               998 drivers/usb/gadget/function/u_serial.c 		req = info->console_req;
req              1010 drivers/usb/gadget/function/u_serial.c 			xfer = kfifo_out(&info->con_buf, req->buf, size);
req              1011 drivers/usb/gadget/function/u_serial.c 			req->length = xfer;
req              1014 drivers/usb/gadget/function/u_serial.c 			ret = usb_ep_queue(ep, req, GFP_ATOMIC);
req                54 drivers/usb/gadget/function/u_serial.h void gs_free_req(struct usb_ep *, struct usb_request *req);
req                90 drivers/usb/gadget/function/uvc.h 	struct usb_request *req[UVC_NUM_REQUESTS];
req                95 drivers/usb/gadget/function/uvc.h 	void (*encode) (struct usb_request *req, struct uvc_video *video,
req                36 drivers/usb/gadget/function/uvc_v4l2.c 	struct usb_request *req = uvc->control_req;
req                41 drivers/usb/gadget/function/uvc_v4l2.c 	req->length = min_t(unsigned int, uvc->event_length, data->length);
req                42 drivers/usb/gadget/function/uvc_v4l2.c 	req->zero = data->length < uvc->event_length;
req                44 drivers/usb/gadget/function/uvc_v4l2.c 	memcpy(req->buf, data->data, req->length);
req                46 drivers/usb/gadget/function/uvc_v4l2.c 	return usb_ep_queue(cdev->gadget->ep0, req, GFP_KERNEL);
req                58 drivers/usb/gadget/function/uvc_video.c uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
req                61 drivers/usb/gadget/function/uvc_video.c 	void *mem = req->buf;
req                80 drivers/usb/gadget/function/uvc_video.c 	req->length = video->req_size - len;
req                81 drivers/usb/gadget/function/uvc_video.c 	req->zero = video->payload_size == video->max_payload_size;
req                98 drivers/usb/gadget/function/uvc_video.c uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
req               101 drivers/usb/gadget/function/uvc_video.c 	void *mem = req->buf;
req               114 drivers/usb/gadget/function/uvc_video.c 	req->length = video->req_size - len;
req               128 drivers/usb/gadget/function/uvc_video.c static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
req               132 drivers/usb/gadget/function/uvc_video.c 	ret = usb_ep_queue(video->ep, req, GFP_ATOMIC);
req               176 drivers/usb/gadget/function/uvc_video.c uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
req               178 drivers/usb/gadget/function/uvc_video.c 	struct uvc_video *video = req->context;
req               184 drivers/usb/gadget/function/uvc_video.c 	switch (req->status) {
req               196 drivers/usb/gadget/function/uvc_video.c 			  req->status);
req               208 drivers/usb/gadget/function/uvc_video.c 	video->encode(req, video, buf);
req               210 drivers/usb/gadget/function/uvc_video.c 	ret = uvcg_video_ep_queue(video, req);
req               222 drivers/usb/gadget/function/uvc_video.c 	list_add_tail(&req->list, &video->req_free);
req               232 drivers/usb/gadget/function/uvc_video.c 		if (video->req[i]) {
req               233 drivers/usb/gadget/function/uvc_video.c 			usb_ep_free_request(video->ep, video->req[i]);
req               234 drivers/usb/gadget/function/uvc_video.c 			video->req[i] = NULL;
req               266 drivers/usb/gadget/function/uvc_video.c 		video->req[i] = usb_ep_alloc_request(video->ep, GFP_KERNEL);
req               267 drivers/usb/gadget/function/uvc_video.c 		if (video->req[i] == NULL)
req               270 drivers/usb/gadget/function/uvc_video.c 		video->req[i]->buf = video->req_buffer[i];
req               271 drivers/usb/gadget/function/uvc_video.c 		video->req[i]->length = 0;
req               272 drivers/usb/gadget/function/uvc_video.c 		video->req[i]->complete = uvc_video_complete;
req               273 drivers/usb/gadget/function/uvc_video.c 		video->req[i]->context = video;
req               275 drivers/usb/gadget/function/uvc_video.c 		list_add_tail(&video->req[i]->list, &video->req_free);
req               300 drivers/usb/gadget/function/uvc_video.c 	struct usb_request *req;
req               318 drivers/usb/gadget/function/uvc_video.c 		req = list_first_entry(&video->req_free, struct usb_request,
req               320 drivers/usb/gadget/function/uvc_video.c 		list_del(&req->list);
req               333 drivers/usb/gadget/function/uvc_video.c 		video->encode(req, video, buf);
req               336 drivers/usb/gadget/function/uvc_video.c 		ret = uvcg_video_ep_queue(video, req);
req               346 drivers/usb/gadget/function/uvc_video.c 	list_add_tail(&req->list, &video->req_free);
req               367 drivers/usb/gadget/function/uvc_video.c 			if (video->req[i])
req               368 drivers/usb/gadget/function/uvc_video.c 				usb_ep_dequeue(video->ep, video->req[i]);
req                28 drivers/usb/gadget/legacy/dbgp.c 	struct usb_request *req;
req                92 drivers/usb/gadget/legacy/dbgp.c static void dbgp_complete(struct usb_ep *ep, struct usb_request *req)
req                96 drivers/usb/gadget/legacy/dbgp.c 	int status = req->status;
req               108 drivers/usb/gadget/legacy/dbgp.c 	dbgp_consume(req->buf, req->actual);
req               110 drivers/usb/gadget/legacy/dbgp.c 	req->length = DBGP_REQ_LEN;
req               111 drivers/usb/gadget/legacy/dbgp.c 	err = usb_ep_queue(ep, req, GFP_ATOMIC);
req               120 drivers/usb/gadget/legacy/dbgp.c 	kfree(req->buf);
req               121 drivers/usb/gadget/legacy/dbgp.c 	usb_ep_free_request(dbgp.o_ep, req);
req               131 drivers/usb/gadget/legacy/dbgp.c 	struct usb_request *req;
req               133 drivers/usb/gadget/legacy/dbgp.c 	req = usb_ep_alloc_request(ep, GFP_KERNEL);
req               134 drivers/usb/gadget/legacy/dbgp.c 	if (!req) {
req               140 drivers/usb/gadget/legacy/dbgp.c 	req->buf = kmalloc(DBGP_REQ_LEN, GFP_KERNEL);
req               141 drivers/usb/gadget/legacy/dbgp.c 	if (!req->buf) {
req               147 drivers/usb/gadget/legacy/dbgp.c 	req->complete = dbgp_complete;
req               148 drivers/usb/gadget/legacy/dbgp.c 	req->length = DBGP_REQ_LEN;
req               149 drivers/usb/gadget/legacy/dbgp.c 	err = usb_ep_queue(ep, req, GFP_ATOMIC);
req               158 drivers/usb/gadget/legacy/dbgp.c 	kfree(req->buf);
req               160 drivers/usb/gadget/legacy/dbgp.c 	usb_ep_free_request(dbgp.o_ep, req);
req               224 drivers/usb/gadget/legacy/dbgp.c 	if (dbgp.req) {
req               225 drivers/usb/gadget/legacy/dbgp.c 		kfree(dbgp.req->buf);
req               226 drivers/usb/gadget/legacy/dbgp.c 		usb_ep_free_request(gadget->ep0, dbgp.req);
req               227 drivers/usb/gadget/legacy/dbgp.c 		dbgp.req = NULL;
req               284 drivers/usb/gadget/legacy/dbgp.c 	dbgp.req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
req               285 drivers/usb/gadget/legacy/dbgp.c 	if (!dbgp.req) {
req               291 drivers/usb/gadget/legacy/dbgp.c 	dbgp.req->buf = kmalloc(DBGP_REQ_EP0_LEN, GFP_KERNEL);
req               292 drivers/usb/gadget/legacy/dbgp.c 	if (!dbgp.req->buf) {
req               298 drivers/usb/gadget/legacy/dbgp.c 	dbgp.req->length = DBGP_REQ_EP0_LEN;
req               331 drivers/usb/gadget/legacy/dbgp.c 				struct usb_request *req)
req               334 drivers/usb/gadget/legacy/dbgp.c 		req->status, req->actual, req->length);
req               340 drivers/usb/gadget/legacy/dbgp.c 	struct usb_request *req = dbgp.req;
req               382 drivers/usb/gadget/legacy/dbgp.c 	req->length = min(length, len);
req               383 drivers/usb/gadget/legacy/dbgp.c 	req->zero = len < req->length;
req               384 drivers/usb/gadget/legacy/dbgp.c 	if (data && req->length)
req               385 drivers/usb/gadget/legacy/dbgp.c 		memcpy(req->buf, data, req->length);
req               387 drivers/usb/gadget/legacy/dbgp.c 	req->complete = dbgp_setup_complete;
req               388 drivers/usb/gadget/legacy/dbgp.c 	return usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
req               138 drivers/usb/gadget/legacy/inode.c 	struct usb_request		*req;
req               196 drivers/usb/gadget/legacy/inode.c 	struct usb_request		*req;
req               271 drivers/usb/gadget/legacy/inode.c static void epio_complete (struct usb_ep *ep, struct usb_request *req)
req               275 drivers/usb/gadget/legacy/inode.c 	if (!req->context)
req               277 drivers/usb/gadget/legacy/inode.c 	if (req->status)
req               278 drivers/usb/gadget/legacy/inode.c 		epdata->status = req->status;
req               280 drivers/usb/gadget/legacy/inode.c 		epdata->status = req->actual;
req               281 drivers/usb/gadget/legacy/inode.c 	complete ((struct completion *)req->context);
req               335 drivers/usb/gadget/legacy/inode.c 		struct usb_request	*req = epdata->req;
req               337 drivers/usb/gadget/legacy/inode.c 		req->context = &done;
req               338 drivers/usb/gadget/legacy/inode.c 		req->complete = epio_complete;
req               339 drivers/usb/gadget/legacy/inode.c 		req->buf = buf;
req               340 drivers/usb/gadget/legacy/inode.c 		req->length = len;
req               341 drivers/usb/gadget/legacy/inode.c 		value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
req               353 drivers/usb/gadget/legacy/inode.c 				usb_ep_dequeue (epdata->ep, epdata->req);
req               428 drivers/usb/gadget/legacy/inode.c 	struct usb_request	*req;
req               448 drivers/usb/gadget/legacy/inode.c 	if (likely(epdata && epdata->ep && priv->req))
req               449 drivers/usb/gadget/legacy/inode.c 		value = usb_ep_dequeue (epdata->ep, priv->req);
req               479 drivers/usb/gadget/legacy/inode.c static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
req               481 drivers/usb/gadget/legacy/inode.c 	struct kiocb		*iocb = req->context;
req               487 drivers/usb/gadget/legacy/inode.c 	priv->req = NULL;
req               494 drivers/usb/gadget/legacy/inode.c 	if (priv->to_free == NULL || unlikely(req->actual == 0)) {
req               495 drivers/usb/gadget/legacy/inode.c 		kfree(req->buf);
req               501 drivers/usb/gadget/legacy/inode.c 		iocb->ki_complete(iocb, req->actual ? req->actual : req->status,
req               502 drivers/usb/gadget/legacy/inode.c 				req->status);
req               505 drivers/usb/gadget/legacy/inode.c 		if (unlikely(0 != req->status))
req               507 drivers/usb/gadget/legacy/inode.c 				ep->name, req->status, req->actual);
req               509 drivers/usb/gadget/legacy/inode.c 		priv->buf = req->buf;
req               510 drivers/usb/gadget/legacy/inode.c 		priv->actual = req->actual;
req               515 drivers/usb/gadget/legacy/inode.c 	usb_ep_free_request(ep, req);
req               526 drivers/usb/gadget/legacy/inode.c 	struct usb_request *req;
req               546 drivers/usb/gadget/legacy/inode.c 	req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
req               548 drivers/usb/gadget/legacy/inode.c 	if (unlikely(!req))
req               551 drivers/usb/gadget/legacy/inode.c 	priv->req = req;
req               552 drivers/usb/gadget/legacy/inode.c 	req->buf = buf;
req               553 drivers/usb/gadget/legacy/inode.c 	req->length = len;
req               554 drivers/usb/gadget/legacy/inode.c 	req->complete = ep_aio_complete;
req               555 drivers/usb/gadget/legacy/inode.c 	req->context = iocb;
req               556 drivers/usb/gadget/legacy/inode.c 	value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
req               558 drivers/usb/gadget/legacy/inode.c 		usb_ep_free_request(epdata->ep, req);
req               851 drivers/usb/gadget/legacy/inode.c static void clean_req (struct usb_ep *ep, struct usb_request *req)
req               855 drivers/usb/gadget/legacy/inode.c 	if (req->buf != dev->rbuf) {
req               856 drivers/usb/gadget/legacy/inode.c 		kfree(req->buf);
req               857 drivers/usb/gadget/legacy/inode.c 		req->buf = dev->rbuf;
req               859 drivers/usb/gadget/legacy/inode.c 	req->complete = epio_complete;
req               863 drivers/usb/gadget/legacy/inode.c static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
req               872 drivers/usb/gadget/legacy/inode.c 		dev->setup_out_error = (req->status != 0);
req               880 drivers/usb/gadget/legacy/inode.c 	if (free && req->buf != &dev->rbuf)
req               881 drivers/usb/gadget/legacy/inode.c 		clean_req (ep, req);
req               882 drivers/usb/gadget/legacy/inode.c 	req->complete = epio_complete;
req               886 drivers/usb/gadget/legacy/inode.c static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
req               895 drivers/usb/gadget/legacy/inode.c 		req->buf = kmalloc(len, GFP_ATOMIC);
req               896 drivers/usb/gadget/legacy/inode.c 	if (req->buf == NULL) {
req               897 drivers/usb/gadget/legacy/inode.c 		req->buf = dev->rbuf;
req               900 drivers/usb/gadget/legacy/inode.c 	req->complete = ep0_complete;
req               901 drivers/usb/gadget/legacy/inode.c 	req->length = len;
req               902 drivers/usb/gadget/legacy/inode.c 	req->zero = 0;
req               937 drivers/usb/gadget/legacy/inode.c 			struct usb_request	*req = dev->req;
req               939 drivers/usb/gadget/legacy/inode.c 			if ((retval = setup_req (ep, req, 0)) == 0) {
req               942 drivers/usb/gadget/legacy/inode.c 				retval = usb_ep_queue (ep, req, GFP_KERNEL);
req               985 drivers/usb/gadget/legacy/inode.c 				len = min (len, (size_t)dev->req->actual);
req               988 drivers/usb/gadget/legacy/inode.c 				if (copy_to_user (buf, dev->req->buf, len))
req               994 drivers/usb/gadget/legacy/inode.c 				clean_req (dev->gadget->ep0, dev->req);
req              1134 drivers/usb/gadget/legacy/inode.c 			retval = setup_req (dev->gadget->ep0, dev->req, len);
req              1139 drivers/usb/gadget/legacy/inode.c 				if (copy_from_user (dev->req->buf, buf, len))
req              1143 drivers/usb/gadget/legacy/inode.c 						dev->req->zero = 1;
req              1145 drivers/usb/gadget/legacy/inode.c 						dev->gadget->ep0, dev->req,
req              1151 drivers/usb/gadget/legacy/inode.c 					clean_req (dev->gadget->ep0, dev->req);
req              1316 drivers/usb/gadget/legacy/inode.c 		dev->req->buf = dev->hs_config;
req              1319 drivers/usb/gadget/legacy/inode.c 		dev->req->buf = dev->config;
req              1322 drivers/usb/gadget/legacy/inode.c 	((u8 *)dev->req->buf) [1] = type;
req              1330 drivers/usb/gadget/legacy/inode.c 	struct usb_request		*req = dev->req;
req              1362 drivers/usb/gadget/legacy/inode.c 	req->buf = dev->rbuf;
req              1363 drivers/usb/gadget/legacy/inode.c 	req->context = NULL;
req              1374 drivers/usb/gadget/legacy/inode.c 			req->buf = dev->dev;
req              1452 drivers/usb/gadget/legacy/inode.c 		*(u8 *)req->buf = dev->current_config;
req              1476 drivers/usb/gadget/legacy/inode.c 				value = setup_req (gadget->ep0, dev->req,
req              1483 drivers/usb/gadget/legacy/inode.c 				value = usb_ep_queue (gadget->ep0, dev->req,
req              1488 drivers/usb/gadget/legacy/inode.c 					clean_req (gadget->ep0, dev->req);
req              1507 drivers/usb/gadget/legacy/inode.c 		req->length = value;
req              1508 drivers/usb/gadget/legacy/inode.c 		req->zero = value < w_length;
req              1512 drivers/usb/gadget/legacy/inode.c 		value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
req              1518 drivers/usb/gadget/legacy/inode.c 			req->status = 0;
req              1553 drivers/usb/gadget/legacy/inode.c 		usb_ep_free_request (ep->ep, ep->req);
req              1598 drivers/usb/gadget/legacy/inode.c 		data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
req              1599 drivers/usb/gadget/legacy/inode.c 		if (!data->req)
req              1611 drivers/usb/gadget/legacy/inode.c 	usb_ep_free_request (ep, data->req);
req              1642 drivers/usb/gadget/legacy/inode.c 	if (dev->req)
req              1643 drivers/usb/gadget/legacy/inode.c 		usb_ep_free_request (gadget->ep0, dev->req);
req              1668 drivers/usb/gadget/legacy/inode.c 	dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
req              1669 drivers/usb/gadget/legacy/inode.c 	if (!dev->req)
req              1671 drivers/usb/gadget/legacy/inode.c 	dev->req->context = NULL;
req              1672 drivers/usb/gadget/legacy/inode.c 	dev->req->complete = epio_complete;
req                16 drivers/usb/gadget/u_f.c 	struct usb_request      *req;
req                18 drivers/usb/gadget/u_f.c 	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
req                19 drivers/usb/gadget/u_f.c 	if (req) {
req                20 drivers/usb/gadget/u_f.c 		req->length = usb_endpoint_dir_out(ep->desc) ?
req                22 drivers/usb/gadget/u_f.c 		req->buf = kmalloc(req->length, GFP_ATOMIC);
req                23 drivers/usb/gadget/u_f.c 		if (!req->buf) {
req                24 drivers/usb/gadget/u_f.c 			usb_ep_free_request(ep, req);
req                25 drivers/usb/gadget/u_f.c 			req = NULL;
req                28 drivers/usb/gadget/u_f.c 	return req;
req                62 drivers/usb/gadget/u_f.h static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req)
req                64 drivers/usb/gadget/u_f.h 	WARN_ON(req->buf == NULL);
req                65 drivers/usb/gadget/u_f.h 	kfree(req->buf);
req                66 drivers/usb/gadget/u_f.h 	req->buf = NULL;
req                67 drivers/usb/gadget/u_f.h 	usb_ep_free_request(ep, req);
req               478 drivers/usb/gadget/udc/amd5536udc.h 	struct usb_request		req;
req               507 drivers/usb/gadget/udc/amd5536udc.h 	struct udc_request		*req;
req                35 drivers/usb/gadget/udc/aspeed-vhub/core.c void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
req                38 drivers/usb/gadget/udc/aspeed-vhub/core.c 	bool internal = req->internal;
req                40 drivers/usb/gadget/udc/aspeed-vhub/core.c 	EPVDBG(ep, "completing request @%p, status %d\n", req, status);
req                42 drivers/usb/gadget/udc/aspeed-vhub/core.c 	list_del_init(&req->queue);
req                44 drivers/usb/gadget/udc/aspeed-vhub/core.c 	if (req->req.status == -EINPROGRESS)
req                45 drivers/usb/gadget/udc/aspeed-vhub/core.c 		req->req.status = status;
req                47 drivers/usb/gadget/udc/aspeed-vhub/core.c 	if (req->req.dma) {
req                50 drivers/usb/gadget/udc/aspeed-vhub/core.c 						 &req->req, ep->epn.is_in);
req                51 drivers/usb/gadget/udc/aspeed-vhub/core.c 		req->req.dma = 0;
req                60 drivers/usb/gadget/udc/aspeed-vhub/core.c 		usb_gadget_giveback_request(&ep->ep, &req->req);
req                67 drivers/usb/gadget/udc/aspeed-vhub/core.c 	struct ast_vhub_req *req;
req                72 drivers/usb/gadget/udc/aspeed-vhub/core.c 		req = list_first_entry(&ep->queue, struct ast_vhub_req, queue);
req                73 drivers/usb/gadget/udc/aspeed-vhub/core.c 		ast_vhub_done(ep, req, status);
req                83 drivers/usb/gadget/udc/aspeed-vhub/core.c 	struct ast_vhub_req *req;
req                85 drivers/usb/gadget/udc/aspeed-vhub/core.c 	req = kzalloc(sizeof(*req), gfp_flags);
req                86 drivers/usb/gadget/udc/aspeed-vhub/core.c 	if (!req)
req                88 drivers/usb/gadget/udc/aspeed-vhub/core.c 	return &req->req;
req                93 drivers/usb/gadget/udc/aspeed-vhub/core.c 	struct ast_vhub_req *req = to_ast_req(u_req);
req                95 drivers/usb/gadget/udc/aspeed-vhub/core.c 	kfree(req);
req                37 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	struct usb_request *req = &ep->ep0.req.req;
req                46 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	if (WARN_ON(req->status == -EINPROGRESS))
req                49 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	req->buf = ptr;
req                50 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	req->length = len;
req                51 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	req->complete = NULL;
req                52 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	req->zero = true;
req                60 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	if (ep->ep.ops->queue(&ep->ep, req, GFP_ATOMIC))
req               183 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 				 struct ast_vhub_req *req)
req               191 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	if (req->req.length == 0)
req               192 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 		req->last_desc = 1;
req               195 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	if (req->last_desc >= 0) {
req               197 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 		       req->req.actual, req->req.length);
req               200 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 		ast_vhub_done(ep, req, 0);
req               208 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	chunk = req->req.length - req->req.actual;
req               211 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
req               212 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 		req->last_desc = 1;
req               215 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	       chunk, req->last_desc, req->req.actual, ep->ep.maxpacket);
req               221 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	if (chunk && req->req.buf)
req               222 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 		memcpy(ep->buf, req->req.buf + req->req.actual, chunk);
req               230 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	req->req.actual += chunk;
req               241 drivers/usb/gadget/udc/aspeed-vhub/ep0.c static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
req               248 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	remain = req->req.length - req->req.actual;
req               259 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	if (len && req->req.buf)
req               260 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 		memcpy(req->req.buf + req->req.actual, ep->buf, len);
req               261 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	req->req.actual += len;
req               267 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 		ast_vhub_done(ep, req, rc);
req               274 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	struct ast_vhub_req *req;
req               284 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
req               287 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 		stat, ep->ep0.state, ep->ep0.dir_in, in_ack, req);
req               292 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 		if (req) {
req               312 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 		if (!req) {
req               320 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 			ast_vhub_ep0_do_send(ep, req);
req               322 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 			ast_vhub_ep0_do_receive(ep, req, VHUB_EP0_RX_LEN(stat));
req               326 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 		if (req) {
req               361 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	struct ast_vhub_req *req = to_ast_req(u_req);
req               368 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	if (!u_req || (!u_req->complete && !req->internal)) {
req               372 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 				 u_req->complete, req->internal);
req               386 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	if (u_req->length && !u_req->buf && !req->internal) {
req               391 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	EPVDBG(ep, "enqueue req @%p\n", req);
req               399 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	req->last_desc = -1;
req               400 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	req->active = false;
req               416 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	list_add_tail(&req->queue, &ep->queue);
req               420 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 		ast_vhub_ep0_do_send(ep, req);
req               426 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 		ast_vhub_done(ep, req, 0);
req               441 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	struct ast_vhub_req *req;
req               448 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
req               451 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	if (req && u_req == &req->req) {
req               452 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 		EPVDBG(ep, "dequeue req @%p\n", req);
req               458 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 		ast_vhub_done(ep, req, -ECONNRESET);
req               502 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	INIT_LIST_HEAD(&ep->ep0.req.queue);
req               503 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	ep->ep0.req.internal = true;
req                46 drivers/usb/gadget/udc/aspeed-vhub/epn.c static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
req                48 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	unsigned int act = req->req.actual;
req                49 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	unsigned int len = req->req.length;
req                53 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	WARN_ON(req->active);
req                59 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
req                60 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		req->last_desc = 1;
req                63 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	       req, act, len, chunk, req->last_desc);
req                66 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	if (!req->req.dma) {
req                70 drivers/usb/gadget/udc/aspeed-vhub/epn.c 			memcpy(ep->buf, req->req.buf + act, chunk);
req                76 drivers/usb/gadget/udc/aspeed-vhub/epn.c 			vhub_dma_workaround(req->req.buf);
req                77 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
req                81 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	req->active = true;
req                90 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	struct ast_vhub_req *req;
req                98 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
req               101 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	       stat, ep->epn.is_in, req, req ? req->active : 0);
req               104 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	if (!req)
req               111 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	if (!req->active)
req               121 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	req->active = false;
req               127 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	if (!req->req.dma && !ep->epn.is_in && len)
req               128 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		memcpy(req->req.buf + req->req.actual, ep->buf, len);
req               131 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	req->req.actual += len;
req               135 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		req->last_desc = 1;
req               138 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	if (req->last_desc >= 0) {
req               139 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		ast_vhub_done(ep, req, 0);
req               140 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req,
req               147 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		if (!req || req->active)
req               152 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	ast_vhub_epn_kick(ep, req);
req               167 drivers/usb/gadget/udc/aspeed-vhub/epn.c 				   struct ast_vhub_req *req)
req               170 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	unsigned int act = req->act_count;
req               171 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	unsigned int len = req->req.length;
req               175 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	req->active = true;
req               178 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	if (req->last_desc >= 0)
req               185 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
req               204 drivers/usb/gadget/udc/aspeed-vhub/epn.c 			if (!chunk || !req->req.zero || (chunk % ep->ep.maxpacket) != 0)
req               205 drivers/usb/gadget/udc/aspeed-vhub/epn.c 				req->last_desc = d_num;
req               211 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		       act, len, chunk, req->last_desc, d_num,
req               215 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		desc->w0 = cpu_to_le32(req->req.dma + act);
req               228 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		if (req->last_desc >= 0 || !ast_vhub_count_free_descs(ep))
req               232 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		req->act_count = act = act + chunk;
req               248 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	struct ast_vhub_req *req;
req               262 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
req               282 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		       d_num, len, req, req ? req->active : 0);
req               285 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		if (!req || !req->active)
req               289 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		req->req.actual += len;
req               292 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		is_last_desc = req->last_desc == d_num;
req               294 drivers/usb/gadget/udc/aspeed-vhub/epn.c 					   (req->req.actual >= req->req.length &&
req               295 drivers/usb/gadget/udc/aspeed-vhub/epn.c 					    !req->req.zero)),
req               298 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		      is_last_desc, len, req->req.actual, req->req.length,
req               299 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		      req->req.zero, ep->ep.maxpacket);
req               312 drivers/usb/gadget/udc/aspeed-vhub/epn.c 			ast_vhub_done(ep, req, 0);
req               313 drivers/usb/gadget/udc/aspeed-vhub/epn.c 			req = list_first_entry_or_null(&ep->queue,
req               321 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	if (req)
req               322 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		ast_vhub_epn_kick_desc(ep, req);
req               336 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	struct ast_vhub_req *req = to_ast_req(u_req);
req               348 drivers/usb/gadget/udc/aspeed-vhub/epn.c 				 u_req->complete, req->internal);
req               389 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	EPVDBG(ep, "enqueue req @%p\n", req);
req               398 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	req->act_count = 0;
req               399 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	req->active = false;
req               400 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	req->last_desc = -1;
req               405 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	list_add_tail(&req->queue, &ep->queue);
req               408 drivers/usb/gadget/udc/aspeed-vhub/epn.c 			ast_vhub_epn_kick_desc(ep, req);
req               410 drivers/usb/gadget/udc/aspeed-vhub/epn.c 			ast_vhub_epn_kick(ep, req);
req               471 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	struct ast_vhub_req *req;
req               478 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	list_for_each_entry (req, &ep->queue, queue) {
req               479 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		if (&req->req == u_req)
req               483 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	if (&req->req == u_req) {
req               485 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		       req, req->active);
req               486 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		if (req->active)
req               488 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		ast_vhub_done(ep, req, -ECONNRESET);
req               235 drivers/usb/gadget/udc/aspeed-vhub/vhub.h 	struct usb_request	req;
req               253 drivers/usb/gadget/udc/aspeed-vhub/vhub.h #define to_ast_req(__ureq) container_of(__ureq, struct ast_vhub_req, req)
req               307 drivers/usb/gadget/udc/aspeed-vhub/vhub.h 			struct ast_vhub_req	req;
req               499 drivers/usb/gadget/udc/aspeed-vhub/vhub.h void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
req               112 drivers/usb/gadget/udc/at91_udc.c 	struct at91_request	*req;
req               153 drivers/usb/gadget/udc/at91_udc.c 	else list_for_each_entry (req, &ep->queue, queue) {
req               154 drivers/usb/gadget/udc/at91_udc.c 		unsigned	length = req->req.actual;
req               157 drivers/usb/gadget/udc/at91_udc.c 				&req->req, length,
req               158 drivers/usb/gadget/udc/at91_udc.c 				req->req.length, req->req.buf);
req               259 drivers/usb/gadget/udc/at91_udc.c static void done(struct at91_ep *ep, struct at91_request *req, int status)
req               264 drivers/usb/gadget/udc/at91_udc.c 	list_del_init(&req->queue);
req               265 drivers/usb/gadget/udc/at91_udc.c 	if (req->req.status == -EINPROGRESS)
req               266 drivers/usb/gadget/udc/at91_udc.c 		req->req.status = status;
req               268 drivers/usb/gadget/udc/at91_udc.c 		status = req->req.status;
req               270 drivers/usb/gadget/udc/at91_udc.c 		VDBG("%s done %p, status %d\n", ep->ep.name, req, status);
req               274 drivers/usb/gadget/udc/at91_udc.c 	usb_gadget_giveback_request(&ep->ep, &req->req);
req               310 drivers/usb/gadget/udc/at91_udc.c static int read_fifo (struct at91_ep *ep, struct at91_request *req)
req               318 drivers/usb/gadget/udc/at91_udc.c 	buf = req->req.buf + req->req.actual;
req               319 drivers/usb/gadget/udc/at91_udc.c 	bufferspace = req->req.length - req->req.actual;
req               335 drivers/usb/gadget/udc/at91_udc.c 		req->req.status = -EOVERFLOW;
req               354 drivers/usb/gadget/udc/at91_udc.c 	req->req.actual += count;
req               359 drivers/usb/gadget/udc/at91_udc.c 	PACKET("%s %p out/%d%s\n", ep->ep.name, &req->req, count,
req               367 drivers/usb/gadget/udc/at91_udc.c 		done(ep, req, 0);
req               385 drivers/usb/gadget/udc/at91_udc.c static int write_fifo(struct at91_ep *ep, struct at91_request *req)
req               416 drivers/usb/gadget/udc/at91_udc.c 	buf = req->req.buf + req->req.actual;
req               418 drivers/usb/gadget/udc/at91_udc.c 	total = req->req.length - req->req.actual;
req               424 drivers/usb/gadget/udc/at91_udc.c 		is_last = (count < ep->ep.maxpacket) || !req->req.zero;
req               444 drivers/usb/gadget/udc/at91_udc.c 	req->req.actual += count;
req               446 drivers/usb/gadget/udc/at91_udc.c 	PACKET("%s %p in/%d%s\n", ep->ep.name, &req->req, count,
req               449 drivers/usb/gadget/udc/at91_udc.c 		done(ep, req, 0);
req               455 drivers/usb/gadget/udc/at91_udc.c 	struct at91_request *req;
req               464 drivers/usb/gadget/udc/at91_udc.c 		req = list_entry(ep->queue.next, struct at91_request, queue);
req               465 drivers/usb/gadget/udc/at91_udc.c 		done(ep, req, status);
req               585 drivers/usb/gadget/udc/at91_udc.c 	struct at91_request *req;
req               587 drivers/usb/gadget/udc/at91_udc.c 	req = kzalloc(sizeof (struct at91_request), gfp_flags);
req               588 drivers/usb/gadget/udc/at91_udc.c 	if (!req)
req               591 drivers/usb/gadget/udc/at91_udc.c 	INIT_LIST_HEAD(&req->queue);
req               592 drivers/usb/gadget/udc/at91_udc.c 	return &req->req;
req               597 drivers/usb/gadget/udc/at91_udc.c 	struct at91_request *req;
req               599 drivers/usb/gadget/udc/at91_udc.c 	req = container_of(_req, struct at91_request, req);
req               600 drivers/usb/gadget/udc/at91_udc.c 	BUG_ON(!list_empty(&req->queue));
req               601 drivers/usb/gadget/udc/at91_udc.c 	kfree(req);
req               607 drivers/usb/gadget/udc/at91_udc.c 	struct at91_request	*req;
req               613 drivers/usb/gadget/udc/at91_udc.c 	req = container_of(_req, struct at91_request, req);
req               617 drivers/usb/gadget/udc/at91_udc.c 			|| !_req->buf || !list_empty(&req->queue)) {
req               670 drivers/usb/gadget/udc/at91_udc.c 			if (req->req.length == 0) {
req               684 drivers/usb/gadget/udc/at91_udc.c 			status = write_fifo(ep, req);
req               686 drivers/usb/gadget/udc/at91_udc.c 			status = read_fifo(ep, req);
req               695 drivers/usb/gadget/udc/at91_udc.c 	if (req && !status) {
req               696 drivers/usb/gadget/udc/at91_udc.c 		list_add_tail (&req->queue, &ep->queue);
req               707 drivers/usb/gadget/udc/at91_udc.c 	struct at91_request	*req;
req               720 drivers/usb/gadget/udc/at91_udc.c 	list_for_each_entry (req, &ep->queue, queue) {
req               721 drivers/usb/gadget/udc/at91_udc.c 		if (&req->req == _req)
req               724 drivers/usb/gadget/udc/at91_udc.c 	if (&req->req != _req) {
req               729 drivers/usb/gadget/udc/at91_udc.c 	done(ep, req, -ECONNRESET);
req              1005 drivers/usb/gadget/udc/at91_udc.c 	struct at91_request	*req;
req              1010 drivers/usb/gadget/udc/at91_udc.c 		req = list_entry(ep->queue.next,
req              1013 drivers/usb/gadget/udc/at91_udc.c 		req = NULL;
req              1021 drivers/usb/gadget/udc/at91_udc.c 		if (req)
req              1022 drivers/usb/gadget/udc/at91_udc.c 			return write_fifo(ep, req);
req              1027 drivers/usb/gadget/udc/at91_udc.c 			if (ep->is_iso && req)
req              1028 drivers/usb/gadget/udc/at91_udc.c 				req->req.status = -EILSEQ;
req              1034 drivers/usb/gadget/udc/at91_udc.c 		if (req && (csr & RX_DATA_READY))
req              1035 drivers/usb/gadget/udc/at91_udc.c 			return read_fifo(ep, req);
req              1276 drivers/usb/gadget/udc/at91_udc.c 	struct at91_request	*req;
req              1295 drivers/usb/gadget/udc/at91_udc.c 		req = NULL;
req              1297 drivers/usb/gadget/udc/at91_udc.c 		req = list_entry(ep0->queue.next, struct at91_request, queue);
req              1305 drivers/usb/gadget/udc/at91_udc.c 		if (req && ep0->is_in) {
req              1349 drivers/usb/gadget/udc/at91_udc.c 			if (req) {
req              1388 drivers/usb/gadget/udc/at91_udc.c 			if (req)
req              1389 drivers/usb/gadget/udc/at91_udc.c 				done(ep0, req, 0);
req               155 drivers/usb/gadget/udc/at91_udc.h 	struct usb_request		req;
req                39 drivers/usb/gadget/udc/atmel_usba_udc.c 	struct usba_request *req, *req_copy;
req                48 drivers/usb/gadget/udc/atmel_usba_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
req                49 drivers/usb/gadget/udc/atmel_usba_udc.c 		req_copy = kmemdup(req, sizeof(*req_copy), GFP_ATOMIC);
req                61 drivers/usb/gadget/udc/atmel_usba_udc.c 	list_for_each_entry_safe(req, req_copy, queue_data, queue) {
req                62 drivers/usb/gadget/udc/atmel_usba_udc.c 		list_del(&req->queue);
req                63 drivers/usb/gadget/udc/atmel_usba_udc.c 		kfree(req);
req                87 drivers/usb/gadget/udc/atmel_usba_udc.c 	struct usba_request *req, *tmp_req;
req                95 drivers/usb/gadget/udc/atmel_usba_udc.c 	list_for_each_entry_safe(req, tmp_req, queue, queue) {
req                98 drivers/usb/gadget/udc/atmel_usba_udc.c 				req->req.buf, req->req.length,
req                99 drivers/usb/gadget/udc/atmel_usba_udc.c 				req->req.no_interrupt ? 'i' : 'I',
req               100 drivers/usb/gadget/udc/atmel_usba_udc.c 				req->req.zero ? 'Z' : 'z',
req               101 drivers/usb/gadget/udc/atmel_usba_udc.c 				req->req.short_not_ok ? 's' : 'S',
req               102 drivers/usb/gadget/udc/atmel_usba_udc.c 				req->req.status,
req               103 drivers/usb/gadget/udc/atmel_usba_udc.c 				req->submitted ? 'F' : 'f',
req               104 drivers/usb/gadget/udc/atmel_usba_udc.c 				req->using_dma ? 'D' : 'd',
req               105 drivers/usb/gadget/udc/atmel_usba_udc.c 				req->last_transaction ? 'L' : 'l');
req               110 drivers/usb/gadget/udc/atmel_usba_udc.c 		list_del(&req->queue);
req               111 drivers/usb/gadget/udc/atmel_usba_udc.c 		kfree(req);
req               129 drivers/usb/gadget/udc/atmel_usba_udc.c 	struct usba_request *req, *tmp_req;
req               131 drivers/usb/gadget/udc/atmel_usba_udc.c 	list_for_each_entry_safe(req, tmp_req, queue_data, queue) {
req               132 drivers/usb/gadget/udc/atmel_usba_udc.c 		list_del(&req->queue);
req               133 drivers/usb/gadget/udc/atmel_usba_udc.c 		kfree(req);
req               406 drivers/usb/gadget/udc/atmel_usba_udc.c static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req)
req               410 drivers/usb/gadget/udc/atmel_usba_udc.c 	transaction_len = req->req.length - req->req.actual;
req               411 drivers/usb/gadget/udc/atmel_usba_udc.c 	req->last_transaction = 1;
req               414 drivers/usb/gadget/udc/atmel_usba_udc.c 		req->last_transaction = 0;
req               415 drivers/usb/gadget/udc/atmel_usba_udc.c 	} else if (transaction_len == ep->ep.maxpacket && req->req.zero)
req               416 drivers/usb/gadget/udc/atmel_usba_udc.c 		req->last_transaction = 0;
req               419 drivers/usb/gadget/udc/atmel_usba_udc.c 		ep->ep.name, req, transaction_len,
req               420 drivers/usb/gadget/udc/atmel_usba_udc.c 		req->last_transaction ? ", done" : "");
req               422 drivers/usb/gadget/udc/atmel_usba_udc.c 	memcpy_toio(ep->fifo, req->req.buf + req->req.actual, transaction_len);
req               424 drivers/usb/gadget/udc/atmel_usba_udc.c 	req->req.actual += transaction_len;
req               427 drivers/usb/gadget/udc/atmel_usba_udc.c static void submit_request(struct usba_ep *ep, struct usba_request *req)
req               430 drivers/usb/gadget/udc/atmel_usba_udc.c 		ep->ep.name, req, req->req.length);
req               432 drivers/usb/gadget/udc/atmel_usba_udc.c 	req->req.actual = 0;
req               433 drivers/usb/gadget/udc/atmel_usba_udc.c 	req->submitted = 1;
req               435 drivers/usb/gadget/udc/atmel_usba_udc.c 	if (req->using_dma) {
req               436 drivers/usb/gadget/udc/atmel_usba_udc.c 		if (req->req.length == 0) {
req               441 drivers/usb/gadget/udc/atmel_usba_udc.c 		if (req->req.zero)
req               446 drivers/usb/gadget/udc/atmel_usba_udc.c 		usba_dma_writel(ep, ADDRESS, req->req.dma);
req               447 drivers/usb/gadget/udc/atmel_usba_udc.c 		usba_dma_writel(ep, CONTROL, req->ctrl);
req               449 drivers/usb/gadget/udc/atmel_usba_udc.c 		next_fifo_transaction(ep, req);
req               450 drivers/usb/gadget/udc/atmel_usba_udc.c 		if (req->last_transaction) {
req               464 drivers/usb/gadget/udc/atmel_usba_udc.c 	struct usba_request *req;
req               471 drivers/usb/gadget/udc/atmel_usba_udc.c 	req = list_entry(ep->queue.next, struct usba_request, queue);
req               472 drivers/usb/gadget/udc/atmel_usba_udc.c 	if (!req->submitted)
req               473 drivers/usb/gadget/udc/atmel_usba_udc.c 		submit_request(ep, req);
req               486 drivers/usb/gadget/udc/atmel_usba_udc.c 	struct usba_request *req;
req               501 drivers/usb/gadget/udc/atmel_usba_udc.c 		req = list_entry(ep->queue.next,
req               508 drivers/usb/gadget/udc/atmel_usba_udc.c 		if (req->req.actual + bytecount >= req->req.length) {
req               510 drivers/usb/gadget/udc/atmel_usba_udc.c 			bytecount = req->req.length - req->req.actual;
req               513 drivers/usb/gadget/udc/atmel_usba_udc.c 		memcpy_fromio(req->req.buf + req->req.actual,
req               515 drivers/usb/gadget/udc/atmel_usba_udc.c 		req->req.actual += bytecount;
req               521 drivers/usb/gadget/udc/atmel_usba_udc.c 			req->req.status = 0;
req               522 drivers/usb/gadget/udc/atmel_usba_udc.c 			list_del_init(&req->queue);
req               525 drivers/usb/gadget/udc/atmel_usba_udc.c 			usb_gadget_giveback_request(&ep->ep, &req->req);
req               540 drivers/usb/gadget/udc/atmel_usba_udc.c request_complete(struct usba_ep *ep, struct usba_request *req, int status)
req               544 drivers/usb/gadget/udc/atmel_usba_udc.c 	WARN_ON(!list_empty(&req->queue));
req               546 drivers/usb/gadget/udc/atmel_usba_udc.c 	if (req->req.status == -EINPROGRESS)
req               547 drivers/usb/gadget/udc/atmel_usba_udc.c 		req->req.status = status;
req               549 drivers/usb/gadget/udc/atmel_usba_udc.c 	if (req->using_dma)
req               550 drivers/usb/gadget/udc/atmel_usba_udc.c 		usb_gadget_unmap_request(&udc->gadget, &req->req, ep->is_in);
req               554 drivers/usb/gadget/udc/atmel_usba_udc.c 		ep->ep.name, req, req->req.status, req->req.actual);
req               557 drivers/usb/gadget/udc/atmel_usba_udc.c 	usb_gadget_giveback_request(&ep->ep, &req->req);
req               564 drivers/usb/gadget/udc/atmel_usba_udc.c 	struct usba_request *req, *tmp_req;
req               566 drivers/usb/gadget/udc/atmel_usba_udc.c 	list_for_each_entry_safe(req, tmp_req, list, queue) {
req               567 drivers/usb/gadget/udc/atmel_usba_udc.c 		list_del_init(&req->queue);
req               568 drivers/usb/gadget/udc/atmel_usba_udc.c 		request_complete(ep, req, status);
req               708 drivers/usb/gadget/udc/atmel_usba_udc.c 	struct usba_request *req;
req               712 drivers/usb/gadget/udc/atmel_usba_udc.c 	req = kzalloc(sizeof(*req), gfp_flags);
req               713 drivers/usb/gadget/udc/atmel_usba_udc.c 	if (!req)
req               716 drivers/usb/gadget/udc/atmel_usba_udc.c 	INIT_LIST_HEAD(&req->queue);
req               718 drivers/usb/gadget/udc/atmel_usba_udc.c 	return &req->req;
req               724 drivers/usb/gadget/udc/atmel_usba_udc.c 	struct usba_request *req = to_usba_req(_req);
req               728 drivers/usb/gadget/udc/atmel_usba_udc.c 	kfree(req);
req               732 drivers/usb/gadget/udc/atmel_usba_udc.c 		struct usba_request *req, gfp_t gfp_flags)
req               738 drivers/usb/gadget/udc/atmel_usba_udc.c 		ep->ep.name, req->req.length, &req->req.dma,
req               739 drivers/usb/gadget/udc/atmel_usba_udc.c 		req->req.zero ? 'Z' : 'z',
req               740 drivers/usb/gadget/udc/atmel_usba_udc.c 		req->req.short_not_ok ? 'S' : 's',
req               741 drivers/usb/gadget/udc/atmel_usba_udc.c 		req->req.no_interrupt ? 'I' : 'i');
req               743 drivers/usb/gadget/udc/atmel_usba_udc.c 	if (req->req.length > 0x10000) {
req               745 drivers/usb/gadget/udc/atmel_usba_udc.c 		DBG(DBG_ERR, "invalid request length %u\n", req->req.length);
req               749 drivers/usb/gadget/udc/atmel_usba_udc.c 	ret = usb_gadget_map_request(&udc->gadget, &req->req, ep->is_in);
req               753 drivers/usb/gadget/udc/atmel_usba_udc.c 	req->using_dma = 1;
req               754 drivers/usb/gadget/udc/atmel_usba_udc.c 	req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length)
req               759 drivers/usb/gadget/udc/atmel_usba_udc.c 		req->ctrl |= USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
req               770 drivers/usb/gadget/udc/atmel_usba_udc.c 			submit_request(ep, req);
req               772 drivers/usb/gadget/udc/atmel_usba_udc.c 		list_add_tail(&req->queue, &ep->queue);
req               783 drivers/usb/gadget/udc/atmel_usba_udc.c 	struct usba_request *req = to_usba_req(_req);
req               790 drivers/usb/gadget/udc/atmel_usba_udc.c 			ep->ep.name, req, _req->length);
req               796 drivers/usb/gadget/udc/atmel_usba_udc.c 	req->submitted = 0;
req               797 drivers/usb/gadget/udc/atmel_usba_udc.c 	req->using_dma = 0;
req               798 drivers/usb/gadget/udc/atmel_usba_udc.c 	req->last_transaction = 0;
req               804 drivers/usb/gadget/udc/atmel_usba_udc.c 		return queue_dma(udc, ep, req, gfp_flags);
req               810 drivers/usb/gadget/udc/atmel_usba_udc.c 		list_add_tail(&req->queue, &ep->queue);
req               827 drivers/usb/gadget/udc/atmel_usba_udc.c usba_update_req(struct usba_ep *ep, struct usba_request *req, u32 status)
req               829 drivers/usb/gadget/udc/atmel_usba_udc.c 	req->req.actual = req->req.length - USBA_BFEXT(DMA_BUF_LEN, status);
req               868 drivers/usb/gadget/udc/atmel_usba_udc.c 	struct usba_request *req;
req               873 drivers/usb/gadget/udc/atmel_usba_udc.c 			ep->ep.name, req);
req               877 drivers/usb/gadget/udc/atmel_usba_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
req               878 drivers/usb/gadget/udc/atmel_usba_udc.c 		if (&req->req == _req)
req               882 drivers/usb/gadget/udc/atmel_usba_udc.c 	if (&req->req != _req) {
req               887 drivers/usb/gadget/udc/atmel_usba_udc.c 	if (req->using_dma) {
req               892 drivers/usb/gadget/udc/atmel_usba_udc.c 		if (ep->queue.next == &req->queue) {
req               903 drivers/usb/gadget/udc/atmel_usba_udc.c 			usba_update_req(ep, req, status);
req               911 drivers/usb/gadget/udc/atmel_usba_udc.c 	list_del_init(&req->queue);
req               913 drivers/usb/gadget/udc/atmel_usba_udc.c 	request_complete(ep, req, -ECONNRESET);
req              1136 drivers/usb/gadget/udc/atmel_usba_udc.c 	struct usba_request *req, *tmp_req;
req              1141 drivers/usb/gadget/udc/atmel_usba_udc.c 	list_for_each_entry_safe(req, tmp_req, &ep->queue, queue) {
req              1142 drivers/usb/gadget/udc/atmel_usba_udc.c 		list_del_init(&req->queue);
req              1143 drivers/usb/gadget/udc/atmel_usba_udc.c 		request_complete(ep, req, -ECONNRESET);
req              1430 drivers/usb/gadget/udc/atmel_usba_udc.c 	struct usba_request *req;
req              1441 drivers/usb/gadget/udc/atmel_usba_udc.c 	req = NULL;
req              1443 drivers/usb/gadget/udc/atmel_usba_udc.c 		req = list_entry(ep->queue.next,
req              1447 drivers/usb/gadget/udc/atmel_usba_udc.c 		if (req->submitted)
req              1448 drivers/usb/gadget/udc/atmel_usba_udc.c 			next_fifo_transaction(ep, req);
req              1450 drivers/usb/gadget/udc/atmel_usba_udc.c 			submit_request(ep, req);
req              1452 drivers/usb/gadget/udc/atmel_usba_udc.c 		if (req->last_transaction) {
req              1475 drivers/usb/gadget/udc/atmel_usba_udc.c 			if (req) {
req              1476 drivers/usb/gadget/udc/atmel_usba_udc.c 				list_del_init(&req->queue);
req              1477 drivers/usb/gadget/udc/atmel_usba_udc.c 				request_complete(ep, req, 0);
req              1505 drivers/usb/gadget/udc/atmel_usba_udc.c 			if (req) {
req              1506 drivers/usb/gadget/udc/atmel_usba_udc.c 				list_del_init(&req->queue);
req              1507 drivers/usb/gadget/udc/atmel_usba_udc.c 				request_complete(ep, req, 0);
req              1555 drivers/usb/gadget/udc/atmel_usba_udc.c 			if (req) {
req              1556 drivers/usb/gadget/udc/atmel_usba_udc.c 				list_del_init(&req->queue);
req              1557 drivers/usb/gadget/udc/atmel_usba_udc.c 				request_complete(ep, req, status);
req              1618 drivers/usb/gadget/udc/atmel_usba_udc.c 	struct usba_request *req;
req              1636 drivers/usb/gadget/udc/atmel_usba_udc.c 		req = list_entry(ep->queue.next, struct usba_request, queue);
req              1638 drivers/usb/gadget/udc/atmel_usba_udc.c 		if (req->using_dma) {
req              1644 drivers/usb/gadget/udc/atmel_usba_udc.c 			list_del_init(&req->queue);
req              1646 drivers/usb/gadget/udc/atmel_usba_udc.c 			request_complete(ep, req, 0);
req              1648 drivers/usb/gadget/udc/atmel_usba_udc.c 			if (req->submitted)
req              1649 drivers/usb/gadget/udc/atmel_usba_udc.c 				next_fifo_transaction(ep, req);
req              1651 drivers/usb/gadget/udc/atmel_usba_udc.c 				submit_request(ep, req);
req              1653 drivers/usb/gadget/udc/atmel_usba_udc.c 			if (req->last_transaction) {
req              1654 drivers/usb/gadget/udc/atmel_usba_udc.c 				list_del_init(&req->queue);
req              1656 drivers/usb/gadget/udc/atmel_usba_udc.c 				request_complete(ep, req, 0);
req              1671 drivers/usb/gadget/udc/atmel_usba_udc.c 	struct usba_request *req;
req              1700 drivers/usb/gadget/udc/atmel_usba_udc.c 		req = list_entry(ep->queue.next, struct usba_request, queue);
req              1701 drivers/usb/gadget/udc/atmel_usba_udc.c 		usba_update_req(ep, req, status);
req              1703 drivers/usb/gadget/udc/atmel_usba_udc.c 		list_del_init(&req->queue);
req              1705 drivers/usb/gadget/udc/atmel_usba_udc.c 		request_complete(ep, req, 0);
req               294 drivers/usb/gadget/udc/atmel_usba_udc.h 	struct usb_request			req;
req               355 drivers/usb/gadget/udc/atmel_usba_udc.h static inline struct usba_request *to_usba_req(struct usb_request *req)
req               357 drivers/usb/gadget/udc/atmel_usba_udc.h 	return container_of(req, struct usba_request, req);
req               256 drivers/usb/gadget/udc/bcm63xx_udc.c 	struct usb_request		req;
req               349 drivers/usb/gadget/udc/bcm63xx_udc.c static inline struct bcm63xx_req *our_req(struct usb_request *req)
req               351 drivers/usb/gadget/udc/bcm63xx_udc.c 	return container_of(req, struct bcm63xx_req, req);
req               598 drivers/usb/gadget/udc/bcm63xx_udc.c 	unsigned int bytes_left = breq->req.length - breq->offset;
req               606 drivers/usb/gadget/udc/bcm63xx_udc.c 	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
req               648 drivers/usb/gadget/udc/bcm63xx_udc.c 		d->address = breq->req.dma + breq->offset;
req              1094 drivers/usb/gadget/udc/bcm63xx_udc.c 			usb_gadget_unmap_request(&udc->gadget, &breq->req,
req              1097 drivers/usb/gadget/udc/bcm63xx_udc.c 			breq->req.status = -ESHUTDOWN;
req              1100 drivers/usb/gadget/udc/bcm63xx_udc.c 			usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
req              1123 drivers/usb/gadget/udc/bcm63xx_udc.c 	return &breq->req;
req              1132 drivers/usb/gadget/udc/bcm63xx_udc.c 	struct usb_request *req)
req              1134 drivers/usb/gadget/udc/bcm63xx_udc.c 	struct bcm63xx_req *breq = our_req(req);
req              1152 drivers/usb/gadget/udc/bcm63xx_udc.c static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
req              1157 drivers/usb/gadget/udc/bcm63xx_udc.c 	struct bcm63xx_req *breq = our_req(req);
req              1161 drivers/usb/gadget/udc/bcm63xx_udc.c 	if (unlikely(!req || !req->complete || !req->buf || !ep))
req              1164 drivers/usb/gadget/udc/bcm63xx_udc.c 	req->actual = 0;
req              1165 drivers/usb/gadget/udc/bcm63xx_udc.c 	req->status = 0;
req              1173 drivers/usb/gadget/udc/bcm63xx_udc.c 		udc->ep0_reply = req;
req              1184 drivers/usb/gadget/udc/bcm63xx_udc.c 	rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
req              1205 drivers/usb/gadget/udc/bcm63xx_udc.c static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
req              1209 drivers/usb/gadget/udc/bcm63xx_udc.c 	struct bcm63xx_req *breq = our_req(req), *cur;
req              1220 drivers/usb/gadget/udc/bcm63xx_udc.c 	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);
req              1240 drivers/usb/gadget/udc/bcm63xx_udc.c 	req->status = -ESHUTDOWN;
req              1241 drivers/usb/gadget/udc/bcm63xx_udc.c 	req->complete(ep, req);
req              1384 drivers/usb/gadget/udc/bcm63xx_udc.c 	struct usb_request *req)
req              1386 drivers/usb/gadget/udc/bcm63xx_udc.c 	struct bcm63xx_req *breq = our_req(req);
req              1390 drivers/usb/gadget/udc/bcm63xx_udc.c 	udc->ep0_request = req;
req              1392 drivers/usb/gadget/udc/bcm63xx_udc.c 	req->actual = 0;
req              1394 drivers/usb/gadget/udc/bcm63xx_udc.c 	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
req              1405 drivers/usb/gadget/udc/bcm63xx_udc.c 	struct usb_request *req, int status)
req              1407 drivers/usb/gadget/udc/bcm63xx_udc.c 	req->status = status;
req              1409 drivers/usb/gadget/udc/bcm63xx_udc.c 		req->actual = 0;
req              1410 drivers/usb/gadget/udc/bcm63xx_udc.c 	if (req->complete) {
req              1412 drivers/usb/gadget/udc/bcm63xx_udc.c 		req->complete(&udc->bep[0].ep, req);
req              1425 drivers/usb/gadget/udc/bcm63xx_udc.c 	struct usb_request *req = udc->ep0_reply;
req              1428 drivers/usb/gadget/udc/bcm63xx_udc.c 	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
req              1429 drivers/usb/gadget/udc/bcm63xx_udc.c 	if (udc->ep0_request == req) {
req              1433 drivers/usb/gadget/udc/bcm63xx_udc.c 	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
req              1443 drivers/usb/gadget/udc/bcm63xx_udc.c 	struct usb_request *req = udc->ep0_request;
req              1448 drivers/usb/gadget/udc/bcm63xx_udc.c 	return req->actual;
req              1463 drivers/usb/gadget/udc/bcm63xx_udc.c 	struct usb_request *req = &udc->ep0_ctrl_req.req;
req              1465 drivers/usb/gadget/udc/bcm63xx_udc.c 	req->buf = udc->ep0_ctrl_buf;
req              1466 drivers/usb/gadget/udc/bcm63xx_udc.c 	req->length = length;
req              1467 drivers/usb/gadget/udc/bcm63xx_udc.c 	req->complete = NULL;
req              1469 drivers/usb/gadget/udc/bcm63xx_udc.c 	bcm63xx_ep0_map_write(udc, ch_idx, req);
req              1554 drivers/usb/gadget/udc/bcm63xx_udc.c 			&udc->ep0_ctrl_req.req, 0);
req              2046 drivers/usb/gadget/udc/bcm63xx_udc.c 	struct usb_request *req = NULL;
req              2061 drivers/usb/gadget/udc/bcm63xx_udc.c 		req = udc->ep0_request;
req              2062 drivers/usb/gadget/udc/bcm63xx_udc.c 		breq = our_req(req);
req              2066 drivers/usb/gadget/udc/bcm63xx_udc.c 			req->actual += rc;
req              2068 drivers/usb/gadget/udc/bcm63xx_udc.c 			if (req->actual >= req->length || breq->bd_bytes > rc) {
req              2074 drivers/usb/gadget/udc/bcm63xx_udc.c 				req->actual = min(req->actual, req->length);
req              2082 drivers/usb/gadget/udc/bcm63xx_udc.c 		req = &breq->req;
req              2085 drivers/usb/gadget/udc/bcm63xx_udc.c 			req->actual += rc;
req              2087 drivers/usb/gadget/udc/bcm63xx_udc.c 			if (req->actual >= req->length || breq->bd_bytes > rc) {
req              2091 drivers/usb/gadget/udc/bcm63xx_udc.c 				req->actual = min(req->actual, req->length);
req              2108 drivers/usb/gadget/udc/bcm63xx_udc.c 		usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
req              2109 drivers/usb/gadget/udc/bcm63xx_udc.c 		if (req->complete)
req              2110 drivers/usb/gadget/udc/bcm63xx_udc.c 			req->complete(&bep->ep, req);
req               320 drivers/usb/gadget/udc/bdc/bdc.h 	struct bdc_req *req;
req               193 drivers/usb/gadget/udc/bdc/bdc_ep.c static inline int bd_needed_req(struct bdc_req *req)
req               199 drivers/usb/gadget/udc/bdc/bdc_ep.c 	if (req->usb_req.length == 0)
req               203 drivers/usb/gadget/udc/bdc/bdc_ep.c 	remaining = req->usb_req.length % BD_MAX_BUFF_SIZE;
req               208 drivers/usb/gadget/udc/bdc/bdc_ep.c 	remaining = req->usb_req.length / BD_MAX_BUFF_SIZE;
req               384 drivers/usb/gadget/udc/bdc/bdc_ep.c static int setup_first_bd_ep0(struct bdc *bdc, struct bdc_req *req, u32 *dword3)
req               389 drivers/usb/gadget/udc/bdc/bdc_ep.c 	req->ep->dir = 0;
req               390 drivers/usb/gadget/udc/bdc/bdc_ep.c 	req_len = req->usb_req.length;
req               425 drivers/usb/gadget/udc/bdc/bdc_ep.c static int setup_bd_list_xfr(struct bdc *bdc, struct bdc_req *req, int num_bds)
req               427 drivers/usb/gadget/udc/bdc/bdc_ep.c 	dma_addr_t buf_add = req->usb_req.dma;
req               436 drivers/usb/gadget/udc/bdc/bdc_ep.c 	ep = req->ep;
req               438 drivers/usb/gadget/udc/bdc/bdc_ep.c 	bd_xfr = &req->bd_xfr;
req               439 drivers/usb/gadget/udc/bdc/bdc_ep.c 	bd_xfr->req = req;
req               442 drivers/usb/gadget/udc/bdc/bdc_ep.c 	req_len = req->usb_req.length;
req               444 drivers/usb/gadget/udc/bdc/bdc_ep.c 	tfs = roundup(req->usb_req.length, maxp);
req               457 drivers/usb/gadget/udc/bdc/bdc_ep.c 				ret = setup_first_bd_ep0(bdc, req, &dword3);
req               462 drivers/usb/gadget/udc/bdc/bdc_ep.c 		if (!req->ep->dir)
req               509 drivers/usb/gadget/udc/bdc/bdc_ep.c static int bdc_queue_xfr(struct bdc *bdc, struct bdc_req *req)
req               515 drivers/usb/gadget/udc/bdc/bdc_ep.c 	ep = req->ep;
req               516 drivers/usb/gadget/udc/bdc/bdc_ep.c 	dev_dbg(bdc->dev, "%s req:%p\n", __func__, req);
req               520 drivers/usb/gadget/udc/bdc/bdc_ep.c 	num_bds =  bd_needed_req(req);
req               527 drivers/usb/gadget/udc/bdc/bdc_ep.c 	ret = setup_bd_list_xfr(bdc, req, num_bds);
req               530 drivers/usb/gadget/udc/bdc/bdc_ep.c 	list_add_tail(&req->queue, &ep->queue);
req               538 drivers/usb/gadget/udc/bdc/bdc_ep.c static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req,
req               543 drivers/usb/gadget/udc/bdc/bdc_ep.c 	if (req == NULL)
req               547 drivers/usb/gadget/udc/bdc/bdc_ep.c 	list_del(&req->queue);
req               548 drivers/usb/gadget/udc/bdc/bdc_ep.c 	req->usb_req.status = status;
req               549 drivers/usb/gadget/udc/bdc/bdc_ep.c 	usb_gadget_unmap_request(&bdc->gadget, &req->usb_req, ep->dir);
req               550 drivers/usb/gadget/udc/bdc/bdc_ep.c 	if (req->usb_req.complete) {
req               552 drivers/usb/gadget/udc/bdc/bdc_ep.c 		usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
req               560 drivers/usb/gadget/udc/bdc/bdc_ep.c 	struct bdc_req *req;
req               576 drivers/usb/gadget/udc/bdc/bdc_ep.c 		req = list_entry(ep->queue.next, struct bdc_req,
req               578 drivers/usb/gadget/udc/bdc/bdc_ep.c 		bdc_req_complete(ep, req, -ESHUTDOWN);
req               657 drivers/usb/gadget/udc/bdc/bdc_ep.c static int ep0_queue(struct bdc_ep *ep, struct bdc_req *req)
req               664 drivers/usb/gadget/udc/bdc/bdc_ep.c 	req->usb_req.actual = 0;
req               665 drivers/usb/gadget/udc/bdc/bdc_ep.c 	req->usb_req.status = -EINPROGRESS;
req               666 drivers/usb/gadget/udc/bdc/bdc_ep.c 	req->epnum = ep->ep_num;
req               683 drivers/usb/gadget/udc/bdc/bdc_ep.c 		if (req->usb_req.length == 0)
req               687 drivers/usb/gadget/udc/bdc/bdc_ep.c 	ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
req               693 drivers/usb/gadget/udc/bdc/bdc_ep.c 	return bdc_queue_xfr(bdc, req);
req               710 drivers/usb/gadget/udc/bdc/bdc_ep.c static int ep_queue(struct bdc_ep *ep, struct bdc_req *req)
req               715 drivers/usb/gadget/udc/bdc/bdc_ep.c 	if (!req || !ep->usb_ep.desc)
req               720 drivers/usb/gadget/udc/bdc/bdc_ep.c 	req->usb_req.actual = 0;
req               721 drivers/usb/gadget/udc/bdc/bdc_ep.c 	req->usb_req.status = -EINPROGRESS;
req               722 drivers/usb/gadget/udc/bdc/bdc_ep.c 	req->epnum = ep->ep_num;
req               724 drivers/usb/gadget/udc/bdc/bdc_ep.c 	ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
req               730 drivers/usb/gadget/udc/bdc/bdc_ep.c 	return bdc_queue_xfr(bdc, req);
req               734 drivers/usb/gadget/udc/bdc/bdc_ep.c static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
req               755 drivers/usb/gadget/udc/bdc/bdc_ep.c 	start_bdi = req->bd_xfr.start_bdi;
req               756 drivers/usb/gadget/udc/bdc/bdc_ep.c 	end_bdi = find_end_bdi(ep, req->bd_xfr.next_hwd_bdi);
req               820 drivers/usb/gadget/udc/bdc/bdc_ep.c 	tbi = bdi_to_tbi(ep, req->bd_xfr.next_hwd_bdi);
req               823 drivers/usb/gadget/udc/bdc/bdc_ep.c 			sizeof(struct bdc_bd)*(req->bd_xfr.next_hwd_bdi -
req               829 drivers/usb/gadget/udc/bdc/bdc_ep.c 	if (req == first_req)
req               958 drivers/usb/gadget/udc/bdc/bdc_ep.c 	struct bdc_req *req;
req               976 drivers/usb/gadget/udc/bdc/bdc_ep.c 	req = list_entry(ep->queue.next, struct bdc_req,
req               979 drivers/usb/gadget/udc/bdc/bdc_ep.c 	bd_xfr = &req->bd_xfr;
req              1036 drivers/usb/gadget/udc/bdc/bdc_ep.c 		req->usb_req.actual = actual_length;
req              1038 drivers/usb/gadget/udc/bdc/bdc_ep.c 		req->usb_req.actual = req->usb_req.length -
req              1042 drivers/usb/gadget/udc/bdc/bdc_ep.c 			req->usb_req.length, req->usb_req.actual,
req              1048 drivers/usb/gadget/udc/bdc/bdc_ep.c 	if (req->usb_req.actual < req->usb_req.length) {
req              1050 drivers/usb/gadget/udc/bdc/bdc_ep.c 		if (req->usb_req.short_not_ok)
req              1053 drivers/usb/gadget/udc/bdc/bdc_ep.c 	bdc_req_complete(ep, bd_xfr->req, status);
req              1089 drivers/usb/gadget/udc/bdc/bdc_ep.c 	struct bdc_req *req;
req              1097 drivers/usb/gadget/udc/bdc/bdc_ep.c 		req = list_entry(ep->queue.next, struct bdc_req,
req              1099 drivers/usb/gadget/udc/bdc/bdc_ep.c 		bdc_req_complete(ep, req, -ESHUTDOWN);
req              1711 drivers/usb/gadget/udc/bdc/bdc_ep.c 	struct bdc_req *req;
req              1724 drivers/usb/gadget/udc/bdc/bdc_ep.c 	req = to_bdc_req(_req);
req              1726 drivers/usb/gadget/udc/bdc/bdc_ep.c 	dev_dbg(bdc->dev, "%s ep:%p req:%p\n", __func__, ep, req);
req              1745 drivers/usb/gadget/udc/bdc/bdc_ep.c 		ret = ep0_queue(ep, req);
req              1747 drivers/usb/gadget/udc/bdc/bdc_ep.c 		ret = ep_queue(ep, req);
req              1757 drivers/usb/gadget/udc/bdc/bdc_ep.c 	struct bdc_req *req;
req              1767 drivers/usb/gadget/udc/bdc/bdc_ep.c 	req = to_bdc_req(_req);
req              1769 drivers/usb/gadget/udc/bdc/bdc_ep.c 	dev_dbg(bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);
req              1773 drivers/usb/gadget/udc/bdc/bdc_ep.c 	list_for_each_entry(req, &ep->queue, queue) {
req              1774 drivers/usb/gadget/udc/bdc/bdc_ep.c 		if (&req->usb_req == _req)
req              1777 drivers/usb/gadget/udc/bdc/bdc_ep.c 	if (&req->usb_req != _req) {
req              1782 drivers/usb/gadget/udc/bdc/bdc_ep.c 	ret = ep_dequeue(ep, req);
req              1787 drivers/usb/gadget/udc/bdc/bdc_ep.c 	bdc_req_complete(ep, req, -ECONNRESET);
req              1822 drivers/usb/gadget/udc/bdc/bdc_ep.c 	struct bdc_req *req;
req              1825 drivers/usb/gadget/udc/bdc/bdc_ep.c 	req = kzalloc(sizeof(*req), gfp_flags);
req              1826 drivers/usb/gadget/udc/bdc/bdc_ep.c 	if (!req)
req              1830 drivers/usb/gadget/udc/bdc/bdc_ep.c 	req->ep = ep;
req              1831 drivers/usb/gadget/udc/bdc/bdc_ep.c 	req->epnum = ep->ep_num;
req              1832 drivers/usb/gadget/udc/bdc/bdc_ep.c 	req->usb_req.dma = DMA_ADDR_INVALID;
req              1833 drivers/usb/gadget/udc/bdc/bdc_ep.c 	dev_dbg(ep->bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);
req              1835 drivers/usb/gadget/udc/bdc/bdc_ep.c 	return &req->usb_req;
req              1841 drivers/usb/gadget/udc/bdc/bdc_ep.c 	struct bdc_req *req;
req              1843 drivers/usb/gadget/udc/bdc/bdc_ep.c 	req = to_bdc_req(_req);
req              1844 drivers/usb/gadget/udc/bdc/bdc_ep.c 	kfree(req);
req               176 drivers/usb/gadget/udc/core.c 	struct usb_request *req = NULL;
req               178 drivers/usb/gadget/udc/core.c 	req = ep->ops->alloc_request(ep, gfp_flags);
req               180 drivers/usb/gadget/udc/core.c 	trace_usb_ep_alloc_request(ep, req, req ? 0 : -ENOMEM);
req               182 drivers/usb/gadget/udc/core.c 	return req;
req               196 drivers/usb/gadget/udc/core.c 				       struct usb_request *req)
req               198 drivers/usb/gadget/udc/core.c 	trace_usb_ep_free_request(ep, req, 0);
req               199 drivers/usb/gadget/udc/core.c 	ep->ops->free_request(ep, req);
req               272 drivers/usb/gadget/udc/core.c 			       struct usb_request *req, gfp_t gfp_flags)
req               281 drivers/usb/gadget/udc/core.c 	ret = ep->ops->queue(ep, req, gfp_flags);
req               284 drivers/usb/gadget/udc/core.c 	trace_usb_ep_queue(ep, req, ret);
req               307 drivers/usb/gadget/udc/core.c int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
req               311 drivers/usb/gadget/udc/core.c 	ret = ep->ops->dequeue(ep, req);
req               312 drivers/usb/gadget/udc/core.c 	trace_usb_ep_dequeue(ep, req, ret);
req               815 drivers/usb/gadget/udc/core.c 		struct usb_request *req, int is_in)
req               817 drivers/usb/gadget/udc/core.c 	if (req->length == 0)
req               820 drivers/usb/gadget/udc/core.c 	if (req->num_sgs) {
req               823 drivers/usb/gadget/udc/core.c 		mapped = dma_map_sg(dev, req->sg, req->num_sgs,
req               830 drivers/usb/gadget/udc/core.c 		req->num_mapped_sgs = mapped;
req               832 drivers/usb/gadget/udc/core.c 		if (is_vmalloc_addr(req->buf)) {
req               835 drivers/usb/gadget/udc/core.c 		} else if (object_is_on_stack(req->buf)) {
req               840 drivers/usb/gadget/udc/core.c 		req->dma = dma_map_single(dev, req->buf, req->length,
req               843 drivers/usb/gadget/udc/core.c 		if (dma_mapping_error(dev, req->dma)) {
req               848 drivers/usb/gadget/udc/core.c 		req->dma_mapped = 1;
req               856 drivers/usb/gadget/udc/core.c 		struct usb_request *req, int is_in)
req               858 drivers/usb/gadget/udc/core.c 	return usb_gadget_map_request_by_dev(gadget->dev.parent, req, is_in);
req               863 drivers/usb/gadget/udc/core.c 		struct usb_request *req, int is_in)
req               865 drivers/usb/gadget/udc/core.c 	if (req->length == 0)
req               868 drivers/usb/gadget/udc/core.c 	if (req->num_mapped_sgs) {
req               869 drivers/usb/gadget/udc/core.c 		dma_unmap_sg(dev, req->sg, req->num_sgs,
req               872 drivers/usb/gadget/udc/core.c 		req->num_mapped_sgs = 0;
req               873 drivers/usb/gadget/udc/core.c 	} else if (req->dma_mapped) {
req               874 drivers/usb/gadget/udc/core.c 		dma_unmap_single(dev, req->dma, req->length,
req               876 drivers/usb/gadget/udc/core.c 		req->dma_mapped = 0;
req               882 drivers/usb/gadget/udc/core.c 		struct usb_request *req, int is_in)
req               884 drivers/usb/gadget/udc/core.c 	usb_gadget_unmap_request_by_dev(gadget->dev.parent, req, is_in);
req               900 drivers/usb/gadget/udc/core.c 		struct usb_request *req)
req               902 drivers/usb/gadget/udc/core.c 	if (likely(req->status == 0))
req               905 drivers/usb/gadget/udc/core.c 	trace_usb_gadget_giveback_request(ep, req, 0);
req               907 drivers/usb/gadget/udc/core.c 	req->complete(ep, req);
req                97 drivers/usb/gadget/udc/dummy_hcd.c 	struct usb_request		req;
req               108 drivers/usb/gadget/udc/dummy_hcd.c 	return container_of(_req, struct dummy_request, req);
req               332 drivers/usb/gadget/udc/dummy_hcd.c 		struct dummy_request	*req;
req               334 drivers/usb/gadget/udc/dummy_hcd.c 		req = list_entry(ep->queue.next, struct dummy_request, queue);
req               335 drivers/usb/gadget/udc/dummy_hcd.c 		list_del_init(&req->queue);
req               336 drivers/usb/gadget/udc/dummy_hcd.c 		req->req.status = -ESHUTDOWN;
req               339 drivers/usb/gadget/udc/dummy_hcd.c 		usb_gadget_giveback_request(&ep->ep, &req->req);
req               657 drivers/usb/gadget/udc/dummy_hcd.c 	struct dummy_request	*req;
req               662 drivers/usb/gadget/udc/dummy_hcd.c 	req = kzalloc(sizeof(*req), mem_flags);
req               663 drivers/usb/gadget/udc/dummy_hcd.c 	if (!req)
req               665 drivers/usb/gadget/udc/dummy_hcd.c 	INIT_LIST_HEAD(&req->queue);
req               666 drivers/usb/gadget/udc/dummy_hcd.c 	return &req->req;
req               671 drivers/usb/gadget/udc/dummy_hcd.c 	struct dummy_request	*req;
req               678 drivers/usb/gadget/udc/dummy_hcd.c 	req = usb_request_to_dummy_request(_req);
req               679 drivers/usb/gadget/udc/dummy_hcd.c 	WARN_ON(!list_empty(&req->queue));
req               680 drivers/usb/gadget/udc/dummy_hcd.c 	kfree(req);
req               683 drivers/usb/gadget/udc/dummy_hcd.c static void fifo_complete(struct usb_ep *ep, struct usb_request *req)
req               691 drivers/usb/gadget/udc/dummy_hcd.c 	struct dummy_request	*req;
req               696 drivers/usb/gadget/udc/dummy_hcd.c 	req = usb_request_to_dummy_request(_req);
req               697 drivers/usb/gadget/udc/dummy_hcd.c 	if (!_req || !list_empty(&req->queue) || !_req->complete)
req               722 drivers/usb/gadget/udc/dummy_hcd.c 		req = &dum->fifo_req;
req               723 drivers/usb/gadget/udc/dummy_hcd.c 		req->req = *_req;
req               724 drivers/usb/gadget/udc/dummy_hcd.c 		req->req.buf = dum->fifo_buf;
req               726 drivers/usb/gadget/udc/dummy_hcd.c 		req->req.context = dum;
req               727 drivers/usb/gadget/udc/dummy_hcd.c 		req->req.complete = fifo_complete;
req               729 drivers/usb/gadget/udc/dummy_hcd.c 		list_add_tail(&req->queue, &ep->queue);
req               736 drivers/usb/gadget/udc/dummy_hcd.c 		list_add_tail(&req->queue, &ep->queue);
req               751 drivers/usb/gadget/udc/dummy_hcd.c 	struct dummy_request	*req = NULL;
req               763 drivers/usb/gadget/udc/dummy_hcd.c 	list_for_each_entry(req, &ep->queue, queue) {
req               764 drivers/usb/gadget/udc/dummy_hcd.c 		if (&req->req == _req) {
req               765 drivers/usb/gadget/udc/dummy_hcd.c 			list_del_init(&req->queue);
req               776 drivers/usb/gadget/udc/dummy_hcd.c 				req, _ep->name, _req->length, _req->buf);
req              1313 drivers/usb/gadget/udc/dummy_hcd.c static int dummy_perform_transfer(struct urb *urb, struct dummy_request *req,
req              1325 drivers/usb/gadget/udc/dummy_hcd.c 	rbuf = req->req.buf + req->req.actual;
req              1384 drivers/usb/gadget/udc/dummy_hcd.c 	struct dummy_request	*req;
req              1389 drivers/usb/gadget/udc/dummy_hcd.c 	list_for_each_entry(req, &ep->queue, queue) {
req              1395 drivers/usb/gadget/udc/dummy_hcd.c 			if ((urb->stream_id != req->req.stream_id))
req              1407 drivers/usb/gadget/udc/dummy_hcd.c 		dev_len = req->req.length - req->req.actual;
req              1433 drivers/usb/gadget/udc/dummy_hcd.c 			len = dummy_perform_transfer(urb, req, len);
req              1437 drivers/usb/gadget/udc/dummy_hcd.c 				req->req.status = len;
req              1442 drivers/usb/gadget/udc/dummy_hcd.c 				req->req.actual += len;
req              1455 drivers/usb/gadget/udc/dummy_hcd.c 				req->req.status = 0;
req              1458 drivers/usb/gadget/udc/dummy_hcd.c 				req->req.status = 0;
req              1466 drivers/usb/gadget/udc/dummy_hcd.c 					req->req.status = -EOVERFLOW;
req              1468 drivers/usb/gadget/udc/dummy_hcd.c 					req->req.status = 0;
req              1476 drivers/usb/gadget/udc/dummy_hcd.c 			if (req->req.length == req->req.actual) {
req              1477 drivers/usb/gadget/udc/dummy_hcd.c 				if (req->req.zero && to_host)
req              1480 drivers/usb/gadget/udc/dummy_hcd.c 					req->req.status = 0;
req              1492 drivers/usb/gadget/udc/dummy_hcd.c 		if (req->req.status != -EINPROGRESS) {
req              1493 drivers/usb/gadget/udc/dummy_hcd.c 			list_del_init(&req->queue);
req              1496 drivers/usb/gadget/udc/dummy_hcd.c 			usb_gadget_giveback_request(&ep->ep, &req->req);
req              1812 drivers/usb/gadget/udc/dummy_hcd.c 		struct dummy_request	*req;
req              1868 drivers/usb/gadget/udc/dummy_hcd.c 			list_for_each_entry(req, &ep->queue, queue) {
req              1869 drivers/usb/gadget/udc/dummy_hcd.c 				list_del_init(&req->queue);
req              1870 drivers/usb/gadget/udc/dummy_hcd.c 				req->req.status = -EOVERFLOW;
req              1872 drivers/usb/gadget/udc/dummy_hcd.c 						req);
req              1875 drivers/usb/gadget/udc/dummy_hcd.c 				usb_gadget_giveback_request(&ep->ep, &req->req);
req                58 drivers/usb/gadget/udc/fotg210-udc.c static void fotg210_done(struct fotg210_ep *ep, struct fotg210_request *req,
req                61 drivers/usb/gadget/udc/fotg210-udc.c 	list_del_init(&req->queue);
req                65 drivers/usb/gadget/udc/fotg210-udc.c 		req->req.status = -ESHUTDOWN;
req                67 drivers/usb/gadget/udc/fotg210-udc.c 		req->req.status = status;
req                70 drivers/usb/gadget/udc/fotg210-udc.c 	usb_gadget_giveback_request(&ep->ep, &req->req);
req               212 drivers/usb/gadget/udc/fotg210-udc.c 	struct fotg210_request *req;
req               220 drivers/usb/gadget/udc/fotg210-udc.c 		req = list_entry(ep->queue.next,
req               223 drivers/usb/gadget/udc/fotg210-udc.c 		fotg210_done(ep, req, -ECONNRESET);
req               233 drivers/usb/gadget/udc/fotg210-udc.c 	struct fotg210_request *req;
req               235 drivers/usb/gadget/udc/fotg210-udc.c 	req = kzalloc(sizeof(struct fotg210_request), gfp_flags);
req               236 drivers/usb/gadget/udc/fotg210-udc.c 	if (!req)
req               239 drivers/usb/gadget/udc/fotg210-udc.c 	INIT_LIST_HEAD(&req->queue);
req               241 drivers/usb/gadget/udc/fotg210-udc.c 	return &req->req;
req               247 drivers/usb/gadget/udc/fotg210-udc.c 	struct fotg210_request *req;
req               249 drivers/usb/gadget/udc/fotg210-udc.c 	req = container_of(_req, struct fotg210_request, req);
req               250 drivers/usb/gadget/udc/fotg210-udc.c 	kfree(req);
req               327 drivers/usb/gadget/udc/fotg210-udc.c 			struct fotg210_request *req)
req               336 drivers/usb/gadget/udc/fotg210-udc.c 			buffer = req->req.buf;
req               337 drivers/usb/gadget/udc/fotg210-udc.c 			length = req->req.length;
req               339 drivers/usb/gadget/udc/fotg210-udc.c 			buffer = req->req.buf + req->req.actual;
req               345 drivers/usb/gadget/udc/fotg210-udc.c 		buffer = req->req.buf + req->req.actual;
req               346 drivers/usb/gadget/udc/fotg210-udc.c 		if (req->req.length - req->req.actual > ep->ep.maxpacket)
req               349 drivers/usb/gadget/udc/fotg210-udc.c 			length = req->req.length;
req               368 drivers/usb/gadget/udc/fotg210-udc.c 	req->req.actual += length;
req               374 drivers/usb/gadget/udc/fotg210-udc.c 				struct fotg210_request *req)
req               376 drivers/usb/gadget/udc/fotg210-udc.c 	if (!req->req.length) {
req               377 drivers/usb/gadget/udc/fotg210-udc.c 		fotg210_done(ep, req, 0);
req               381 drivers/usb/gadget/udc/fotg210-udc.c 		fotg210_start_dma(ep, req);
req               382 drivers/usb/gadget/udc/fotg210-udc.c 		if ((req->req.length == req->req.actual) ||
req               383 drivers/usb/gadget/udc/fotg210-udc.c 		    (req->req.actual < ep->ep.maxpacket))
req               384 drivers/usb/gadget/udc/fotg210-udc.c 			fotg210_done(ep, req, 0);
req               397 drivers/usb/gadget/udc/fotg210-udc.c 	struct fotg210_request *req;
req               402 drivers/usb/gadget/udc/fotg210-udc.c 	req = container_of(_req, struct fotg210_request, req);
req               412 drivers/usb/gadget/udc/fotg210-udc.c 	list_add_tail(&req->queue, &ep->queue);
req               414 drivers/usb/gadget/udc/fotg210-udc.c 	req->req.actual = 0;
req               415 drivers/usb/gadget/udc/fotg210-udc.c 	req->req.status = -EINPROGRESS;
req               418 drivers/usb/gadget/udc/fotg210-udc.c 		fotg210_ep0_queue(ep, req);
req               430 drivers/usb/gadget/udc/fotg210-udc.c 	struct fotg210_request *req;
req               434 drivers/usb/gadget/udc/fotg210-udc.c 	req = container_of(_req, struct fotg210_request, req);
req               438 drivers/usb/gadget/udc/fotg210-udc.c 		fotg210_done(ep, req, -ECONNRESET);
req               795 drivers/usb/gadget/udc/fotg210-udc.c 		struct fotg210_request *req;
req               797 drivers/usb/gadget/udc/fotg210-udc.c 		req = list_first_entry(&ep->queue,
req               800 drivers/usb/gadget/udc/fotg210-udc.c 		if (req->req.length)
req               801 drivers/usb/gadget/udc/fotg210-udc.c 			fotg210_start_dma(ep, req);
req               803 drivers/usb/gadget/udc/fotg210-udc.c 		if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
req               804 drivers/usb/gadget/udc/fotg210-udc.c 			fotg210_done(ep, req, 0);
req               815 drivers/usb/gadget/udc/fotg210-udc.c 		struct fotg210_request *req;
req               817 drivers/usb/gadget/udc/fotg210-udc.c 		req = list_entry(ep->queue.next,
req               820 drivers/usb/gadget/udc/fotg210-udc.c 		if (req->req.length)
req               821 drivers/usb/gadget/udc/fotg210-udc.c 			fotg210_start_dma(ep, req);
req               823 drivers/usb/gadget/udc/fotg210-udc.c 		if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
req               824 drivers/usb/gadget/udc/fotg210-udc.c 			fotg210_done(ep, req, 0);
req               840 drivers/usb/gadget/udc/fotg210-udc.c 	struct fotg210_request *req = list_entry(ep->queue.next,
req               843 drivers/usb/gadget/udc/fotg210-udc.c 	if (req->req.length)
req               844 drivers/usb/gadget/udc/fotg210-udc.c 		fotg210_start_dma(ep, req);
req               845 drivers/usb/gadget/udc/fotg210-udc.c 	fotg210_done(ep, req, 0);
req               850 drivers/usb/gadget/udc/fotg210-udc.c 	struct fotg210_request *req = list_entry(ep->queue.next,
req               853 drivers/usb/gadget/udc/fotg210-udc.c 	fotg210_start_dma(ep, req);
req               856 drivers/usb/gadget/udc/fotg210-udc.c 	if (req->req.length == req->req.actual ||
req               857 drivers/usb/gadget/udc/fotg210-udc.c 	    req->req.actual < ep->ep.maxpacket)
req               858 drivers/usb/gadget/udc/fotg210-udc.c 		fotg210_done(ep, req, 0);
req               211 drivers/usb/gadget/udc/fotg210.h 	struct usb_request	req;
req                76 drivers/usb/gadget/udc/fsl_qe_udc.c static void done(struct qe_ep *ep, struct qe_req *req, int status)
req                85 drivers/usb/gadget/udc/fsl_qe_udc.c 	list_del_init(&req->queue);
req                88 drivers/usb/gadget/udc/fsl_qe_udc.c 	if (req->req.status == -EINPROGRESS)
req                89 drivers/usb/gadget/udc/fsl_qe_udc.c 		req->req.status = status;
req                91 drivers/usb/gadget/udc/fsl_qe_udc.c 		status = req->req.status;
req                93 drivers/usb/gadget/udc/fsl_qe_udc.c 	if (req->mapped) {
req                95 drivers/usb/gadget/udc/fsl_qe_udc.c 			req->req.dma, req->req.length,
req                99 drivers/usb/gadget/udc/fsl_qe_udc.c 		req->req.dma = DMA_ADDR_INVALID;
req               100 drivers/usb/gadget/udc/fsl_qe_udc.c 		req->mapped = 0;
req               103 drivers/usb/gadget/udc/fsl_qe_udc.c 			req->req.dma, req->req.length,
req               110 drivers/usb/gadget/udc/fsl_qe_udc.c 			ep->ep.name, &req->req, status,
req               111 drivers/usb/gadget/udc/fsl_qe_udc.c 			req->req.actual, req->req.length);
req               117 drivers/usb/gadget/udc/fsl_qe_udc.c 	usb_gadget_giveback_request(&ep->ep, &req->req);
req               131 drivers/usb/gadget/udc/fsl_qe_udc.c 		struct qe_req *req = NULL;
req               132 drivers/usb/gadget/udc/fsl_qe_udc.c 		req = list_entry(ep->queue.next, struct qe_req, queue);
req               134 drivers/usb/gadget/udc/fsl_qe_udc.c 		done(ep, req, status);
req               782 drivers/usb/gadget/udc/fsl_qe_udc.c static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req);
req               887 drivers/usb/gadget/udc/fsl_qe_udc.c 	struct qe_req *req;
req               903 drivers/usb/gadget/udc/fsl_qe_udc.c 		req = list_entry(ep->queue.next, struct qe_req, queue);
req               905 drivers/usb/gadget/udc/fsl_qe_udc.c 		cp = (u8 *)(req->req.buf) + req->req.actual;
req               908 drivers/usb/gadget/udc/fsl_qe_udc.c 			req->req.actual += fsize;
req               910 drivers/usb/gadget/udc/fsl_qe_udc.c 					(req->req.actual >= req->req.length)) {
req               912 drivers/usb/gadget/udc/fsl_qe_udc.c 					ep0_req_complete(ep->udc, req);
req               914 drivers/usb/gadget/udc/fsl_qe_udc.c 					done(ep, req, 0);
req              1135 drivers/usb/gadget/udc/fsl_qe_udc.c 		struct qe_req *req = ep->tx_req;
req              1138 drivers/usb/gadget/udc/fsl_qe_udc.c 		last_len = min_t(unsigned, req->req.length - ep->sent,
req              1150 drivers/usb/gadget/udc/fsl_qe_udc.c 		if (req->req.zero) {
req              1152 drivers/usb/gadget/udc/fsl_qe_udc.c 				(req->req.length % ep->ep.maxpacket) != 0)
req              1160 drivers/usb/gadget/udc/fsl_qe_udc.c 		if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) {
req              1188 drivers/usb/gadget/udc/fsl_qe_udc.c 	size = min_t(u32, (ep->tx_req->req.length - ep->sent),
req              1190 drivers/usb/gadget/udc/fsl_qe_udc.c 	buf = (u8 *)ep->tx_req->req.buf + ep->sent;
req              1193 drivers/usb/gadget/udc/fsl_qe_udc.c 		ep->tx_req->req.actual += size;
req              1222 drivers/usb/gadget/udc/fsl_qe_udc.c 	struct qe_req *req = ep->tx_req;
req              1225 drivers/usb/gadget/udc/fsl_qe_udc.c 	if (req == NULL)
req              1228 drivers/usb/gadget/udc/fsl_qe_udc.c 	if ((req->req.length - ep->sent) > 0)
req              1257 drivers/usb/gadget/udc/fsl_qe_udc.c static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req)
req              1264 drivers/usb/gadget/udc/fsl_qe_udc.c 		done(ep, req, 0);
req              1271 drivers/usb/gadget/udc/fsl_qe_udc.c 		done(ep, req, 0);
req              1276 drivers/usb/gadget/udc/fsl_qe_udc.c 		done(ep, req, 0);
req              1283 drivers/usb/gadget/udc/fsl_qe_udc.c 		done(ep, req, 0);
req              1321 drivers/usb/gadget/udc/fsl_qe_udc.c 		if ((ep->tx_req->req.length - ep->sent) <= 0) {
req              1322 drivers/usb/gadget/udc/fsl_qe_udc.c 			ep->tx_req->req.actual = (unsigned int)ep->sent;
req              1449 drivers/usb/gadget/udc/fsl_qe_udc.c static int ep_req_send(struct qe_ep *ep, struct qe_req *req)
req              1463 drivers/usb/gadget/udc/fsl_qe_udc.c static int ep_req_rx(struct qe_ep *ep, struct qe_req *req)
req              1512 drivers/usb/gadget/udc/fsl_qe_udc.c 				cp = (u8 *)(req->req.buf) + req->req.actual;
req              1515 drivers/usb/gadget/udc/fsl_qe_udc.c 					req->req.actual += fsize;
req              1517 drivers/usb/gadget/udc/fsl_qe_udc.c 						|| (req->req.actual >=
req              1518 drivers/usb/gadget/udc/fsl_qe_udc.c 							req->req.length)) {
req              1520 drivers/usb/gadget/udc/fsl_qe_udc.c 						done(ep, req, 0);
req              1553 drivers/usb/gadget/udc/fsl_qe_udc.c static int ep_req_receive(struct qe_ep *ep, struct qe_req *req)
req              1561 drivers/usb/gadget/udc/fsl_qe_udc.c 			ep_req_rx(ep, req);
req              1664 drivers/usb/gadget/udc/fsl_qe_udc.c 	struct qe_req *req;
req              1666 drivers/usb/gadget/udc/fsl_qe_udc.c 	req = kzalloc(sizeof(*req), gfp_flags);
req              1667 drivers/usb/gadget/udc/fsl_qe_udc.c 	if (!req)
req              1670 drivers/usb/gadget/udc/fsl_qe_udc.c 	req->req.dma = DMA_ADDR_INVALID;
req              1672 drivers/usb/gadget/udc/fsl_qe_udc.c 	INIT_LIST_HEAD(&req->queue);
req              1674 drivers/usb/gadget/udc/fsl_qe_udc.c 	return &req->req;
req              1679 drivers/usb/gadget/udc/fsl_qe_udc.c 	struct qe_req *req;
req              1681 drivers/usb/gadget/udc/fsl_qe_udc.c 	req = container_of(_req, struct qe_req, req);
req              1684 drivers/usb/gadget/udc/fsl_qe_udc.c 		kfree(req);
req              1690 drivers/usb/gadget/udc/fsl_qe_udc.c 	struct qe_req *req = container_of(_req, struct qe_req, req);
req              1696 drivers/usb/gadget/udc/fsl_qe_udc.c 	if (!_req || !req->req.complete || !req->req.buf
req              1697 drivers/usb/gadget/udc/fsl_qe_udc.c 			|| !list_empty(&req->queue)) {
req              1709 drivers/usb/gadget/udc/fsl_qe_udc.c 	req->ep = ep;
req              1712 drivers/usb/gadget/udc/fsl_qe_udc.c 	if (req->req.dma == DMA_ADDR_INVALID) {
req              1713 drivers/usb/gadget/udc/fsl_qe_udc.c 		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
req              1714 drivers/usb/gadget/udc/fsl_qe_udc.c 					req->req.buf,
req              1715 drivers/usb/gadget/udc/fsl_qe_udc.c 					req->req.length,
req              1719 drivers/usb/gadget/udc/fsl_qe_udc.c 		req->mapped = 1;
req              1722 drivers/usb/gadget/udc/fsl_qe_udc.c 					req->req.dma, req->req.length,
req              1726 drivers/usb/gadget/udc/fsl_qe_udc.c 		req->mapped = 0;
req              1729 drivers/usb/gadget/udc/fsl_qe_udc.c 	req->req.status = -EINPROGRESS;
req              1730 drivers/usb/gadget/udc/fsl_qe_udc.c 	req->req.actual = 0;
req              1732 drivers/usb/gadget/udc/fsl_qe_udc.c 	list_add_tail(&req->queue, &ep->queue);
req              1734 drivers/usb/gadget/udc/fsl_qe_udc.c 			ep->name, req->req.length);
req              1738 drivers/usb/gadget/udc/fsl_qe_udc.c 		reval = ep_req_send(ep, req);
req              1741 drivers/usb/gadget/udc/fsl_qe_udc.c 	if (ep_index(ep) == 0 && req->req.length > 0) {
req              1749 drivers/usb/gadget/udc/fsl_qe_udc.c 		reval = ep_req_receive(ep, req);
req              1773 drivers/usb/gadget/udc/fsl_qe_udc.c 	struct qe_req *req;
req              1782 drivers/usb/gadget/udc/fsl_qe_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
req              1783 drivers/usb/gadget/udc/fsl_qe_udc.c 		if (&req->req == _req)
req              1787 drivers/usb/gadget/udc/fsl_qe_udc.c 	if (&req->req != _req) {
req              1792 drivers/usb/gadget/udc/fsl_qe_udc.c 	done(ep, req, -ECONNRESET);
req              1928 drivers/usb/gadget/udc/fsl_qe_udc.c 	struct qe_req *req = container_of(_req, struct qe_req, req);
req              1930 drivers/usb/gadget/udc/fsl_qe_udc.c 	req->req.buf = NULL;
req              1931 drivers/usb/gadget/udc/fsl_qe_udc.c 	kfree(req);
req              1938 drivers/usb/gadget/udc/fsl_qe_udc.c 	struct qe_req *req;
req              1974 drivers/usb/gadget/udc/fsl_qe_udc.c 	req = container_of(qe_alloc_request(&ep->ep, GFP_KERNEL),
req              1975 drivers/usb/gadget/udc/fsl_qe_udc.c 					struct qe_req, req);
req              1976 drivers/usb/gadget/udc/fsl_qe_udc.c 	req->req.length = 2;
req              1977 drivers/usb/gadget/udc/fsl_qe_udc.c 	req->req.buf = udc->statusbuf;
req              1978 drivers/usb/gadget/udc/fsl_qe_udc.c 	*(u16 *)req->req.buf = cpu_to_le16(usb_status);
req              1979 drivers/usb/gadget/udc/fsl_qe_udc.c 	req->req.status = -EINPROGRESS;
req              1980 drivers/usb/gadget/udc/fsl_qe_udc.c 	req->req.actual = 0;
req              1981 drivers/usb/gadget/udc/fsl_qe_udc.c 	req->req.complete = ownercomplete;
req              1986 drivers/usb/gadget/udc/fsl_qe_udc.c 	status = __qe_ep_queue(&ep->ep, &req->req);
req               253 drivers/usb/gadget/udc/fsl_qe_udc.h 	struct usb_request req;
req               157 drivers/usb/gadget/udc/fsl_udc_core.c static void done(struct fsl_ep *ep, struct fsl_req *req, int status)
req               168 drivers/usb/gadget/udc/fsl_udc_core.c 	list_del_init(&req->queue);
req               171 drivers/usb/gadget/udc/fsl_udc_core.c 	if (req->req.status == -EINPROGRESS)
req               172 drivers/usb/gadget/udc/fsl_udc_core.c 		req->req.status = status;
req               174 drivers/usb/gadget/udc/fsl_udc_core.c 		status = req->req.status;
req               177 drivers/usb/gadget/udc/fsl_udc_core.c 	next_td = req->head;
req               178 drivers/usb/gadget/udc/fsl_udc_core.c 	for (j = 0; j < req->dtd_count; j++) {
req               180 drivers/usb/gadget/udc/fsl_udc_core.c 		if (j != req->dtd_count - 1) {
req               186 drivers/usb/gadget/udc/fsl_udc_core.c 	usb_gadget_unmap_request(&ep->udc->gadget, &req->req, ep_is_in(ep));
req               190 drivers/usb/gadget/udc/fsl_udc_core.c 			ep->ep.name, &req->req, status,
req               191 drivers/usb/gadget/udc/fsl_udc_core.c 			req->req.actual, req->req.length);
req               197 drivers/usb/gadget/udc/fsl_udc_core.c 	usb_gadget_giveback_request(&ep->ep, &req->req);
req               216 drivers/usb/gadget/udc/fsl_udc_core.c 		struct fsl_req *req = NULL;
req               218 drivers/usb/gadget/udc/fsl_udc_core.c 		req = list_entry(ep->queue.next, struct fsl_req, queue);
req               219 drivers/usb/gadget/udc/fsl_udc_core.c 		done(ep, req, status);
req               679 drivers/usb/gadget/udc/fsl_udc_core.c 	struct fsl_req *req = NULL;
req               681 drivers/usb/gadget/udc/fsl_udc_core.c 	req = kzalloc(sizeof *req, gfp_flags);
req               682 drivers/usb/gadget/udc/fsl_udc_core.c 	if (!req)
req               685 drivers/usb/gadget/udc/fsl_udc_core.c 	req->req.dma = DMA_ADDR_INVALID;
req               686 drivers/usb/gadget/udc/fsl_udc_core.c 	INIT_LIST_HEAD(&req->queue);
req               688 drivers/usb/gadget/udc/fsl_udc_core.c 	return &req->req;
req               693 drivers/usb/gadget/udc/fsl_udc_core.c 	struct fsl_req *req = NULL;
req               695 drivers/usb/gadget/udc/fsl_udc_core.c 	req = container_of(_req, struct fsl_req, req);
req               698 drivers/usb/gadget/udc/fsl_udc_core.c 		kfree(req);
req               723 drivers/usb/gadget/udc/fsl_udc_core.c static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
req               740 drivers/usb/gadget/udc/fsl_udc_core.c 			cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK);
req               765 drivers/usb/gadget/udc/fsl_udc_core.c 	fsl_prime_ep(ep, req->head);
req               774 drivers/usb/gadget/udc/fsl_udc_core.c static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
req               781 drivers/usb/gadget/udc/fsl_udc_core.c 	*length = min(req->req.length - req->req.actual,
req               795 drivers/usb/gadget/udc/fsl_udc_core.c 	swap_temp = (u32) (req->req.dma + req->req.actual);
req               802 drivers/usb/gadget/udc/fsl_udc_core.c 	req->req.actual += *length;
req               805 drivers/usb/gadget/udc/fsl_udc_core.c 	if (req->req.zero) {
req               806 drivers/usb/gadget/udc/fsl_udc_core.c 		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
req               810 drivers/usb/gadget/udc/fsl_udc_core.c 	} else if (req->req.length == req->req.actual)
req               821 drivers/usb/gadget/udc/fsl_udc_core.c 	if (*is_last && !req->req.no_interrupt)
req               834 drivers/usb/gadget/udc/fsl_udc_core.c static int fsl_req_to_dtd(struct fsl_req *req, gfp_t gfp_flags)
req               843 drivers/usb/gadget/udc/fsl_udc_core.c 		dtd = fsl_build_dtd(req, &count, &dma, &is_last, gfp_flags);
req               849 drivers/usb/gadget/udc/fsl_udc_core.c 			req->head = dtd;
req               856 drivers/usb/gadget/udc/fsl_udc_core.c 		req->dtd_count++;
req               861 drivers/usb/gadget/udc/fsl_udc_core.c 	req->tail = dtd;
req               871 drivers/usb/gadget/udc/fsl_udc_core.c 	struct fsl_req *req = container_of(_req, struct fsl_req, req);
req               877 drivers/usb/gadget/udc/fsl_udc_core.c 	if (!_req || !req->req.complete || !req->req.buf
req               878 drivers/usb/gadget/udc/fsl_udc_core.c 			|| !list_empty(&req->queue)) {
req               887 drivers/usb/gadget/udc/fsl_udc_core.c 		if (req->req.length > ep->ep.maxpacket)
req               895 drivers/usb/gadget/udc/fsl_udc_core.c 	req->ep = ep;
req               897 drivers/usb/gadget/udc/fsl_udc_core.c 	ret = usb_gadget_map_request(&ep->udc->gadget, &req->req, ep_is_in(ep));
req               901 drivers/usb/gadget/udc/fsl_udc_core.c 	req->req.status = -EINPROGRESS;
req               902 drivers/usb/gadget/udc/fsl_udc_core.c 	req->req.actual = 0;
req               903 drivers/usb/gadget/udc/fsl_udc_core.c 	req->dtd_count = 0;
req               906 drivers/usb/gadget/udc/fsl_udc_core.c 	if (!fsl_req_to_dtd(req, gfp_flags)) {
req               908 drivers/usb/gadget/udc/fsl_udc_core.c 		fsl_queue_td(ep, req);
req               914 drivers/usb/gadget/udc/fsl_udc_core.c 	if (req != NULL)
req               915 drivers/usb/gadget/udc/fsl_udc_core.c 		list_add_tail(&req->queue, &ep->queue);
req               925 drivers/usb/gadget/udc/fsl_udc_core.c 	struct fsl_req *req;
req               947 drivers/usb/gadget/udc/fsl_udc_core.c 	list_for_each_entry(req, &ep->queue, queue) {
req               948 drivers/usb/gadget/udc/fsl_udc_core.c 		if (&req->req == _req)
req               951 drivers/usb/gadget/udc/fsl_udc_core.c 	if (&req->req != _req) {
req               957 drivers/usb/gadget/udc/fsl_udc_core.c 	if (ep->queue.next == &req->queue) {
req               962 drivers/usb/gadget/udc/fsl_udc_core.c 		if (req->queue.next != &ep->queue) {
req               965 drivers/usb/gadget/udc/fsl_udc_core.c 			next_req = list_entry(req->queue.next, struct fsl_req,
req               975 drivers/usb/gadget/udc/fsl_udc_core.c 		prev_req = list_entry(req->queue.prev, struct fsl_req, queue);
req               976 drivers/usb/gadget/udc/fsl_udc_core.c 		prev_req->tail->next_td_ptr = req->tail->next_td_ptr;
req               979 drivers/usb/gadget/udc/fsl_udc_core.c 	done(ep, req, -ECONNRESET);
req              1252 drivers/usb/gadget/udc/fsl_udc_core.c static void fsl_noop_complete(struct usb_ep *ep, struct usb_request *req) { }
req              1271 drivers/usb/gadget/udc/fsl_udc_core.c 	struct fsl_req *req = udc->status_req;
req              1284 drivers/usb/gadget/udc/fsl_udc_core.c 	req->ep = ep;
req              1285 drivers/usb/gadget/udc/fsl_udc_core.c 	req->req.length = 0;
req              1286 drivers/usb/gadget/udc/fsl_udc_core.c 	req->req.status = -EINPROGRESS;
req              1287 drivers/usb/gadget/udc/fsl_udc_core.c 	req->req.actual = 0;
req              1288 drivers/usb/gadget/udc/fsl_udc_core.c 	req->req.complete = fsl_noop_complete;
req              1289 drivers/usb/gadget/udc/fsl_udc_core.c 	req->dtd_count = 0;
req              1291 drivers/usb/gadget/udc/fsl_udc_core.c 	ret = usb_gadget_map_request(&ep->udc->gadget, &req->req, ep_is_in(ep));
req              1295 drivers/usb/gadget/udc/fsl_udc_core.c 	if (fsl_req_to_dtd(req, GFP_ATOMIC) == 0)
req              1296 drivers/usb/gadget/udc/fsl_udc_core.c 		fsl_queue_td(ep, req);
req              1300 drivers/usb/gadget/udc/fsl_udc_core.c 	list_add_tail(&req->queue, &ep->queue);
req              1334 drivers/usb/gadget/udc/fsl_udc_core.c 	struct fsl_req *req;
req              1363 drivers/usb/gadget/udc/fsl_udc_core.c 	req = udc->status_req;
req              1365 drivers/usb/gadget/udc/fsl_udc_core.c 	*((u16 *) req->req.buf) = cpu_to_le16(tmp);
req              1367 drivers/usb/gadget/udc/fsl_udc_core.c 	req->ep = ep;
req              1368 drivers/usb/gadget/udc/fsl_udc_core.c 	req->req.length = 2;
req              1369 drivers/usb/gadget/udc/fsl_udc_core.c 	req->req.status = -EINPROGRESS;
req              1370 drivers/usb/gadget/udc/fsl_udc_core.c 	req->req.actual = 0;
req              1371 drivers/usb/gadget/udc/fsl_udc_core.c 	req->req.complete = fsl_noop_complete;
req              1372 drivers/usb/gadget/udc/fsl_udc_core.c 	req->dtd_count = 0;
req              1374 drivers/usb/gadget/udc/fsl_udc_core.c 	ret = usb_gadget_map_request(&ep->udc->gadget, &req->req, ep_is_in(ep));
req              1379 drivers/usb/gadget/udc/fsl_udc_core.c 	if ((fsl_req_to_dtd(req, GFP_ATOMIC) == 0))
req              1380 drivers/usb/gadget/udc/fsl_udc_core.c 		fsl_queue_td(ep, req);
req              1384 drivers/usb/gadget/udc/fsl_udc_core.c 	list_add_tail(&req->queue, &ep->queue);
req              1522 drivers/usb/gadget/udc/fsl_udc_core.c 		struct fsl_req *req)
req              1531 drivers/usb/gadget/udc/fsl_udc_core.c 	done(ep0, req, 0);
req              1606 drivers/usb/gadget/udc/fsl_udc_core.c 	actual = curr_req->req.length;
req              1665 drivers/usb/gadget/udc/fsl_udc_core.c 	curr_req->req.actual = actual;
req              1712 drivers/usb/gadget/udc/fsl_udc_core.c 			curr_req->req.status = status;
req              2028 drivers/usb/gadget/udc/fsl_udc_core.c 	struct fsl_req *req;
req              2177 drivers/usb/gadget/udc/fsl_udc_core.c 		list_for_each_entry(req, &ep->queue, queue) {
req              2180 drivers/usb/gadget/udc/fsl_udc_core.c 				&req->req, req->req.actual,
req              2181 drivers/usb/gadget/udc/fsl_udc_core.c 				req->req.length, req->req.buf);
req              2196 drivers/usb/gadget/udc/fsl_udc_core.c 				list_for_each_entry(req, &ep->queue, queue) {
req              2200 drivers/usb/gadget/udc/fsl_udc_core.c 						&req->req, req->req.actual,
req              2201 drivers/usb/gadget/udc/fsl_udc_core.c 						req->req.length, req->req.buf);
req              2277 drivers/usb/gadget/udc/fsl_udc_core.c 			struct fsl_req, req);
req              2284 drivers/usb/gadget/udc/fsl_udc_core.c 	udc->status_req->req.buf = kmalloc(8, GFP_KERNEL);
req              2285 drivers/usb/gadget/udc/fsl_udc_core.c 	if (!udc->status_req->req.buf) {
req              2572 drivers/usb/gadget/udc/fsl_udc_core.c 	kfree(udc_controller->status_req->req.buf);
req               444 drivers/usb/gadget/udc/fsl_usb2_udc.h 	struct usb_request req;
req                33 drivers/usb/gadget/udc/fusb300_udc.c static void done(struct fusb300_ep *ep, struct fusb300_request *req,
req               251 drivers/usb/gadget/udc/fusb300_udc.c 	struct fusb300_request *req;
req               259 drivers/usb/gadget/udc/fusb300_udc.c 		req = list_entry(ep->queue.next, struct fusb300_request, queue);
req               261 drivers/usb/gadget/udc/fusb300_udc.c 		done(ep, req, -ECONNRESET);
req               271 drivers/usb/gadget/udc/fusb300_udc.c 	struct fusb300_request *req;
req               273 drivers/usb/gadget/udc/fusb300_udc.c 	req = kzalloc(sizeof(struct fusb300_request), gfp_flags);
req               274 drivers/usb/gadget/udc/fusb300_udc.c 	if (!req)
req               276 drivers/usb/gadget/udc/fusb300_udc.c 	INIT_LIST_HEAD(&req->queue);
req               278 drivers/usb/gadget/udc/fusb300_udc.c 	return &req->req;
req               283 drivers/usb/gadget/udc/fusb300_udc.c 	struct fusb300_request *req;
req               285 drivers/usb/gadget/udc/fusb300_udc.c 	req = container_of(_req, struct fusb300_request, req);
req               286 drivers/usb/gadget/udc/fusb300_udc.c 	kfree(req);
req               331 drivers/usb/gadget/udc/fusb300_udc.c 		   struct fusb300_request *req)
req               337 drivers/usb/gadget/udc/fusb300_udc.c 	u32 length = req->req.length - req->req.actual;
req               339 drivers/usb/gadget/udc/fusb300_udc.c 	tmp = req->req.buf + req->req.actual;
req               349 drivers/usb/gadget/udc/fusb300_udc.c 		req->req.actual += SS_CTL_MAX_PACKET_SIZE;
req               378 drivers/usb/gadget/udc/fusb300_udc.c 		req->req.actual += length;
req               399 drivers/usb/gadget/udc/fusb300_udc.c static void ep0_queue(struct fusb300_ep *ep, struct fusb300_request *req)
req               402 drivers/usb/gadget/udc/fusb300_udc.c 		if (req->req.length) {
req               403 drivers/usb/gadget/udc/fusb300_udc.c 			fusb300_wrcxf(ep, req);
req               406 drivers/usb/gadget/udc/fusb300_udc.c 				__func__, req->req.length);
req               407 drivers/usb/gadget/udc/fusb300_udc.c 		if ((req->req.length == req->req.actual) ||
req               408 drivers/usb/gadget/udc/fusb300_udc.c 		    (req->req.actual < ep->ep.maxpacket))
req               409 drivers/usb/gadget/udc/fusb300_udc.c 			done(ep, req, 0);
req               411 drivers/usb/gadget/udc/fusb300_udc.c 		if (!req->req.length)
req               412 drivers/usb/gadget/udc/fusb300_udc.c 			done(ep, req, 0);
req               423 drivers/usb/gadget/udc/fusb300_udc.c 	struct fusb300_request *req;
req               428 drivers/usb/gadget/udc/fusb300_udc.c 	req = container_of(_req, struct fusb300_request, req);
req               438 drivers/usb/gadget/udc/fusb300_udc.c 	list_add_tail(&req->queue, &ep->queue);
req               440 drivers/usb/gadget/udc/fusb300_udc.c 	req->req.actual = 0;
req               441 drivers/usb/gadget/udc/fusb300_udc.c 	req->req.status = -EINPROGRESS;
req               444 drivers/usb/gadget/udc/fusb300_udc.c 		ep0_queue(ep, req);
req               456 drivers/usb/gadget/udc/fusb300_udc.c 	struct fusb300_request *req;
req               460 drivers/usb/gadget/udc/fusb300_udc.c 	req = container_of(_req, struct fusb300_request, req);
req               464 drivers/usb/gadget/udc/fusb300_udc.c 		done(ep, req, -ECONNRESET);
req               601 drivers/usb/gadget/udc/fusb300_udc.c 			  struct fusb300_request *req,
req               609 drivers/usb/gadget/udc/fusb300_udc.c 	tmp = req->req.buf + req->req.actual;
req               610 drivers/usb/gadget/udc/fusb300_udc.c 	req->req.actual += length;
req               612 drivers/usb/gadget/udc/fusb300_udc.c 	if (req->req.actual > req->req.length)
req               864 drivers/usb/gadget/udc/fusb300_udc.c static void done(struct fusb300_ep *ep, struct fusb300_request *req,
req               867 drivers/usb/gadget/udc/fusb300_udc.c 	list_del_init(&req->queue);
req               871 drivers/usb/gadget/udc/fusb300_udc.c 		req->req.status = -ESHUTDOWN;
req               873 drivers/usb/gadget/udc/fusb300_udc.c 		req->req.status = status;
req               876 drivers/usb/gadget/udc/fusb300_udc.c 	usb_gadget_giveback_request(&ep->ep, &req->req);
req               939 drivers/usb/gadget/udc/fusb300_udc.c 			struct fusb300_request *req)
req               944 drivers/usb/gadget/udc/fusb300_udc.c 			&req->req, DMA_TO_DEVICE);
req               951 drivers/usb/gadget/udc/fusb300_udc.c 	fusb300_fill_idma_prdtbl(ep, req->req.dma, req->req.length);
req               956 drivers/usb/gadget/udc/fusb300_udc.c 			&req->req, DMA_TO_DEVICE);
req               961 drivers/usb/gadget/udc/fusb300_udc.c 	struct fusb300_request *req = list_entry(ep->queue.next,
req               964 drivers/usb/gadget/udc/fusb300_udc.c 	if (req->req.length)
req               965 drivers/usb/gadget/udc/fusb300_udc.c 		fusb300_set_idma(ep, req);
req               966 drivers/usb/gadget/udc/fusb300_udc.c 	done(ep, req, 0);
req               972 drivers/usb/gadget/udc/fusb300_udc.c 	struct fusb300_request *req = list_entry(ep->queue.next,
req               977 drivers/usb/gadget/udc/fusb300_udc.c 	fusb300_rdfifo(ep, req, length);
req               980 drivers/usb/gadget/udc/fusb300_udc.c 	if ((req->req.length == req->req.actual) || (length < ep->ep.maxpacket))
req               981 drivers/usb/gadget/udc/fusb300_udc.c 		done(ep, req, 0);
req              1012 drivers/usb/gadget/udc/fusb300_udc.c 		struct fusb300_request *req;
req              1014 drivers/usb/gadget/udc/fusb300_udc.c 		req = list_first_entry(&ep->queue,
req              1016 drivers/usb/gadget/udc/fusb300_udc.c 		if (req->req.length)
req              1017 drivers/usb/gadget/udc/fusb300_udc.c 			fusb300_rdcxf(ep->fusb300, req->req.buf,
req              1018 drivers/usb/gadget/udc/fusb300_udc.c 				req->req.length);
req              1019 drivers/usb/gadget/udc/fusb300_udc.c 		done(ep, req, 0);
req              1029 drivers/usb/gadget/udc/fusb300_udc.c 	struct fusb300_request *req;
req              1033 drivers/usb/gadget/udc/fusb300_udc.c 		req = list_entry(ep->queue.next,
req              1035 drivers/usb/gadget/udc/fusb300_udc.c 		if (req->req.length)
req              1036 drivers/usb/gadget/udc/fusb300_udc.c 			fusb300_wrcxf(ep, req);
req              1037 drivers/usb/gadget/udc/fusb300_udc.c 		if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
req              1038 drivers/usb/gadget/udc/fusb300_udc.c 			done(ep, req, 0);
req               634 drivers/usb/gadget/udc/fusb300_udc.h 	struct usb_request	req;
req               269 drivers/usb/gadget/udc/goku_udc.c 	struct goku_request	*req;
req               273 drivers/usb/gadget/udc/goku_udc.c 	req = kzalloc(sizeof *req, gfp_flags);
req               274 drivers/usb/gadget/udc/goku_udc.c 	if (!req)
req               277 drivers/usb/gadget/udc/goku_udc.c 	INIT_LIST_HEAD(&req->queue);
req               278 drivers/usb/gadget/udc/goku_udc.c 	return &req->req;
req               284 drivers/usb/gadget/udc/goku_udc.c 	struct goku_request	*req;
req               289 drivers/usb/gadget/udc/goku_udc.c 	req = container_of(_req, struct goku_request, req);
req               290 drivers/usb/gadget/udc/goku_udc.c 	WARN_ON(!list_empty(&req->queue));
req               291 drivers/usb/gadget/udc/goku_udc.c 	kfree(req);
req               297 drivers/usb/gadget/udc/goku_udc.c done(struct goku_ep *ep, struct goku_request *req, int status)
req               302 drivers/usb/gadget/udc/goku_udc.c 	list_del_init(&req->queue);
req               304 drivers/usb/gadget/udc/goku_udc.c 	if (likely(req->req.status == -EINPROGRESS))
req               305 drivers/usb/gadget/udc/goku_udc.c 		req->req.status = status;
req               307 drivers/usb/gadget/udc/goku_udc.c 		status = req->req.status;
req               312 drivers/usb/gadget/udc/goku_udc.c 		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
req               318 drivers/usb/gadget/udc/goku_udc.c 			ep->ep.name, &req->req, status,
req               319 drivers/usb/gadget/udc/goku_udc.c 			req->req.actual, req->req.length);
req               324 drivers/usb/gadget/udc/goku_udc.c 	usb_gadget_giveback_request(&ep->ep, &req->req);
req               332 drivers/usb/gadget/udc/goku_udc.c write_packet(u32 __iomem *fifo, u8 *buf, struct goku_request *req, unsigned max)
req               336 drivers/usb/gadget/udc/goku_udc.c 	length = min(req->req.length - req->req.actual, max);
req               337 drivers/usb/gadget/udc/goku_udc.c 	req->req.actual += length;
req               346 drivers/usb/gadget/udc/goku_udc.c static int write_fifo(struct goku_ep *ep, struct goku_request *req)
req               355 drivers/usb/gadget/udc/goku_udc.c 	buf = req->req.buf + req->req.actual;
req               370 drivers/usb/gadget/udc/goku_udc.c 	count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket);
req               381 drivers/usb/gadget/udc/goku_udc.c 		if (likely(req->req.length != req->req.actual)
req               382 drivers/usb/gadget/udc/goku_udc.c 				|| req->req.zero)
req               391 drivers/usb/gadget/udc/goku_udc.c 		req->req.length - req->req.actual, req);
req               398 drivers/usb/gadget/udc/goku_udc.c 		done(ep, req, 0);
req               405 drivers/usb/gadget/udc/goku_udc.c static int read_fifo(struct goku_ep *ep, struct goku_request *req)
req               414 drivers/usb/gadget/udc/goku_udc.c 	buf = req->req.buf + req->req.actual;
req               428 drivers/usb/gadget/udc/goku_udc.c 		bufferspace = req->req.length - req->req.actual;
req               446 drivers/usb/gadget/udc/goku_udc.c 		req->req.actual += size;
req               451 drivers/usb/gadget/udc/goku_udc.c 			req, req->req.actual, req->req.length);
req               461 drivers/usb/gadget/udc/goku_udc.c 				if (req->req.status != -EOVERFLOW)
req               464 drivers/usb/gadget/udc/goku_udc.c 				req->req.status = -EOVERFLOW;
req               472 drivers/usb/gadget/udc/goku_udc.c 		if (unlikely(is_short || req->req.actual == req->req.length)) {
req               485 drivers/usb/gadget/udc/goku_udc.c 			done(ep, req, 0);
req               489 drivers/usb/gadget/udc/goku_udc.c 				req = list_entry(ep->queue.next,
req               520 drivers/usb/gadget/udc/goku_udc.c 	struct goku_request	*req;
req               524 drivers/usb/gadget/udc/goku_udc.c 	req = list_entry(ep->queue.next, struct goku_request, queue);
req               525 drivers/usb/gadget/udc/goku_udc.c 	(ep->is_in ? write_fifo : read_fifo)(ep, req);
req               532 drivers/usb/gadget/udc/goku_udc.c static int start_dma(struct goku_ep *ep, struct goku_request *req)
req               536 drivers/usb/gadget/udc/goku_udc.c 	u32				start = req->req.dma;
req               537 drivers/usb/gadget/udc/goku_udc.c 	u32				end = start + req->req.length - 1;
req               552 drivers/usb/gadget/udc/goku_udc.c 		if (unlikely(req->req.length == 0))
req               554 drivers/usb/gadget/udc/goku_udc.c 		else if ((req->req.length % ep->ep.maxpacket) != 0
req               555 drivers/usb/gadget/udc/goku_udc.c 					|| req->req.zero)
req               589 drivers/usb/gadget/udc/goku_udc.c 	struct goku_request		*req;
req               604 drivers/usb/gadget/udc/goku_udc.c 	req = list_entry(ep->queue.next, struct goku_request, queue);
req               610 drivers/usb/gadget/udc/goku_udc.c 		req->req.actual = readl(&regs->in_dma_current);
req               618 drivers/usb/gadget/udc/goku_udc.c 		req->req.actual = readl(&regs->out_dma_current);
req               620 drivers/usb/gadget/udc/goku_udc.c 	req->req.actual -= req->req.dma;
req               621 drivers/usb/gadget/udc/goku_udc.c 	req->req.actual++;
req               626 drivers/usb/gadget/udc/goku_udc.c 		req->req.actual, req->req.length, req);
req               628 drivers/usb/gadget/udc/goku_udc.c 	done(ep, req, 0);
req               631 drivers/usb/gadget/udc/goku_udc.c 	req = list_entry(ep->queue.next, struct goku_request, queue);
req               632 drivers/usb/gadget/udc/goku_udc.c 	(void) start_dma(ep, req);
req               638 drivers/usb/gadget/udc/goku_udc.c 	struct goku_request		*req;
req               649 drivers/usb/gadget/udc/goku_udc.c 	req = list_entry(ep->queue.next, struct goku_request, queue);
req               687 drivers/usb/gadget/udc/goku_udc.c 	req->req.actual = (curr - req->req.dma) + 1;
req               688 drivers/usb/gadget/udc/goku_udc.c 	req->req.status = status;
req               692 drivers/usb/gadget/udc/goku_udc.c 		req->req.actual, req->req.length);
req               701 drivers/usb/gadget/udc/goku_udc.c 	req->req.actual = req->req.length;
req               702 drivers/usb/gadget/udc/goku_udc.c 	req->req.status = 0;
req               710 drivers/usb/gadget/udc/goku_udc.c 	struct goku_request	*req;
req               717 drivers/usb/gadget/udc/goku_udc.c 	req = container_of(_req, struct goku_request, req);
req               719 drivers/usb/gadget/udc/goku_udc.c 			|| !_req->buf || !list_empty(&req->queue)))
req               734 drivers/usb/gadget/udc/goku_udc.c 		status = usb_gadget_map_request(&dev->gadget, &req->req,
req               763 drivers/usb/gadget/udc/goku_udc.c 			status = start_dma(ep, req);
req               765 drivers/usb/gadget/udc/goku_udc.c 			status = (ep->is_in ? write_fifo : read_fifo)(ep, req);
req               770 drivers/usb/gadget/udc/goku_udc.c 			req = NULL;
req               775 drivers/usb/gadget/udc/goku_udc.c 	if (likely(req != NULL))
req               776 drivers/usb/gadget/udc/goku_udc.c 		list_add_tail(&req->queue, &ep->queue);
req               793 drivers/usb/gadget/udc/goku_udc.c 	struct goku_request	*req;
req               801 drivers/usb/gadget/udc/goku_udc.c 		req = list_entry(ep->queue.next, struct goku_request, queue);
req               802 drivers/usb/gadget/udc/goku_udc.c 		done(ep, req, status);
req               809 drivers/usb/gadget/udc/goku_udc.c 	struct goku_request	*req;
req               833 drivers/usb/gadget/udc/goku_udc.c 	list_for_each_entry (req, &ep->queue, queue) {
req               834 drivers/usb/gadget/udc/goku_udc.c 		if (&req->req == _req)
req               837 drivers/usb/gadget/udc/goku_udc.c 	if (&req->req != _req) {
req               842 drivers/usb/gadget/udc/goku_udc.c 	if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) {
req               844 drivers/usb/gadget/udc/goku_udc.c 		done(ep, req, -ECONNRESET);
req               846 drivers/usb/gadget/udc/goku_udc.c 	} else if (!list_empty(&req->queue))
req               847 drivers/usb/gadget/udc/goku_udc.c 		done(ep, req, -ECONNRESET);
req               849 drivers/usb/gadget/udc/goku_udc.c 		req = NULL;
req               852 drivers/usb/gadget/udc/goku_udc.c 	return req ? 0 : -EOPNOTSUPP;
req               866 drivers/usb/gadget/udc/goku_udc.c 			struct goku_request	*req;
req               870 drivers/usb/gadget/udc/goku_udc.c 			req = list_entry(ep->queue.next, struct goku_request,
req               872 drivers/usb/gadget/udc/goku_udc.c 			(void) start_dma(ep, req);
req              1195 drivers/usb/gadget/udc/goku_udc.c 		struct goku_request	*req;
req              1221 drivers/usb/gadget/udc/goku_udc.c 		list_for_each_entry(req, &ep->queue, queue) {
req              1222 drivers/usb/gadget/udc/goku_udc.c 			if (ep->dma && req->queue.prev == &ep->queue) {
req              1227 drivers/usb/gadget/udc/goku_udc.c 				tmp -= req->req.dma;
req              1230 drivers/usb/gadget/udc/goku_udc.c 				tmp = req->req.actual;
req              1233 drivers/usb/gadget/udc/goku_udc.c 				   &req->req, tmp, req->req.length,
req              1234 drivers/usb/gadget/udc/goku_udc.c 				   req->req.buf);
req               223 drivers/usb/gadget/udc/goku_udc.h 	struct usb_request		req;
req                92 drivers/usb/gadget/udc/gr_udc.c 				struct gr_request *req)
req                94 drivers/usb/gadget/udc/gr_udc.c 	int buflen = ep->is_in ? req->req.length : req->req.actual;
req                98 drivers/usb/gadget/udc/gr_udc.c 	dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
req               101 drivers/usb/gadget/udc/gr_udc.c 			     rowlen, 4, req->req.buf, plen, false);
req               113 drivers/usb/gadget/udc/gr_udc.c 				struct gr_request *req) {}
req               130 drivers/usb/gadget/udc/gr_udc.c 	struct gr_request *req;
req               163 drivers/usb/gadget/udc/gr_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
req               167 drivers/usb/gadget/udc/gr_udc.c 		seq_printf(seq, "    0x%p: 0x%p %d %d\n", req,
req               168 drivers/usb/gadget/udc/gr_udc.c 			   &req->req.buf, req->req.actual, req->req.length);
req               170 drivers/usb/gadget/udc/gr_udc.c 		next = req->first_desc;
req               175 drivers/usb/gadget/udc/gr_udc.c 				   desc == req->curr_desc ? 'c' : ' ',
req               177 drivers/usb/gadget/udc/gr_udc.c 		} while (desc != req->last_desc);
req               254 drivers/usb/gadget/udc/gr_udc.c static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
req               259 drivers/usb/gadget/udc/gr_udc.c 	next = req->first_desc;
req               267 drivers/usb/gadget/udc/gr_udc.c 	} while (desc != req->last_desc);
req               269 drivers/usb/gadget/udc/gr_udc.c 	req->first_desc = NULL;
req               270 drivers/usb/gadget/udc/gr_udc.c 	req->curr_desc = NULL;
req               271 drivers/usb/gadget/udc/gr_udc.c 	req->last_desc = NULL;
req               274 drivers/usb/gadget/udc/gr_udc.c static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);
req               282 drivers/usb/gadget/udc/gr_udc.c static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
req               289 drivers/usb/gadget/udc/gr_udc.c 	list_del_init(&req->queue);
req               291 drivers/usb/gadget/udc/gr_udc.c 	if (likely(req->req.status == -EINPROGRESS))
req               292 drivers/usb/gadget/udc/gr_udc.c 		req->req.status = status;
req               294 drivers/usb/gadget/udc/gr_udc.c 		status = req->req.status;
req               297 drivers/usb/gadget/udc/gr_udc.c 	usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
req               298 drivers/usb/gadget/udc/gr_udc.c 	gr_free_dma_desc_chain(dev, req);
req               301 drivers/usb/gadget/udc/gr_udc.c 		req->req.actual = req->req.length;
req               302 drivers/usb/gadget/udc/gr_udc.c 	} else if (req->oddlen && req->req.actual > req->evenlen) {
req               308 drivers/usb/gadget/udc/gr_udc.c 		char *buftail = ((char *)req->req.buf + req->evenlen);
req               310 drivers/usb/gadget/udc/gr_udc.c 		memcpy(buftail, ep->tailbuf, req->oddlen);
req               312 drivers/usb/gadget/udc/gr_udc.c 		if (req->req.actual > req->req.length) {
req               316 drivers/usb/gadget/udc/gr_udc.c 			gr_dbgprint_request("OVFL", ep, req);
req               317 drivers/usb/gadget/udc/gr_udc.c 			req->req.status = -EOVERFLOW;
req               323 drivers/usb/gadget/udc/gr_udc.c 			gr_dbgprint_request("SENT", ep, req);
req               325 drivers/usb/gadget/udc/gr_udc.c 			gr_dbgprint_request("RECV", ep, req);
req               330 drivers/usb/gadget/udc/gr_udc.c 	if (req == dev->ep0reqo && !status) {
req               331 drivers/usb/gadget/udc/gr_udc.c 		if (req->setup)
req               332 drivers/usb/gadget/udc/gr_udc.c 			gr_ep0_setup(dev, req);
req               336 drivers/usb/gadget/udc/gr_udc.c 	} else if (req->req.complete) {
req               339 drivers/usb/gadget/udc/gr_udc.c 		usb_gadget_giveback_request(&ep->ep, &req->req);
req               348 drivers/usb/gadget/udc/gr_udc.c 	struct gr_request *req;
req               350 drivers/usb/gadget/udc/gr_udc.c 	req = kzalloc(sizeof(*req), gfp_flags);
req               351 drivers/usb/gadget/udc/gr_udc.c 	if (!req)
req               354 drivers/usb/gadget/udc/gr_udc.c 	INIT_LIST_HEAD(&req->queue);
req               356 drivers/usb/gadget/udc/gr_udc.c 	return &req->req;
req               366 drivers/usb/gadget/udc/gr_udc.c 	struct gr_request *req;
req               374 drivers/usb/gadget/udc/gr_udc.c 	req = list_first_entry(&ep->queue, struct gr_request, queue);
req               377 drivers/usb/gadget/udc/gr_udc.c 	BUG_ON(!req->curr_desc);
req               385 drivers/usb/gadget/udc/gr_udc.c 	if (!ep->is_in && req->oddlen)
req               386 drivers/usb/gadget/udc/gr_udc.c 		req->last_desc->data = ep->tailbuf_paddr;
req               391 drivers/usb/gadget/udc/gr_udc.c 	gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);
req               408 drivers/usb/gadget/udc/gr_udc.c 	struct gr_request *req;
req               410 drivers/usb/gadget/udc/gr_udc.c 	req = list_first_entry(&ep->queue, struct gr_request, queue);
req               411 drivers/usb/gadget/udc/gr_udc.c 	gr_finish_request(ep, req, status);
req               436 drivers/usb/gadget/udc/gr_udc.c static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
req               452 drivers/usb/gadget/udc/gr_udc.c 	if (!req->first_desc) {
req               453 drivers/usb/gadget/udc/gr_udc.c 		req->first_desc = desc;
req               454 drivers/usb/gadget/udc/gr_udc.c 		req->curr_desc = desc;
req               456 drivers/usb/gadget/udc/gr_udc.c 		req->last_desc->next_desc = desc;
req               457 drivers/usb/gadget/udc/gr_udc.c 		req->last_desc->next = desc->paddr;
req               458 drivers/usb/gadget/udc/gr_udc.c 		req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
req               460 drivers/usb/gadget/udc/gr_udc.c 	req->last_desc = desc;
req               475 drivers/usb/gadget/udc/gr_udc.c static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
req               482 drivers/usb/gadget/udc/gr_udc.c 	req->first_desc = NULL; /* Signals that no allocation is done yet */
req               483 drivers/usb/gadget/udc/gr_udc.c 	bytes_left = req->req.length;
req               486 drivers/usb/gadget/udc/gr_udc.c 		dma_addr_t start = req->req.dma + bytes_used;
req               491 drivers/usb/gadget/udc/gr_udc.c 			req->evenlen = req->req.length - bytes_left;
req               492 drivers/usb/gadget/udc/gr_udc.c 			req->oddlen = size;
req               495 drivers/usb/gadget/udc/gr_udc.c 		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
req               503 drivers/usb/gadget/udc/gr_udc.c 	req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;
req               508 drivers/usb/gadget/udc/gr_udc.c 	gr_free_dma_desc_chain(ep->dev, req);
req               528 drivers/usb/gadget/udc/gr_udc.c static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
req               535 drivers/usb/gadget/udc/gr_udc.c 	req->first_desc = NULL; /* Signals that no allocation is done yet */
req               536 drivers/usb/gadget/udc/gr_udc.c 	bytes_left = req->req.length;
req               539 drivers/usb/gadget/udc/gr_udc.c 		dma_addr_t start = req->req.dma + bytes_used;
req               542 drivers/usb/gadget/udc/gr_udc.c 		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
req               555 drivers/usb/gadget/udc/gr_udc.c 	if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
req               556 drivers/usb/gadget/udc/gr_udc.c 		ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
req               565 drivers/usb/gadget/udc/gr_udc.c 	req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;
req               570 drivers/usb/gadget/udc/gr_udc.c 	gr_free_dma_desc_chain(ep->dev, req);
req               576 drivers/usb/gadget/udc/gr_udc.c static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
req               586 drivers/usb/gadget/udc/gr_udc.c 	if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
req               589 drivers/usb/gadget/udc/gr_udc.c 			ep->ep.name, req->req.buf, list_empty(&req->queue));
req               605 drivers/usb/gadget/udc/gr_udc.c 	ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
req               612 drivers/usb/gadget/udc/gr_udc.c 		ret = gr_setup_in_desc_list(ep, req, gfp_flags);
req               614 drivers/usb/gadget/udc/gr_udc.c 		ret = gr_setup_out_desc_list(ep, req, gfp_flags);
req               618 drivers/usb/gadget/udc/gr_udc.c 	req->req.status = -EINPROGRESS;
req               619 drivers/usb/gadget/udc/gr_udc.c 	req->req.actual = 0;
req               620 drivers/usb/gadget/udc/gr_udc.c 	list_add_tail(&req->queue, &ep->queue);
req               634 drivers/usb/gadget/udc/gr_udc.c static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
req               638 drivers/usb/gadget/udc/gr_udc.c 		gr_dbgprint_request("RESP", ep, req);
req               640 drivers/usb/gadget/udc/gr_udc.c 	return gr_queue(ep, req, gfp_flags);
req               653 drivers/usb/gadget/udc/gr_udc.c 	struct gr_request *req;
req               660 drivers/usb/gadget/udc/gr_udc.c 		req = list_first_entry(&ep->queue, struct gr_request, queue);
req               661 drivers/usb/gadget/udc/gr_udc.c 		gr_finish_request(ep, req, -ESHUTDOWN);
req               821 drivers/usb/gadget/udc/gr_udc.c 					   struct usb_request *req))
req               823 drivers/usb/gadget/udc/gr_udc.c 	u8 *reqbuf = dev->ep0reqi->req.buf;
req               829 drivers/usb/gadget/udc/gr_udc.c 	dev->ep0reqi->req.length = length;
req               830 drivers/usb/gadget/udc/gr_udc.c 	dev->ep0reqi->req.complete = complete;
req              1041 drivers/usb/gadget/udc/gr_udc.c static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
req              1061 drivers/usb/gadget/udc/gr_udc.c 		if (!req->req.actual)
req              1067 drivers/usb/gadget/udc/gr_udc.c 		if (req->req.actual > 0)
req              1080 drivers/usb/gadget/udc/gr_udc.c 	} else if (!req->req.actual) {
req              1087 drivers/usb/gadget/udc/gr_udc.c 	for (i = 0; i < req->req.actual; i++)
req              1088 drivers/usb/gadget/udc/gr_udc.c 		u.raw[i] = ((u8 *)req->req.buf)[i];
req              1238 drivers/usb/gadget/udc/gr_udc.c 	struct gr_request *req;
req              1240 drivers/usb/gadget/udc/gr_udc.c 	req = list_first_entry(&ep->queue, struct gr_request, queue);
req              1241 drivers/usb/gadget/udc/gr_udc.c 	if (!req->last_desc)
req              1244 drivers/usb/gadget/udc/gr_udc.c 	if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
req              1266 drivers/usb/gadget/udc/gr_udc.c 	struct gr_request *req;
req              1269 drivers/usb/gadget/udc/gr_udc.c 	req = list_first_entry(&ep->queue, struct gr_request, queue);
req              1270 drivers/usb/gadget/udc/gr_udc.c 	if (!req->curr_desc)
req              1273 drivers/usb/gadget/udc/gr_udc.c 	ctrl = READ_ONCE(req->curr_desc->ctrl);
req              1279 drivers/usb/gadget/udc/gr_udc.c 	req->req.actual += len;
req              1281 drivers/usb/gadget/udc/gr_udc.c 		req->setup = 1;
req              1283 drivers/usb/gadget/udc/gr_udc.c 	if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) {
req              1300 drivers/usb/gadget/udc/gr_udc.c 		req->curr_desc = req->curr_desc->next_desc;
req              1301 drivers/usb/gadget/udc/gr_udc.c 		req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;
req              1637 drivers/usb/gadget/udc/gr_udc.c 	struct gr_request *req;
req              1641 drivers/usb/gadget/udc/gr_udc.c 	req = container_of(_req, struct gr_request, req);
req              1644 drivers/usb/gadget/udc/gr_udc.c 	WARN(!list_empty(&req->queue),
req              1647 drivers/usb/gadget/udc/gr_udc.c 	kfree(req);
req              1655 drivers/usb/gadget/udc/gr_udc.c 	struct gr_request *req;
req              1663 drivers/usb/gadget/udc/gr_udc.c 	req = container_of(_req, struct gr_request, req);
req              1680 drivers/usb/gadget/udc/gr_udc.c 		gr_dbgprint_request("EXTERN", ep, req);
req              1682 drivers/usb/gadget/udc/gr_udc.c 	ret = gr_queue(ep, req, GFP_ATOMIC);
req              1692 drivers/usb/gadget/udc/gr_udc.c 	struct gr_request *req;
req              1712 drivers/usb/gadget/udc/gr_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
req              1713 drivers/usb/gadget/udc/gr_udc.c 		if (&req->req == _req)
req              1716 drivers/usb/gadget/udc/gr_udc.c 	if (&req->req != _req) {
req              1721 drivers/usb/gadget/udc/gr_udc.c 	if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
req              1725 drivers/usb/gadget/udc/gr_udc.c 			gr_finish_request(ep, req, -ECONNRESET);
req              1728 drivers/usb/gadget/udc/gr_udc.c 	} else if (!list_empty(&req->queue)) {
req              1730 drivers/usb/gadget/udc/gr_udc.c 		gr_finish_request(ep, req, -ECONNRESET);
req              1960 drivers/usb/gadget/udc/gr_udc.c 	struct gr_request *req;
req              1989 drivers/usb/gadget/udc/gr_udc.c 		req = container_of(_req, struct gr_request, req);
req              1990 drivers/usb/gadget/udc/gr_udc.c 		req->req.buf = buf;
req              1991 drivers/usb/gadget/udc/gr_udc.c 		req->req.length = MAX_CTRL_PL_SIZE;
req              1994 drivers/usb/gadget/udc/gr_udc.c 			dev->ep0reqi = req; /* Complete gets set as used */
req              1996 drivers/usb/gadget/udc/gr_udc.c 			dev->ep0reqo = req; /* Completion treated separately */
req              2102 drivers/usb/gadget/udc/gr_udc.c 	gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
req              2103 drivers/usb/gadget/udc/gr_udc.c 	gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);
req               162 drivers/usb/gadget/udc/gr_udc.h 	struct usb_request req;
req               176 drivers/usb/gadget/udc/lpc32xx_udc.c 	struct usb_request	req;
req               476 drivers/usb/gadget/udc/lpc32xx_udc.c 	struct lpc32xx_request *req;
req               488 drivers/usb/gadget/udc/lpc32xx_udc.c 		list_for_each_entry(req, &ep->queue, queue) {
req               489 drivers/usb/gadget/udc/lpc32xx_udc.c 			u32 length = req->req.actual;
req               492 drivers/usb/gadget/udc/lpc32xx_udc.c 				   &req->req, length,
req               493 drivers/usb/gadget/udc/lpc32xx_udc.c 				   req->req.length, req->req.buf);
req               989 drivers/usb/gadget/udc/lpc32xx_udc.c 	struct lpc32xx_request *req;
req               995 drivers/usb/gadget/udc/lpc32xx_udc.c 	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
req               998 drivers/usb/gadget/udc/lpc32xx_udc.c 	udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;
req              1004 drivers/usb/gadget/udc/lpc32xx_udc.c 	if (req->req.length % ep->ep.maxpacket)
req              1005 drivers/usb/gadget/udc/lpc32xx_udc.c 		req->send_zlp = 0;
req              1015 drivers/usb/gadget/udc/lpc32xx_udc.c 	struct lpc32xx_request *req;
req              1021 drivers/usb/gadget/udc/lpc32xx_udc.c 	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
req              1024 drivers/usb/gadget/udc/lpc32xx_udc.c 	udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;
req              1409 drivers/usb/gadget/udc/lpc32xx_udc.c static void done(struct lpc32xx_ep *ep, struct lpc32xx_request *req, int status)
req              1413 drivers/usb/gadget/udc/lpc32xx_udc.c 	list_del_init(&req->queue);
req              1414 drivers/usb/gadget/udc/lpc32xx_udc.c 	if (req->req.status == -EINPROGRESS)
req              1415 drivers/usb/gadget/udc/lpc32xx_udc.c 		req->req.status = status;
req              1417 drivers/usb/gadget/udc/lpc32xx_udc.c 		status = req->req.status;
req              1420 drivers/usb/gadget/udc/lpc32xx_udc.c 		usb_gadget_unmap_request(&udc->gadget, &req->req, ep->is_in);
req              1423 drivers/usb/gadget/udc/lpc32xx_udc.c 		udc_dd_free(udc, req->dd_desc_ptr);
req              1427 drivers/usb/gadget/udc/lpc32xx_udc.c 		ep_dbg(ep, "%s done %p, status %d\n", ep->ep.name, req, status);
req              1431 drivers/usb/gadget/udc/lpc32xx_udc.c 	usb_gadget_giveback_request(&ep->ep, &req->req);
req              1438 drivers/usb/gadget/udc/lpc32xx_udc.c 	struct lpc32xx_request *req;
req              1441 drivers/usb/gadget/udc/lpc32xx_udc.c 		req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
req              1442 drivers/usb/gadget/udc/lpc32xx_udc.c 		done(ep, req, status);
req              1454 drivers/usb/gadget/udc/lpc32xx_udc.c 	struct lpc32xx_request *req;
req              1462 drivers/usb/gadget/udc/lpc32xx_udc.c 		req = list_entry(ep0->queue.next, struct lpc32xx_request,
req              1465 drivers/usb/gadget/udc/lpc32xx_udc.c 	tsend = ts = req->req.length - req->req.actual;
req              1469 drivers/usb/gadget/udc/lpc32xx_udc.c 		done(ep0, req, 0);
req              1475 drivers/usb/gadget/udc/lpc32xx_udc.c 	udc_write_hwep(udc, EP_IN, (req->req.buf + req->req.actual), ts);
req              1478 drivers/usb/gadget/udc/lpc32xx_udc.c 	req->req.actual += ts;
req              1485 drivers/usb/gadget/udc/lpc32xx_udc.c 	done(ep0, req, 0);
req              1492 drivers/usb/gadget/udc/lpc32xx_udc.c 	struct lpc32xx_request *req;
req              1499 drivers/usb/gadget/udc/lpc32xx_udc.c 		req = list_entry(ep0->queue.next, struct lpc32xx_request,
req              1502 drivers/usb/gadget/udc/lpc32xx_udc.c 	if (req) {
req              1503 drivers/usb/gadget/udc/lpc32xx_udc.c 		if (req->req.length == 0) {
req              1505 drivers/usb/gadget/udc/lpc32xx_udc.c 			done(ep0, req, 0);
req              1511 drivers/usb/gadget/udc/lpc32xx_udc.c 		bufferspace = req->req.length - req->req.actual;
req              1516 drivers/usb/gadget/udc/lpc32xx_udc.c 		prefetchw(req->req.buf + req->req.actual);
req              1517 drivers/usb/gadget/udc/lpc32xx_udc.c 		tr = udc_read_hwep(udc, EP_OUT, req->req.buf + req->req.actual,
req              1519 drivers/usb/gadget/udc/lpc32xx_udc.c 		req->req.actual += bufferspace;
req              1523 drivers/usb/gadget/udc/lpc32xx_udc.c 			done(ep0, req, 0);
req              1720 drivers/usb/gadget/udc/lpc32xx_udc.c 	struct lpc32xx_request *req;
req              1722 drivers/usb/gadget/udc/lpc32xx_udc.c 	req = kzalloc(sizeof(struct lpc32xx_request), gfp_flags);
req              1723 drivers/usb/gadget/udc/lpc32xx_udc.c 	if (!req)
req              1726 drivers/usb/gadget/udc/lpc32xx_udc.c 	INIT_LIST_HEAD(&req->queue);
req              1727 drivers/usb/gadget/udc/lpc32xx_udc.c 	return &req->req;
req              1737 drivers/usb/gadget/udc/lpc32xx_udc.c 	struct lpc32xx_request *req;
req              1739 drivers/usb/gadget/udc/lpc32xx_udc.c 	req = container_of(_req, struct lpc32xx_request, req);
req              1740 drivers/usb/gadget/udc/lpc32xx_udc.c 	BUG_ON(!list_empty(&req->queue));
req              1741 drivers/usb/gadget/udc/lpc32xx_udc.c 	kfree(req);
req              1748 drivers/usb/gadget/udc/lpc32xx_udc.c 	struct lpc32xx_request *req;
req              1754 drivers/usb/gadget/udc/lpc32xx_udc.c 	req = container_of(_req, struct lpc32xx_request, req);
req              1758 drivers/usb/gadget/udc/lpc32xx_udc.c 	    !list_empty(&req->queue))
req              1779 drivers/usb/gadget/udc/lpc32xx_udc.c 		req->dd_desc_ptr = dd;
req              1783 drivers/usb/gadget/udc/lpc32xx_udc.c 		dd->dd_buffer_addr = req->req.dma;
req              1793 drivers/usb/gadget/udc/lpc32xx_udc.c 				dd->iso_status[0] = req->req.length;
req              1798 drivers/usb/gadget/udc/lpc32xx_udc.c 				DD_SETUP_DMALENBYTES(req->req.length);
req              1808 drivers/usb/gadget/udc/lpc32xx_udc.c 	req->send_zlp = _req->zero;
req              1812 drivers/usb/gadget/udc/lpc32xx_udc.c 		list_add_tail(&req->queue, &ep->queue);
req              1834 drivers/usb/gadget/udc/lpc32xx_udc.c 		list_add_tail(&req->queue, &ep->queue);
req              1845 drivers/usb/gadget/udc/lpc32xx_udc.c 	struct lpc32xx_request *req;
req              1855 drivers/usb/gadget/udc/lpc32xx_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
req              1856 drivers/usb/gadget/udc/lpc32xx_udc.c 		if (&req->req == _req)
req              1859 drivers/usb/gadget/udc/lpc32xx_udc.c 	if (&req->req != _req) {
req              1864 drivers/usb/gadget/udc/lpc32xx_udc.c 	done(ep, req, -ECONNRESET);
req              1945 drivers/usb/gadget/udc/lpc32xx_udc.c 	struct lpc32xx_request *req;
req              1973 drivers/usb/gadget/udc/lpc32xx_udc.c 	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
req              1974 drivers/usb/gadget/udc/lpc32xx_udc.c 	if (req) {
req              1975 drivers/usb/gadget/udc/lpc32xx_udc.c 		done(ep, req, 0);
req              1993 drivers/usb/gadget/udc/lpc32xx_udc.c 	struct lpc32xx_request *req;
req              2000 drivers/usb/gadget/udc/lpc32xx_udc.c 	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
req              2001 drivers/usb/gadget/udc/lpc32xx_udc.c 	if (!req) {
req              2005 drivers/usb/gadget/udc/lpc32xx_udc.c 	dd = req->dd_desc_ptr;
req              2027 drivers/usb/gadget/udc/lpc32xx_udc.c 		done(ep, req, -ECONNABORTED);
req              2040 drivers/usb/gadget/udc/lpc32xx_udc.c 		done(ep, req, -ECONNABORTED);
req              2048 drivers/usb/gadget/udc/lpc32xx_udc.c 		done(ep, req, -ECONNABORTED);
req              2062 drivers/usb/gadget/udc/lpc32xx_udc.c 		done(ep, req, -ECONNABORTED);
req              2069 drivers/usb/gadget/udc/lpc32xx_udc.c 			req->req.actual = req->req.length;
req              2071 drivers/usb/gadget/udc/lpc32xx_udc.c 			req->req.actual = dd->iso_status[0] & 0xFFFF;
req              2073 drivers/usb/gadget/udc/lpc32xx_udc.c 		req->req.actual += DD_STATUS_CURDMACNT(status);
req              2077 drivers/usb/gadget/udc/lpc32xx_udc.c 	if (req->send_zlp) {
req              2095 drivers/usb/gadget/udc/lpc32xx_udc.c 	done(ep, req, 0);
req              2198 drivers/usb/gadget/udc/lpc32xx_udc.c 	u16 wIndex, wValue, reqtype, req, tmp;
req              2223 drivers/usb/gadget/udc/lpc32xx_udc.c 	req = le16_to_cpu(ctrlpkt.bRequest);
req              2224 drivers/usb/gadget/udc/lpc32xx_udc.c 	switch (req) {
req              2233 drivers/usb/gadget/udc/lpc32xx_udc.c 			if (req == USB_REQ_CLEAR_FEATURE)
req              2254 drivers/usb/gadget/udc/lpc32xx_udc.c 			if (req == USB_REQ_SET_FEATURE)
req              2288 drivers/usb/gadget/udc/lpc32xx_udc.c 		if (req == USB_REQ_SET_CONFIGURATION) {
req              2311 drivers/usb/gadget/udc/lpc32xx_udc.c 				reqtype, req, i);
req                35 drivers/usb/gadget/udc/m66592-udc.c static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req);
req                36 drivers/usb/gadget/udc/m66592-udc.c static void irq_packet_write(struct m66592_ep *ep, struct m66592_request *req);
req                41 drivers/usb/gadget/udc/m66592-udc.c 		struct m66592_request *req, int status);
req               494 drivers/usb/gadget/udc/m66592-udc.c static void start_ep0_write(struct m66592_ep *ep, struct m66592_request *req)
req               503 drivers/usb/gadget/udc/m66592-udc.c 	if (req->req.length == 0) {
req               506 drivers/usb/gadget/udc/m66592-udc.c 		transfer_complete(ep, req, 0);
req               509 drivers/usb/gadget/udc/m66592-udc.c 		irq_ep0_write(ep, req);
req               513 drivers/usb/gadget/udc/m66592-udc.c static void start_packet_write(struct m66592_ep *ep, struct m66592_request *req)
req               526 drivers/usb/gadget/udc/m66592-udc.c 		irq_packet_write(ep, req);
req               529 drivers/usb/gadget/udc/m66592-udc.c static void start_packet_read(struct m66592_ep *ep, struct m66592_request *req)
req               547 drivers/usb/gadget/udc/m66592-udc.c 				(req->req.length + ep->ep.maxpacket - 1)
req               556 drivers/usb/gadget/udc/m66592-udc.c static void start_packet(struct m66592_ep *ep, struct m66592_request *req)
req               559 drivers/usb/gadget/udc/m66592-udc.c 		start_packet_write(ep, req);
req               561 drivers/usb/gadget/udc/m66592-udc.c 		start_packet_read(ep, req);
req               564 drivers/usb/gadget/udc/m66592-udc.c static void start_ep0(struct m66592_ep *ep, struct m66592_request *req)
req               572 drivers/usb/gadget/udc/m66592-udc.c 		start_ep0_write(ep, req);
req               575 drivers/usb/gadget/udc/m66592-udc.c 		start_packet_read(ep, req);
req               706 drivers/usb/gadget/udc/m66592-udc.c 		struct m66592_request *req, int status)
req               719 drivers/usb/gadget/udc/m66592-udc.c 	list_del_init(&req->queue);
req               721 drivers/usb/gadget/udc/m66592-udc.c 		req->req.status = -ESHUTDOWN;
req               723 drivers/usb/gadget/udc/m66592-udc.c 		req->req.status = status;
req               729 drivers/usb/gadget/udc/m66592-udc.c 	usb_gadget_giveback_request(&ep->ep, &req->req);
req               733 drivers/usb/gadget/udc/m66592-udc.c 		req = list_entry(ep->queue.next, struct m66592_request, queue);
req               735 drivers/usb/gadget/udc/m66592-udc.c 			start_packet(ep, req);
req               739 drivers/usb/gadget/udc/m66592-udc.c static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req)
req               765 drivers/usb/gadget/udc/m66592-udc.c 	buf = req->req.buf + req->req.actual;
req               766 drivers/usb/gadget/udc/m66592-udc.c 	size = min(bufsize, req->req.length - req->req.actual);
req               769 drivers/usb/gadget/udc/m66592-udc.c 	if (req->req.buf) {
req               777 drivers/usb/gadget/udc/m66592-udc.c 	req->req.actual += size;
req               780 drivers/usb/gadget/udc/m66592-udc.c 	if ((!req->req.zero && (req->req.actual == req->req.length))
req               792 drivers/usb/gadget/udc/m66592-udc.c static void irq_packet_write(struct m66592_ep *ep, struct m66592_request *req)
req               812 drivers/usb/gadget/udc/m66592-udc.c 	buf = req->req.buf + req->req.actual;
req               813 drivers/usb/gadget/udc/m66592-udc.c 	size = min(bufsize, req->req.length - req->req.actual);
req               816 drivers/usb/gadget/udc/m66592-udc.c 	if (req->req.buf) {
req               826 drivers/usb/gadget/udc/m66592-udc.c 	req->req.actual += size;
req               828 drivers/usb/gadget/udc/m66592-udc.c 	if ((!req->req.zero && (req->req.actual == req->req.length))
req               839 drivers/usb/gadget/udc/m66592-udc.c static void irq_packet_read(struct m66592_ep *ep, struct m66592_request *req)
req               852 drivers/usb/gadget/udc/m66592-udc.c 		req->req.status = -EPIPE;
req               863 drivers/usb/gadget/udc/m66592-udc.c 	buf = req->req.buf + req->req.actual;
req               864 drivers/usb/gadget/udc/m66592-udc.c 	req_len = req->req.length - req->req.actual;
req               871 drivers/usb/gadget/udc/m66592-udc.c 	req->req.actual += size;
req               874 drivers/usb/gadget/udc/m66592-udc.c 	if ((!req->req.zero && (req->req.actual == req->req.length))
req               883 drivers/usb/gadget/udc/m66592-udc.c 	if (req->req.buf) {
req               891 drivers/usb/gadget/udc/m66592-udc.c 		transfer_complete(ep, req, 0);
req               899 drivers/usb/gadget/udc/m66592-udc.c 	struct m66592_request *req;
req               907 drivers/usb/gadget/udc/m66592-udc.c 		req = list_entry(ep->queue.next, struct m66592_request, queue);
req               908 drivers/usb/gadget/udc/m66592-udc.c 		irq_packet_read(ep, req);
req               915 drivers/usb/gadget/udc/m66592-udc.c 				req = list_entry(ep->queue.next,
req               918 drivers/usb/gadget/udc/m66592-udc.c 					irq_packet_write(ep, req);
req               920 drivers/usb/gadget/udc/m66592-udc.c 					irq_packet_read(ep, req);
req               932 drivers/usb/gadget/udc/m66592-udc.c 	struct m66592_request *req;
req               938 drivers/usb/gadget/udc/m66592-udc.c 		req = list_entry(ep->queue.next, struct m66592_request, queue);
req               939 drivers/usb/gadget/udc/m66592-udc.c 		irq_ep0_write(ep, req);
req               951 drivers/usb/gadget/udc/m66592-udc.c 					req = list_entry(ep->queue.next,
req               955 drivers/usb/gadget/udc/m66592-udc.c 						transfer_complete(ep, req, 0);
req              1011 drivers/usb/gadget/udc/m66592-udc.c 		struct m66592_request *req;
req              1020 drivers/usb/gadget/udc/m66592-udc.c 		req = list_entry(ep->queue.next,
req              1026 drivers/usb/gadget/udc/m66592-udc.c 			start_packet(ep, req);
req              1167 drivers/usb/gadget/udc/m66592-udc.c 		struct m66592_request *req;
req              1169 drivers/usb/gadget/udc/m66592-udc.c 		req = list_entry(ep->queue.next, struct m66592_request, queue);
req              1170 drivers/usb/gadget/udc/m66592-udc.c 		transfer_complete(ep, req, 0);
req              1311 drivers/usb/gadget/udc/m66592-udc.c 	struct m66592_request *req;
req              1318 drivers/usb/gadget/udc/m66592-udc.c 		req = list_entry(ep->queue.next, struct m66592_request, queue);
req              1320 drivers/usb/gadget/udc/m66592-udc.c 		transfer_complete(ep, req, -ECONNRESET);
req              1331 drivers/usb/gadget/udc/m66592-udc.c 	struct m66592_request *req;
req              1333 drivers/usb/gadget/udc/m66592-udc.c 	req = kzalloc(sizeof(struct m66592_request), gfp_flags);
req              1334 drivers/usb/gadget/udc/m66592-udc.c 	if (!req)
req              1337 drivers/usb/gadget/udc/m66592-udc.c 	INIT_LIST_HEAD(&req->queue);
req              1339 drivers/usb/gadget/udc/m66592-udc.c 	return &req->req;
req              1344 drivers/usb/gadget/udc/m66592-udc.c 	struct m66592_request *req;
req              1346 drivers/usb/gadget/udc/m66592-udc.c 	req = container_of(_req, struct m66592_request, req);
req              1347 drivers/usb/gadget/udc/m66592-udc.c 	kfree(req);
req              1354 drivers/usb/gadget/udc/m66592-udc.c 	struct m66592_request *req;
req              1359 drivers/usb/gadget/udc/m66592-udc.c 	req = container_of(_req, struct m66592_request, req);
req              1369 drivers/usb/gadget/udc/m66592-udc.c 	list_add_tail(&req->queue, &ep->queue);
req              1370 drivers/usb/gadget/udc/m66592-udc.c 	req->req.actual = 0;
req              1371 drivers/usb/gadget/udc/m66592-udc.c 	req->req.status = -EINPROGRESS;
req              1374 drivers/usb/gadget/udc/m66592-udc.c 		start_ep0(ep, req);
req              1377 drivers/usb/gadget/udc/m66592-udc.c 			start_packet(ep, req);
req              1388 drivers/usb/gadget/udc/m66592-udc.c 	struct m66592_request *req;
req              1392 drivers/usb/gadget/udc/m66592-udc.c 	req = container_of(_req, struct m66592_request, req);
req              1396 drivers/usb/gadget/udc/m66592-udc.c 		transfer_complete(ep, req, -ECONNRESET);
req               437 drivers/usb/gadget/udc/m66592-udc.h 	struct usb_request	req;
req               306 drivers/usb/gadget/udc/mv_u3d.h 	struct usb_request	req;
req               127 drivers/usb/gadget/udc/mv_u3d_core.c 	actual = curr_req->req.length;
req               165 drivers/usb/gadget/udc/mv_u3d_core.c 	curr_req->req.actual = actual;
req               175 drivers/usb/gadget/udc/mv_u3d_core.c void mv_u3d_done(struct mv_u3d_ep *ep, struct mv_u3d_req *req, int status)
req               183 drivers/usb/gadget/udc/mv_u3d_core.c 	list_del_init(&req->queue);
req               186 drivers/usb/gadget/udc/mv_u3d_core.c 	if (req->req.status == -EINPROGRESS)
req               187 drivers/usb/gadget/udc/mv_u3d_core.c 		req->req.status = status;
req               189 drivers/usb/gadget/udc/mv_u3d_core.c 		status = req->req.status;
req               192 drivers/usb/gadget/udc/mv_u3d_core.c 	if (!req->chain)
req               194 drivers/usb/gadget/udc/mv_u3d_core.c 			req->trb_head->trb_hw, req->trb_head->trb_dma);
req               197 drivers/usb/gadget/udc/mv_u3d_core.c 			(dma_addr_t)req->trb_head->trb_dma,
req               198 drivers/usb/gadget/udc/mv_u3d_core.c 			req->trb_count * sizeof(struct mv_u3d_trb_hw),
req               200 drivers/usb/gadget/udc/mv_u3d_core.c 		kfree(req->trb_head->trb_hw);
req               202 drivers/usb/gadget/udc/mv_u3d_core.c 	kfree(req->trb_head);
req               204 drivers/usb/gadget/udc/mv_u3d_core.c 	usb_gadget_unmap_request(&u3d->gadget, &req->req, mv_u3d_ep_dir(ep));
req               208 drivers/usb/gadget/udc/mv_u3d_core.c 			ep->ep.name, &req->req, status,
req               209 drivers/usb/gadget/udc/mv_u3d_core.c 			req->req.actual, req->req.length);
req               214 drivers/usb/gadget/udc/mv_u3d_core.c 	usb_gadget_giveback_request(&ep->ep, &req->req);
req               219 drivers/usb/gadget/udc/mv_u3d_core.c static int mv_u3d_queue_trb(struct mv_u3d_ep *ep, struct mv_u3d_req *req)
req               248 drivers/usb/gadget/udc/mv_u3d_core.c 			cpu_to_le32(req->trb_head->trb_dma | DCS_ENABLE);
req               268 drivers/usb/gadget/udc/mv_u3d_core.c static struct mv_u3d_trb *mv_u3d_build_trb_one(struct mv_u3d_req *req,
req               278 drivers/usb/gadget/udc/mv_u3d_core.c 	*length = req->req.length - req->req.actual;
req               281 drivers/usb/gadget/udc/mv_u3d_core.c 	u3d = req->ep->u3d;
req               303 drivers/usb/gadget/udc/mv_u3d_core.c 	temp = (u32)(req->req.dma + req->req.actual);
req               310 drivers/usb/gadget/udc/mv_u3d_core.c 	if (req->ep->ep_num == 0)
req               315 drivers/usb/gadget/udc/mv_u3d_core.c 	req->req.actual += *length;
req               317 drivers/usb/gadget/udc/mv_u3d_core.c 	direction = mv_u3d_ep_dir(req->ep);
req               324 drivers/usb/gadget/udc/mv_u3d_core.c 	if (!req->req.no_interrupt)
req               333 drivers/usb/gadget/udc/mv_u3d_core.c static int mv_u3d_build_trb_chain(struct mv_u3d_req *req, unsigned *length,
req               341 drivers/usb/gadget/udc/mv_u3d_core.c 	*length = min(req->req.length - req->req.actual,
req               344 drivers/usb/gadget/udc/mv_u3d_core.c 	u3d = req->ep->u3d;
req               349 drivers/usb/gadget/udc/mv_u3d_core.c 	temp = (u32)(req->req.dma + req->req.actual);
req               356 drivers/usb/gadget/udc/mv_u3d_core.c 	if (req->ep->ep_num == 0)
req               361 drivers/usb/gadget/udc/mv_u3d_core.c 	req->req.actual += *length;
req               363 drivers/usb/gadget/udc/mv_u3d_core.c 	direction = mv_u3d_ep_dir(req->ep);
req               370 drivers/usb/gadget/udc/mv_u3d_core.c 	if (req->req.zero) {
req               371 drivers/usb/gadget/udc/mv_u3d_core.c 		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
req               375 drivers/usb/gadget/udc/mv_u3d_core.c 	} else if (req->req.length == req->req.actual)
req               381 drivers/usb/gadget/udc/mv_u3d_core.c 	if (*is_last && !req->req.no_interrupt)
req               400 drivers/usb/gadget/udc/mv_u3d_core.c static int mv_u3d_req_to_trb(struct mv_u3d_req *req)
req               411 drivers/usb/gadget/udc/mv_u3d_core.c 	u3d = req->ep->u3d;
req               413 drivers/usb/gadget/udc/mv_u3d_core.c 	INIT_LIST_HEAD(&req->trb_list);
req               415 drivers/usb/gadget/udc/mv_u3d_core.c 	length = req->req.length - req->req.actual;
req               420 drivers/usb/gadget/udc/mv_u3d_core.c 		trb = mv_u3d_build_trb_one(req, &count, &dma);
req               421 drivers/usb/gadget/udc/mv_u3d_core.c 		list_add_tail(&trb->trb_list, &req->trb_list);
req               422 drivers/usb/gadget/udc/mv_u3d_core.c 		req->trb_head = trb;
req               423 drivers/usb/gadget/udc/mv_u3d_core.c 		req->trb_count = 1;
req               424 drivers/usb/gadget/udc/mv_u3d_core.c 		req->chain = 0;
req               442 drivers/usb/gadget/udc/mv_u3d_core.c 			if (mv_u3d_build_trb_chain(req, &count,
req               450 drivers/usb/gadget/udc/mv_u3d_core.c 			list_add_tail(&trb->trb_list, &req->trb_list);
req               451 drivers/usb/gadget/udc/mv_u3d_core.c 			req->trb_count++;
req               456 drivers/usb/gadget/udc/mv_u3d_core.c 		req->trb_head = list_entry(req->trb_list.next,
req               458 drivers/usb/gadget/udc/mv_u3d_core.c 		req->trb_head->trb_dma = dma_map_single(u3d->gadget.dev.parent,
req               459 drivers/usb/gadget/udc/mv_u3d_core.c 					req->trb_head->trb_hw,
req               463 drivers/usb/gadget/udc/mv_u3d_core.c 					req->trb_head->trb_dma)) {
req               464 drivers/usb/gadget/udc/mv_u3d_core.c 			kfree(req->trb_head->trb_hw);
req               465 drivers/usb/gadget/udc/mv_u3d_core.c 			kfree(req->trb_head);
req               469 drivers/usb/gadget/udc/mv_u3d_core.c 		req->chain = 1;
req               479 drivers/usb/gadget/udc/mv_u3d_core.c 	struct mv_u3d_req *req;
req               483 drivers/usb/gadget/udc/mv_u3d_core.c 		req = list_entry(ep->req_list.next, struct mv_u3d_req, list);
req               490 drivers/usb/gadget/udc/mv_u3d_core.c 	ret = usb_gadget_map_request(&u3d->gadget, &req->req,
req               495 drivers/usb/gadget/udc/mv_u3d_core.c 	req->req.status = -EINPROGRESS;
req               496 drivers/usb/gadget/udc/mv_u3d_core.c 	req->req.actual = 0;
req               497 drivers/usb/gadget/udc/mv_u3d_core.c 	req->trb_count = 0;
req               500 drivers/usb/gadget/udc/mv_u3d_core.c 	ret = mv_u3d_req_to_trb(req);
req               507 drivers/usb/gadget/udc/mv_u3d_core.c 	ret = mv_u3d_queue_trb(ep, req);
req               512 drivers/usb/gadget/udc/mv_u3d_core.c 	list_add_tail(&req->queue, &ep->queue);
req               669 drivers/usb/gadget/udc/mv_u3d_core.c 	struct mv_u3d_req *req = NULL;
req               671 drivers/usb/gadget/udc/mv_u3d_core.c 	req = kzalloc(sizeof *req, gfp_flags);
req               672 drivers/usb/gadget/udc/mv_u3d_core.c 	if (!req)
req               675 drivers/usb/gadget/udc/mv_u3d_core.c 	INIT_LIST_HEAD(&req->queue);
req               677 drivers/usb/gadget/udc/mv_u3d_core.c 	return &req->req;
req               682 drivers/usb/gadget/udc/mv_u3d_core.c 	struct mv_u3d_req *req = container_of(_req, struct mv_u3d_req, req);
req               684 drivers/usb/gadget/udc/mv_u3d_core.c 	kfree(req);
req               774 drivers/usb/gadget/udc/mv_u3d_core.c 	struct mv_u3d_req *req;
req               785 drivers/usb/gadget/udc/mv_u3d_core.c 	req = container_of(_req, struct mv_u3d_req, req);
req               796 drivers/usb/gadget/udc/mv_u3d_core.c 			__func__, _ep->name, req);
req               799 drivers/usb/gadget/udc/mv_u3d_core.c 	if (!req->req.complete || !req->req.buf
req               800 drivers/usb/gadget/udc/mv_u3d_core.c 			|| !list_empty(&req->queue)) {
req               806 drivers/usb/gadget/udc/mv_u3d_core.c 			req->req.complete, req->req.buf,
req               807 drivers/usb/gadget/udc/mv_u3d_core.c 			list_empty(&req->queue));
req               815 drivers/usb/gadget/udc/mv_u3d_core.c 		if (req->req.length > ep->ep.maxpacket)
req               825 drivers/usb/gadget/udc/mv_u3d_core.c 	req->ep = ep;
req               830 drivers/usb/gadget/udc/mv_u3d_core.c 	list_add_tail(&req->list, &ep->req_list);
req               848 drivers/usb/gadget/udc/mv_u3d_core.c 	struct mv_u3d_req *req;
req               865 drivers/usb/gadget/udc/mv_u3d_core.c 	list_for_each_entry(req, &ep->queue, queue) {
req               866 drivers/usb/gadget/udc/mv_u3d_core.c 		if (&req->req == _req)
req               869 drivers/usb/gadget/udc/mv_u3d_core.c 	if (&req->req != _req) {
req               875 drivers/usb/gadget/udc/mv_u3d_core.c 	if (ep->queue.next == &req->queue) {
req               880 drivers/usb/gadget/udc/mv_u3d_core.c 		if (req->queue.next != &ep->queue) {
req               884 drivers/usb/gadget/udc/mv_u3d_core.c 			next_req = list_entry(req->queue.next,
req               900 drivers/usb/gadget/udc/mv_u3d_core.c 	mv_u3d_done(ep, req, -ECONNRESET);
req               907 drivers/usb/gadget/udc/mv_u3d_core.c 		if (curr_req == req) {
req               908 drivers/usb/gadget/udc/mv_u3d_core.c 			list_del_init(&req->list);
req              1369 drivers/usb/gadget/udc/mv_u3d_core.c 		struct mv_u3d_req *req = NULL;
req              1370 drivers/usb/gadget/udc/mv_u3d_core.c 		req = list_entry(ep->queue.next, struct mv_u3d_req, queue);
req              1371 drivers/usb/gadget/udc/mv_u3d_core.c 		mv_u3d_done(ep, req, status);
req              1644 drivers/usb/gadget/udc/mv_u3d_core.c 			struct mv_u3d_req *req;
req              1645 drivers/usb/gadget/udc/mv_u3d_core.c 			req = list_entry(curr_ep->req_list.next,
req              1647 drivers/usb/gadget/udc/mv_u3d_core.c 			list_del_init(&req->list);
req              1659 drivers/usb/gadget/udc/mv_u3d_core.c 			curr_req->req.status = status;
req              1909 drivers/usb/gadget/udc/mv_u3d_core.c 	u3d->status_req->req.buf = (char *)u3d->status_req
req              1911 drivers/usb/gadget/udc/mv_u3d_core.c 	u3d->status_req->req.dma = virt_to_phys(u3d->status_req->req.buf);
req               240 drivers/usb/gadget/udc/mv_udc.h 	struct usb_request	req;
req               137 drivers/usb/gadget/udc/mv_udc_core.c 	actual = curr_req->req.length;
req               197 drivers/usb/gadget/udc/mv_udc_core.c 	curr_req->req.actual = actual;
req               207 drivers/usb/gadget/udc/mv_udc_core.c static void done(struct mv_ep *ep, struct mv_req *req, int status)
req               218 drivers/usb/gadget/udc/mv_udc_core.c 	list_del_init(&req->queue);
req               221 drivers/usb/gadget/udc/mv_udc_core.c 	if (req->req.status == -EINPROGRESS)
req               222 drivers/usb/gadget/udc/mv_udc_core.c 		req->req.status = status;
req               224 drivers/usb/gadget/udc/mv_udc_core.c 		status = req->req.status;
req               227 drivers/usb/gadget/udc/mv_udc_core.c 	next_td = req->head;
req               228 drivers/usb/gadget/udc/mv_udc_core.c 	for (j = 0; j < req->dtd_count; j++) {
req               230 drivers/usb/gadget/udc/mv_udc_core.c 		if (j != req->dtd_count - 1)
req               235 drivers/usb/gadget/udc/mv_udc_core.c 	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));
req               239 drivers/usb/gadget/udc/mv_udc_core.c 			ep->ep.name, &req->req, status,
req               240 drivers/usb/gadget/udc/mv_udc_core.c 			req->req.actual, req->req.length);
req               246 drivers/usb/gadget/udc/mv_udc_core.c 	usb_gadget_giveback_request(&ep->ep, &req->req);
req               252 drivers/usb/gadget/udc/mv_udc_core.c static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
req               271 drivers/usb/gadget/udc/mv_udc_core.c 			req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
req               319 drivers/usb/gadget/udc/mv_udc_core.c 	dqh->next_dtd_ptr = req->head->td_dma
req               335 drivers/usb/gadget/udc/mv_udc_core.c static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
req               344 drivers/usb/gadget/udc/mv_udc_core.c 	if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) {
req               345 drivers/usb/gadget/udc/mv_udc_core.c 		dqh = req->ep->dqh;
req               348 drivers/usb/gadget/udc/mv_udc_core.c 		*length = min(req->req.length - req->req.actual,
req               349 drivers/usb/gadget/udc/mv_udc_core.c 				(unsigned)(mult * req->ep->ep.maxpacket));
req               351 drivers/usb/gadget/udc/mv_udc_core.c 		*length = min(req->req.length - req->req.actual,
req               354 drivers/usb/gadget/udc/mv_udc_core.c 	udc = req->ep->udc;
req               366 drivers/usb/gadget/udc/mv_udc_core.c 	temp = (u32)(req->req.dma + req->req.actual);
req               374 drivers/usb/gadget/udc/mv_udc_core.c 	req->req.actual += *length;
req               377 drivers/usb/gadget/udc/mv_udc_core.c 	if (req->req.zero) {
req               378 drivers/usb/gadget/udc/mv_udc_core.c 		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
req               382 drivers/usb/gadget/udc/mv_udc_core.c 	} else if (req->req.length == req->req.actual)
req               391 drivers/usb/gadget/udc/mv_udc_core.c 	if (*is_last && !req->req.no_interrupt)
req               404 drivers/usb/gadget/udc/mv_udc_core.c static int req_to_dtd(struct mv_req *req)
req               412 drivers/usb/gadget/udc/mv_udc_core.c 		dtd = build_dtd(req, &count, &dma, &is_last);
req               418 drivers/usb/gadget/udc/mv_udc_core.c 			req->head = dtd;
req               424 drivers/usb/gadget/udc/mv_udc_core.c 		req->dtd_count++;
req               430 drivers/usb/gadget/udc/mv_udc_core.c 	req->tail = dtd;
req               599 drivers/usb/gadget/udc/mv_udc_core.c 	struct mv_req *req = NULL;
req               601 drivers/usb/gadget/udc/mv_udc_core.c 	req = kzalloc(sizeof *req, gfp_flags);
req               602 drivers/usb/gadget/udc/mv_udc_core.c 	if (!req)
req               605 drivers/usb/gadget/udc/mv_udc_core.c 	req->req.dma = DMA_ADDR_INVALID;
req               606 drivers/usb/gadget/udc/mv_udc_core.c 	INIT_LIST_HEAD(&req->queue);
req               608 drivers/usb/gadget/udc/mv_udc_core.c 	return &req->req;
req               613 drivers/usb/gadget/udc/mv_udc_core.c 	struct mv_req *req = NULL;
req               615 drivers/usb/gadget/udc/mv_udc_core.c 	req = container_of(_req, struct mv_req, req);
req               618 drivers/usb/gadget/udc/mv_udc_core.c 		kfree(req);
req               686 drivers/usb/gadget/udc/mv_udc_core.c 	struct mv_req *req = container_of(_req, struct mv_req, req);
req               692 drivers/usb/gadget/udc/mv_udc_core.c 	if (!_req || !req->req.complete || !req->req.buf
req               693 drivers/usb/gadget/udc/mv_udc_core.c 			|| !list_empty(&req->queue)) {
req               706 drivers/usb/gadget/udc/mv_udc_core.c 	req->ep = ep;
req               713 drivers/usb/gadget/udc/mv_udc_core.c 	req->req.status = -EINPROGRESS;
req               714 drivers/usb/gadget/udc/mv_udc_core.c 	req->req.actual = 0;
req               715 drivers/usb/gadget/udc/mv_udc_core.c 	req->dtd_count = 0;
req               720 drivers/usb/gadget/udc/mv_udc_core.c 	if (!req_to_dtd(req)) {
req               721 drivers/usb/gadget/udc/mv_udc_core.c 		retval = queue_dtd(ep, req);
req               739 drivers/usb/gadget/udc/mv_udc_core.c 	list_add_tail(&req->queue, &ep->queue);
req               750 drivers/usb/gadget/udc/mv_udc_core.c static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
req               756 drivers/usb/gadget/udc/mv_udc_core.c 	dqh->next_dtd_ptr = req->head->td_dma
req               775 drivers/usb/gadget/udc/mv_udc_core.c 	struct mv_req *req;
req               797 drivers/usb/gadget/udc/mv_udc_core.c 	list_for_each_entry(req, &ep->queue, queue) {
req               798 drivers/usb/gadget/udc/mv_udc_core.c 		if (&req->req == _req)
req               801 drivers/usb/gadget/udc/mv_udc_core.c 	if (&req->req != _req) {
req               807 drivers/usb/gadget/udc/mv_udc_core.c 	if (ep->queue.next == &req->queue) {
req               812 drivers/usb/gadget/udc/mv_udc_core.c 		if (req->queue.next != &ep->queue) {
req               815 drivers/usb/gadget/udc/mv_udc_core.c 			next_req = list_entry(req->queue.next,
req               832 drivers/usb/gadget/udc/mv_udc_core.c 		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
req               833 drivers/usb/gadget/udc/mv_udc_core.c 		writel(readl(&req->tail->dtd_next),
req               838 drivers/usb/gadget/udc/mv_udc_core.c 	done(ep, req, -ECONNRESET);
req              1304 drivers/usb/gadget/udc/mv_udc_core.c 		struct mv_req *req = NULL;
req              1305 drivers/usb/gadget/udc/mv_udc_core.c 		req = list_entry(ep->queue.next, struct mv_req, queue);
req              1306 drivers/usb/gadget/udc/mv_udc_core.c 		done(ep, req, status);
req              1425 drivers/usb/gadget/udc/mv_udc_core.c 	struct mv_req *req = container_of(_req, struct mv_req, req);
req              1431 drivers/usb/gadget/udc/mv_udc_core.c 	dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);
req              1434 drivers/usb/gadget/udc/mv_udc_core.c 	if (req->test_mode) {
req              1435 drivers/usb/gadget/udc/mv_udc_core.c 		mv_set_ptc(udc, req->test_mode);
req              1436 drivers/usb/gadget/udc/mv_udc_core.c 		req->test_mode = 0;
req              1445 drivers/usb/gadget/udc/mv_udc_core.c 	struct mv_req *req;
req              1452 drivers/usb/gadget/udc/mv_udc_core.c 	req = udc->status_req;
req              1456 drivers/usb/gadget/udc/mv_udc_core.c 		*((u16 *) req->req.buf) = cpu_to_le16(status);
req              1457 drivers/usb/gadget/udc/mv_udc_core.c 		req->req.length = 2;
req              1459 drivers/usb/gadget/udc/mv_udc_core.c 		req->req.length = 0;
req              1461 drivers/usb/gadget/udc/mv_udc_core.c 	req->ep = ep;
req              1462 drivers/usb/gadget/udc/mv_udc_core.c 	req->req.status = -EINPROGRESS;
req              1463 drivers/usb/gadget/udc/mv_udc_core.c 	req->req.actual = 0;
req              1465 drivers/usb/gadget/udc/mv_udc_core.c 		req->req.complete = prime_status_complete;
req              1466 drivers/usb/gadget/udc/mv_udc_core.c 		req->test_mode = udc->test_mode;
req              1469 drivers/usb/gadget/udc/mv_udc_core.c 		req->req.complete = NULL;
req              1470 drivers/usb/gadget/udc/mv_udc_core.c 	req->dtd_count = 0;
req              1472 drivers/usb/gadget/udc/mv_udc_core.c 	if (req->req.dma == DMA_ADDR_INVALID) {
req              1473 drivers/usb/gadget/udc/mv_udc_core.c 		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
req              1474 drivers/usb/gadget/udc/mv_udc_core.c 				req->req.buf, req->req.length,
req              1476 drivers/usb/gadget/udc/mv_udc_core.c 		req->mapped = 1;
req              1480 drivers/usb/gadget/udc/mv_udc_core.c 	if (!req_to_dtd(req)) {
req              1481 drivers/usb/gadget/udc/mv_udc_core.c 		retval = queue_dtd(ep, req);
req              1494 drivers/usb/gadget/udc/mv_udc_core.c 	list_add_tail(&req->queue, &ep->queue);
req              1498 drivers/usb/gadget/udc/mv_udc_core.c 	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));
req              1722 drivers/usb/gadget/udc/mv_udc_core.c 	struct mv_ep *ep0, struct mv_req *req)
req              1733 drivers/usb/gadget/udc/mv_udc_core.c 	done(ep0, req, 0);
req              1843 drivers/usb/gadget/udc/mv_udc_core.c 			curr_req->req.status = status;
req              2234 drivers/usb/gadget/udc/mv_udc_core.c 	udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
req              2235 drivers/usb/gadget/udc/mv_udc_core.c 	udc->status_req->req.dma = DMA_ADDR_INVALID;
req               319 drivers/usb/gadget/udc/net2272.c 	struct net2272_request *req;
req               324 drivers/usb/gadget/udc/net2272.c 	req = kzalloc(sizeof(*req), gfp_flags);
req               325 drivers/usb/gadget/udc/net2272.c 	if (!req)
req               328 drivers/usb/gadget/udc/net2272.c 	INIT_LIST_HEAD(&req->queue);
req               330 drivers/usb/gadget/udc/net2272.c 	return &req->req;
req               336 drivers/usb/gadget/udc/net2272.c 	struct net2272_request *req;
req               341 drivers/usb/gadget/udc/net2272.c 	req = container_of(_req, struct net2272_request, req);
req               342 drivers/usb/gadget/udc/net2272.c 	WARN_ON(!list_empty(&req->queue));
req               343 drivers/usb/gadget/udc/net2272.c 	kfree(req);
req               347 drivers/usb/gadget/udc/net2272.c net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
req               360 drivers/usb/gadget/udc/net2272.c 	list_del_init(&req->queue);
req               362 drivers/usb/gadget/udc/net2272.c 	if (req->req.status == -EINPROGRESS)
req               363 drivers/usb/gadget/udc/net2272.c 		req->req.status = status;
req               365 drivers/usb/gadget/udc/net2272.c 		status = req->req.status;
req               369 drivers/usb/gadget/udc/net2272.c 		usb_gadget_unmap_request(&dev->gadget, &req->req,
req               374 drivers/usb/gadget/udc/net2272.c 			ep->ep.name, &req->req, status,
req               375 drivers/usb/gadget/udc/net2272.c 			req->req.actual, req->req.length, req->req.buf);
req               380 drivers/usb/gadget/udc/net2272.c 	usb_gadget_giveback_request(&ep->ep, &req->req);
req               387 drivers/usb/gadget/udc/net2272.c 	struct net2272_request *req, unsigned max)
req               394 drivers/usb/gadget/udc/net2272.c 	length = min(req->req.length - req->req.actual, max);
req               395 drivers/usb/gadget/udc/net2272.c 	req->req.actual += length;
req               398 drivers/usb/gadget/udc/net2272.c 		ep->ep.name, req, max, length,
req               423 drivers/usb/gadget/udc/net2272.c net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
req               430 drivers/usb/gadget/udc/net2272.c 		ep->ep.name, req->req.actual, req->req.length);
req               444 drivers/usb/gadget/udc/net2272.c 		buf = req->req.buf + req->req.actual;
req               457 drivers/usb/gadget/udc/net2272.c 		count = net2272_write_packet(ep, buf, req, max);
req               459 drivers/usb/gadget/udc/net2272.c 		if (req->req.length == req->req.actual) {
req               463 drivers/usb/gadget/udc/net2272.c 			net2272_done(ep, req, 0);
req               466 drivers/usb/gadget/udc/net2272.c 				req = list_entry(ep->queue.next,
req               469 drivers/usb/gadget/udc/net2272.c 				status = net2272_kick_dma(ep, req);
req               495 drivers/usb/gadget/udc/net2272.c 	struct net2272_request *req, unsigned avail)
req               501 drivers/usb/gadget/udc/net2272.c 	req->req.actual += avail;
req               504 drivers/usb/gadget/udc/net2272.c 		ep->ep.name, req, avail,
req               536 drivers/usb/gadget/udc/net2272.c net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
req               546 drivers/usb/gadget/udc/net2272.c 		ep->ep.name, req->req.actual, req->req.length);
req               550 drivers/usb/gadget/udc/net2272.c 		buf = req->req.buf + req->req.actual;
req               560 drivers/usb/gadget/udc/net2272.c 		tmp = req->req.length - req->req.actual;
req               572 drivers/usb/gadget/udc/net2272.c 		is_short = net2272_read_packet(ep, buf, req, count);
req               576 drivers/usb/gadget/udc/net2272.c 				req->req.actual == req->req.length)) {
req               580 drivers/usb/gadget/udc/net2272.c 				net2272_done(ep, req, -EOVERFLOW);
req               582 drivers/usb/gadget/udc/net2272.c 				net2272_done(ep, req, 0);
req               595 drivers/usb/gadget/udc/net2272.c 				req = list_entry(ep->queue.next,
req               597 drivers/usb/gadget/udc/net2272.c 				status = net2272_kick_dma(ep, req);
req               612 drivers/usb/gadget/udc/net2272.c 	struct net2272_request *req;
req               617 drivers/usb/gadget/udc/net2272.c 	req = list_entry(ep->queue.next, struct net2272_request, queue);
req               618 drivers/usb/gadget/udc/net2272.c 	(ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
req               702 drivers/usb/gadget/udc/net2272.c net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
req               713 drivers/usb/gadget/udc/net2272.c 	if (req->req.length & 1)
req               717 drivers/usb/gadget/udc/net2272.c 		ep->ep.name, req, (unsigned long long) req->req.dma);
req               728 drivers/usb/gadget/udc/net2272.c 	size = req->req.length;
req               734 drivers/usb/gadget/udc/net2272.c 		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
req               737 drivers/usb/gadget/udc/net2272.c 		req->req.actual += size;
req               744 drivers/usb/gadget/udc/net2272.c 		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
req               810 drivers/usb/gadget/udc/net2272.c 	struct net2272_request *req;
req               817 drivers/usb/gadget/udc/net2272.c 	req = container_of(_req, struct net2272_request, req);
req               819 drivers/usb/gadget/udc/net2272.c 			|| !list_empty(&req->queue))
req               849 drivers/usb/gadget/udc/net2272.c 			net2272_done(ep, req, 0);
req               865 drivers/usb/gadget/udc/net2272.c 				status = net2272_read_fifo(ep, req);
req               873 drivers/usb/gadget/udc/net2272.c 		status = net2272_kick_dma(ep, req);
req               882 drivers/usb/gadget/udc/net2272.c 				status = net2272_write_fifo(ep, req);
req               886 drivers/usb/gadget/udc/net2272.c 					status = net2272_read_fifo(ep, req);
req               892 drivers/usb/gadget/udc/net2272.c 				req = NULL;
req               896 drivers/usb/gadget/udc/net2272.c 	if (likely(req))
req               897 drivers/usb/gadget/udc/net2272.c 		list_add_tail(&req->queue, &ep->queue);
req               911 drivers/usb/gadget/udc/net2272.c 	struct net2272_request *req;
req               917 drivers/usb/gadget/udc/net2272.c 		req = list_entry(ep->queue.next,
req               920 drivers/usb/gadget/udc/net2272.c 		net2272_done(ep, req, -ESHUTDOWN);
req               929 drivers/usb/gadget/udc/net2272.c 	struct net2272_request *req;
req               942 drivers/usb/gadget/udc/net2272.c 	list_for_each_entry(req, &ep->queue, queue) {
req               943 drivers/usb/gadget/udc/net2272.c 		if (&req->req == _req)
req               946 drivers/usb/gadget/udc/net2272.c 	if (&req->req != _req) {
req               953 drivers/usb/gadget/udc/net2272.c 	if (ep->queue.next == &req->queue) {
req               955 drivers/usb/gadget/udc/net2272.c 		net2272_done(ep, req, -ECONNRESET);
req               957 drivers/usb/gadget/udc/net2272.c 	req = NULL;
req              1509 drivers/usb/gadget/udc/net2272.c 	struct net2272_request *req;
req              1514 drivers/usb/gadget/udc/net2272.c 		req = list_entry(ep->queue.next,
req              1517 drivers/usb/gadget/udc/net2272.c 		req = NULL;
req              1519 drivers/usb/gadget/udc/net2272.c 	dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
req              1541 drivers/usb/gadget/udc/net2272.c 		if ((req->req.length % ep->ep.maxpacket != 0) ||
req              1542 drivers/usb/gadget/udc/net2272.c 				req->req.zero)
req              1545 drivers/usb/gadget/udc/net2272.c 		net2272_done(ep, req, 0);
req              1547 drivers/usb/gadget/udc/net2272.c 			req = list_entry(ep->queue.next,
req              1549 drivers/usb/gadget/udc/net2272.c 			status = net2272_kick_dma(ep, req);
req              1575 drivers/usb/gadget/udc/net2272.c 		req->req.actual += len;
req              1587 drivers/usb/gadget/udc/net2272.c 	struct net2272_request *req;
req              1591 drivers/usb/gadget/udc/net2272.c 		req = list_entry(ep->queue.next,
req              1594 drivers/usb/gadget/udc/net2272.c 		req = NULL;
req              1602 drivers/usb/gadget/udc/net2272.c 		ep->ep.name, stat0, stat1, req ? &req->req : NULL);
req              1717 drivers/usb/gadget/udc/net2272.c 		struct net2272_request *req;
req              1734 drivers/usb/gadget/udc/net2272.c 			req = list_entry(ep->queue.next,
req              1736 drivers/usb/gadget/udc/net2272.c 			net2272_done(ep, req,
req              1737 drivers/usb/gadget/udc/net2272.c 				(req->req.actual == req->req.length) ? 0 : -EPROTO);
req               581 drivers/usb/gadget/udc/net2272.h 	struct usb_request req;
req               550 drivers/usb/gadget/udc/net2280.c 	struct net2280_request	*req;
req               558 drivers/usb/gadget/udc/net2280.c 	req = kzalloc(sizeof(*req), gfp_flags);
req               559 drivers/usb/gadget/udc/net2280.c 	if (!req)
req               562 drivers/usb/gadget/udc/net2280.c 	INIT_LIST_HEAD(&req->queue);
req               569 drivers/usb/gadget/udc/net2280.c 				&req->td_dma);
req               571 drivers/usb/gadget/udc/net2280.c 			kfree(req);
req               576 drivers/usb/gadget/udc/net2280.c 		req->td = td;
req               578 drivers/usb/gadget/udc/net2280.c 	return &req->req;
req               584 drivers/usb/gadget/udc/net2280.c 	struct net2280_request	*req;
req               593 drivers/usb/gadget/udc/net2280.c 	req = container_of(_req, struct net2280_request, req);
req               594 drivers/usb/gadget/udc/net2280.c 	WARN_ON(!list_empty(&req->queue));
req               595 drivers/usb/gadget/udc/net2280.c 	if (req->td)
req               596 drivers/usb/gadget/udc/net2280.c 		dma_pool_free(ep->dev->requests, req->td, req->td_dma);
req               597 drivers/usb/gadget/udc/net2280.c 	kfree(req);
req               609 drivers/usb/gadget/udc/net2280.c static void write_fifo(struct net2280_ep *ep, struct usb_request *req)
req               618 drivers/usb/gadget/udc/net2280.c 	if (req) {
req               619 drivers/usb/gadget/udc/net2280.c 		buf = req->buf + req->actual;
req               621 drivers/usb/gadget/udc/net2280.c 		total = req->length - req->actual;
req               635 drivers/usb/gadget/udc/net2280.c 			req);
req               709 drivers/usb/gadget/udc/net2280.c static int read_fifo(struct net2280_ep *ep, struct net2280_request *req)
req               712 drivers/usb/gadget/udc/net2280.c 	u8			*buf = req->req.buf + req->req.actual;
req               746 drivers/usb/gadget/udc/net2280.c 	tmp = req->req.length - req->req.actual;
req               753 drivers/usb/gadget/udc/net2280.c 			req->req.status = -EOVERFLOW;
req               761 drivers/usb/gadget/udc/net2280.c 	req->req.actual += count;
req               768 drivers/usb/gadget/udc/net2280.c 			req, req->req.actual, req->req.length);
req               792 drivers/usb/gadget/udc/net2280.c 	return is_short || req->req.actual == req->req.length;
req               797 drivers/usb/gadget/udc/net2280.c 					struct net2280_request *req, int valid)
req               799 drivers/usb/gadget/udc/net2280.c 	struct net2280_dma	*td = req->td;
req               800 drivers/usb/gadget/udc/net2280.c 	u32			dmacount = req->req.length;
req               813 drivers/usb/gadget/udc/net2280.c 	req->valid = valid;
req               819 drivers/usb/gadget/udc/net2280.c 	td->dmaaddr = cpu_to_le32 (req->req.dma);
req               870 drivers/usb/gadget/udc/net2280.c static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
req               892 drivers/usb/gadget/udc/net2280.c 			writel(req->req.dma, &dma->dmaaddr);
req               893 drivers/usb/gadget/udc/net2280.c 			tmp = min(tmp, req->req.length);
req               896 drivers/usb/gadget/udc/net2280.c 			req->td->dmacount = cpu_to_le32(req->req.length - tmp);
req               899 drivers/usb/gadget/udc/net2280.c 			req->td->dmadesc = 0;
req               900 drivers/usb/gadget/udc/net2280.c 			req->valid = 1;
req               916 drivers/usb/gadget/udc/net2280.c 		if (likely((req->req.length % ep->ep.maxpacket) ||
req               917 drivers/usb/gadget/udc/net2280.c 							req->req.zero)){
req               925 drivers/usb/gadget/udc/net2280.c 	req->td->dmadesc = cpu_to_le32 (ep->td_dma);
req               926 drivers/usb/gadget/udc/net2280.c 	fill_dma_desc(ep, req, 1);
req               928 drivers/usb/gadget/udc/net2280.c 	req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));
req               930 drivers/usb/gadget/udc/net2280.c 	start_queue(ep, tmp, req->td_dma);
req               934 drivers/usb/gadget/udc/net2280.c queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid)
req               941 drivers/usb/gadget/udc/net2280.c 	ep->dummy = req->td;
req               942 drivers/usb/gadget/udc/net2280.c 	req->td = end;
req               945 drivers/usb/gadget/udc/net2280.c 	ep->td_dma = req->td_dma;
req               946 drivers/usb/gadget/udc/net2280.c 	req->td_dma = tmp;
req               950 drivers/usb/gadget/udc/net2280.c 	fill_dma_desc(ep, req, valid);
req               954 drivers/usb/gadget/udc/net2280.c done(struct net2280_ep *ep, struct net2280_request *req, int status)
req               959 drivers/usb/gadget/udc/net2280.c 	list_del_init(&req->queue);
req               961 drivers/usb/gadget/udc/net2280.c 	if (req->req.status == -EINPROGRESS)
req               962 drivers/usb/gadget/udc/net2280.c 		req->req.status = status;
req               964 drivers/usb/gadget/udc/net2280.c 		status = req->req.status;
req               968 drivers/usb/gadget/udc/net2280.c 		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
req               972 drivers/usb/gadget/udc/net2280.c 			ep->ep.name, &req->req, status,
req               973 drivers/usb/gadget/udc/net2280.c 			req->req.actual, req->req.length);
req               978 drivers/usb/gadget/udc/net2280.c 	usb_gadget_giveback_request(&ep->ep, &req->req);
req               988 drivers/usb/gadget/udc/net2280.c 	struct net2280_request	*req;
req              1002 drivers/usb/gadget/udc/net2280.c 	req = container_of(_req, struct net2280_request, req);
req              1004 drivers/usb/gadget/udc/net2280.c 				!list_empty(&req->queue)) {
req              1047 drivers/usb/gadget/udc/net2280.c 			start_dma(ep, req);
req              1052 drivers/usb/gadget/udc/net2280.c 				done(ep, req, 0);
req              1072 drivers/usb/gadget/udc/net2280.c 					if (read_fifo(ep, req) &&
req              1074 drivers/usb/gadget/udc/net2280.c 						done(ep, req, 0);
req              1077 drivers/usb/gadget/udc/net2280.c 						req = NULL;
req              1078 drivers/usb/gadget/udc/net2280.c 					} else if (read_fifo(ep, req) &&
req              1080 drivers/usb/gadget/udc/net2280.c 						done(ep, req, 0);
req              1081 drivers/usb/gadget/udc/net2280.c 						req = NULL;
req              1087 drivers/usb/gadget/udc/net2280.c 				if (req && (s & BIT(NAK_OUT_PACKETS)))
req              1102 drivers/usb/gadget/udc/net2280.c 			expect = likely(req->req.zero ||
req              1103 drivers/usb/gadget/udc/net2280.c 				(req->req.length % ep->ep.maxpacket));
req              1107 drivers/usb/gadget/udc/net2280.c 		queue_dma(ep, req, valid);
req              1112 drivers/usb/gadget/udc/net2280.c 	if (req)
req              1113 drivers/usb/gadget/udc/net2280.c 		list_add_tail(&req->queue, &ep->queue);
req              1126 drivers/usb/gadget/udc/net2280.c dma_done(struct net2280_ep *ep,	struct net2280_request *req, u32 dmacount,
req              1129 drivers/usb/gadget/udc/net2280.c 	req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
req              1130 drivers/usb/gadget/udc/net2280.c 	done(ep, req, status);
req              1141 drivers/usb/gadget/udc/net2280.c 		struct net2280_request	*req;
req              1144 drivers/usb/gadget/udc/net2280.c 		req = list_entry(ep->queue.next,
req              1146 drivers/usb/gadget/udc/net2280.c 		if (!req->valid)
req              1149 drivers/usb/gadget/udc/net2280.c 		req_dma_count = le32_to_cpup(&req->td->dmacount);
req              1157 drivers/usb/gadget/udc/net2280.c 		if (unlikely(req->td->dmadesc == 0)) {
req              1164 drivers/usb/gadget/udc/net2280.c 			dma_done(ep, req, req_dma_count, 0);
req              1168 drivers/usb/gadget/udc/net2280.c 			   (req->req.length % ep->ep.maxpacket) &&
req              1179 drivers/usb/gadget/udc/net2280.c 				req->req.status = -EOVERFLOW;
req              1188 drivers/usb/gadget/udc/net2280.c 						req->req.length);
req              1189 drivers/usb/gadget/udc/net2280.c 					req->req.status = -EOVERFLOW;
req              1193 drivers/usb/gadget/udc/net2280.c 		dma_done(ep, req, req_dma_count, 0);
req              1202 drivers/usb/gadget/udc/net2280.c 	struct net2280_request	*req;
req              1206 drivers/usb/gadget/udc/net2280.c 	req = list_entry(ep->queue.next, struct net2280_request, queue);
req              1208 drivers/usb/gadget/udc/net2280.c 	start_dma(ep, req);
req              1226 drivers/usb/gadget/udc/net2280.c 	struct net2280_request	*req;
req              1233 drivers/usb/gadget/udc/net2280.c 		req = list_entry(ep->queue.next,
req              1236 drivers/usb/gadget/udc/net2280.c 		done(ep, req, -ESHUTDOWN);
req              1244 drivers/usb/gadget/udc/net2280.c 	struct net2280_request	*req;
req              1270 drivers/usb/gadget/udc/net2280.c 	list_for_each_entry(req, &ep->queue, queue) {
req              1271 drivers/usb/gadget/udc/net2280.c 		if (&req->req == _req)
req              1274 drivers/usb/gadget/udc/net2280.c 	if (&req->req != _req) {
req              1282 drivers/usb/gadget/udc/net2280.c 	if (ep->queue.next == &req->queue) {
req              1287 drivers/usb/gadget/udc/net2280.c 			if (likely(ep->queue.next == &req->queue)) {
req              1289 drivers/usb/gadget/udc/net2280.c 				req->td->dmacount = 0;	/* invalidate */
req              1290 drivers/usb/gadget/udc/net2280.c 				dma_done(ep, req,
req              1296 drivers/usb/gadget/udc/net2280.c 			done(ep, req, -ECONNRESET);
req              1298 drivers/usb/gadget/udc/net2280.c 		req = NULL;
req              1301 drivers/usb/gadget/udc/net2280.c 	if (req)
req              1302 drivers/usb/gadget/udc/net2280.c 		done(ep, req, -ECONNRESET);
req              1311 drivers/usb/gadget/udc/net2280.c 			if (req)
req              1821 drivers/usb/gadget/udc/net2280.c 		struct net2280_request		*req;
req              1855 drivers/usb/gadget/udc/net2280.c 		list_for_each_entry(req, &ep->queue, queue) {
req              1856 drivers/usb/gadget/udc/net2280.c 			if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc))
req              1860 drivers/usb/gadget/udc/net2280.c 					&req->req, req->req.actual,
req              1861 drivers/usb/gadget/udc/net2280.c 					req->req.length, req->req.buf,
req              1866 drivers/usb/gadget/udc/net2280.c 					&req->req, req->req.actual,
req              1867 drivers/usb/gadget/udc/net2280.c 					req->req.length, req->req.buf);
req              1876 drivers/usb/gadget/udc/net2280.c 				td = req->td;
req              1879 drivers/usb/gadget/udc/net2280.c 					(u32) req->td_dma,
req              2514 drivers/usb/gadget/udc/net2280.c 	struct net2280_request	*req;
req              2520 drivers/usb/gadget/udc/net2280.c 		req = list_entry(ep->queue.next,
req              2523 drivers/usb/gadget/udc/net2280.c 		req = NULL;
req              2530 drivers/usb/gadget/udc/net2280.c 			ep->ep.name, t, req ? &req->req : NULL);
req              2556 drivers/usb/gadget/udc/net2280.c 				if (!req)
req              2566 drivers/usb/gadget/udc/net2280.c 						!req && !ep->stopped)
req              2579 drivers/usb/gadget/udc/net2280.c 					req &&
req              2580 drivers/usb/gadget/udc/net2280.c 					req->req.actual == req->req.length) ||
req              2581 drivers/usb/gadget/udc/net2280.c 					(ep->responded && !req)) {
req              2585 drivers/usb/gadget/udc/net2280.c 				if (req)
req              2586 drivers/usb/gadget/udc/net2280.c 					done(ep, req, -EOVERFLOW);
req              2587 drivers/usb/gadget/udc/net2280.c 				req = NULL;
req              2592 drivers/usb/gadget/udc/net2280.c 	if (unlikely(!req))
req              2617 drivers/usb/gadget/udc/net2280.c 					req = NULL;
req              2620 drivers/usb/gadget/udc/net2280.c 				req = list_entry(ep->queue.next,
req              2630 drivers/usb/gadget/udc/net2280.c 							!= req->td_dma)
req              2631 drivers/usb/gadget/udc/net2280.c 						req = NULL;
req              2639 drivers/usb/gadget/udc/net2280.c 					if (stuck_req == req &&
req              2641 drivers/usb/gadget/udc/net2280.c 						  req->td_dma && stuck++ > 5) {
req              2645 drivers/usb/gadget/udc/net2280.c 						req = NULL;
req              2650 drivers/usb/gadget/udc/net2280.c 					} else if (stuck_req != req) {
req              2651 drivers/usb/gadget/udc/net2280.c 						stuck_req = req;
req              2666 drivers/usb/gadget/udc/net2280.c 			if (likely(req)) {
req              2667 drivers/usb/gadget/udc/net2280.c 				req->td->dmacount = 0;
req              2669 drivers/usb/gadget/udc/net2280.c 				dma_done(ep, req, count,
req              2694 drivers/usb/gadget/udc/net2280.c 		if (read_fifo(ep, req) && ep->num != 0)
req              2701 drivers/usb/gadget/udc/net2280.c 		len = req->req.length - req->req.actual;
req              2704 drivers/usb/gadget/udc/net2280.c 		req->req.actual += len;
req              2708 drivers/usb/gadget/udc/net2280.c 		if ((req->req.actual == req->req.length) &&
req              2709 drivers/usb/gadget/udc/net2280.c 			(!req->req.zero || len != ep->ep.maxpacket) && ep->num)
req              2719 drivers/usb/gadget/udc/net2280.c 		done(ep, req, 0);
req              2729 drivers/usb/gadget/udc/net2280.c 			req = NULL;
req              2732 drivers/usb/gadget/udc/net2280.c 				req = list_entry(ep->queue.next,
req              2735 drivers/usb/gadget/udc/net2280.c 				req = NULL;
req              2736 drivers/usb/gadget/udc/net2280.c 			if (req && !ep->is_in)
req              2744 drivers/usb/gadget/udc/net2280.c 	if (req && !ep->stopped) {
req              2748 drivers/usb/gadget/udc/net2280.c 			write_fifo(ep, &req->req);
req              3107 drivers/usb/gadget/udc/net2280.c 		struct net2280_request		*req;
req              3135 drivers/usb/gadget/udc/net2280.c 			req = list_entry(ep->queue.next,
req              3137 drivers/usb/gadget/udc/net2280.c 			done(ep, req, (req->req.actual == req->req.length)
req               140 drivers/usb/gadget/udc/net2280.h 	struct usb_request		req;
req               266 drivers/usb/gadget/udc/omap_udc.c 	struct omap_req	*req;
req               268 drivers/usb/gadget/udc/omap_udc.c 	req = kzalloc(sizeof(*req), gfp_flags);
req               269 drivers/usb/gadget/udc/omap_udc.c 	if (!req)
req               272 drivers/usb/gadget/udc/omap_udc.c 	INIT_LIST_HEAD(&req->queue);
req               274 drivers/usb/gadget/udc/omap_udc.c 	return &req->req;
req               280 drivers/usb/gadget/udc/omap_udc.c 	struct omap_req	*req = container_of(_req, struct omap_req, req);
req               282 drivers/usb/gadget/udc/omap_udc.c 	kfree(req);
req               288 drivers/usb/gadget/udc/omap_udc.c done(struct omap_ep *ep, struct omap_req *req, int status)
req               293 drivers/usb/gadget/udc/omap_udc.c 	list_del_init(&req->queue);
req               295 drivers/usb/gadget/udc/omap_udc.c 	if (req->req.status == -EINPROGRESS)
req               296 drivers/usb/gadget/udc/omap_udc.c 		req->req.status = status;
req               298 drivers/usb/gadget/udc/omap_udc.c 		status = req->req.status;
req               301 drivers/usb/gadget/udc/omap_udc.c 		usb_gadget_unmap_request(&udc->gadget, &req->req,
req               308 drivers/usb/gadget/udc/omap_udc.c 			ep->ep.name, &req->req, status,
req               309 drivers/usb/gadget/udc/omap_udc.c 			req->req.actual, req->req.length);
req               314 drivers/usb/gadget/udc/omap_udc.c 	usb_gadget_giveback_request(&ep->ep, &req->req);
req               328 drivers/usb/gadget/udc/omap_udc.c write_packet(u8 *buf, struct omap_req *req, unsigned max)
req               333 drivers/usb/gadget/udc/omap_udc.c 	len = min(req->req.length - req->req.actual, max);
req               334 drivers/usb/gadget/udc/omap_udc.c 	req->req.actual += len;
req               354 drivers/usb/gadget/udc/omap_udc.c static int write_fifo(struct omap_ep *ep, struct omap_req *req)
req               361 drivers/usb/gadget/udc/omap_udc.c 	buf = req->req.buf + req->req.actual;
req               370 drivers/usb/gadget/udc/omap_udc.c 	count = write_packet(buf, req, count);
req               377 drivers/usb/gadget/udc/omap_udc.c 	else if (req->req.length == req->req.actual
req               378 drivers/usb/gadget/udc/omap_udc.c 			&& !req->req.zero)
req               388 drivers/usb/gadget/udc/omap_udc.c 		done(ep, req, 0);
req               393 drivers/usb/gadget/udc/omap_udc.c read_packet(u8 *buf, struct omap_req *req, unsigned avail)
req               398 drivers/usb/gadget/udc/omap_udc.c 	len = min(req->req.length - req->req.actual, avail);
req               399 drivers/usb/gadget/udc/omap_udc.c 	req->req.actual += len;
req               416 drivers/usb/gadget/udc/omap_udc.c static int read_fifo(struct omap_ep *ep, struct omap_req *req)
req               422 drivers/usb/gadget/udc/omap_udc.c 	buf = req->req.buf + req->req.actual;
req               443 drivers/usb/gadget/udc/omap_udc.c 		count = read_packet(buf, req, avail);
req               450 drivers/usb/gadget/udc/omap_udc.c 				req->req.status = -EOVERFLOW;
req               455 drivers/usb/gadget/udc/omap_udc.c 		} else if (req->req.length == req->req.actual)
req               463 drivers/usb/gadget/udc/omap_udc.c 			done(ep, req, 0);
req               513 drivers/usb/gadget/udc/omap_udc.c static void next_in_dma(struct omap_ep *ep, struct omap_req *req)
req               516 drivers/usb/gadget/udc/omap_udc.c 	unsigned	length = req->req.length - req->req.actual;
req               538 drivers/usb/gadget/udc/omap_udc.c 		OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual,
req               547 drivers/usb/gadget/udc/omap_udc.c 	req->dma_bytes = length;
req               550 drivers/usb/gadget/udc/omap_udc.c static void finish_in_dma(struct omap_ep *ep, struct omap_req *req, int status)
req               555 drivers/usb/gadget/udc/omap_udc.c 		req->req.actual += req->dma_bytes;
req               558 drivers/usb/gadget/udc/omap_udc.c 		if (req->req.actual < req->req.length)
req               560 drivers/usb/gadget/udc/omap_udc.c 		if (req->req.zero
req               561 drivers/usb/gadget/udc/omap_udc.c 				&& req->dma_bytes != 0
req               562 drivers/usb/gadget/udc/omap_udc.c 				&& (req->req.actual % ep->maxpacket) == 0)
req               565 drivers/usb/gadget/udc/omap_udc.c 		req->req.actual += dma_src_len(ep, req->req.dma
req               566 drivers/usb/gadget/udc/omap_udc.c 							+ req->req.actual);
req               573 drivers/usb/gadget/udc/omap_udc.c 	done(ep, req, status);
req               576 drivers/usb/gadget/udc/omap_udc.c static void next_out_dma(struct omap_ep *ep, struct omap_req *req)
req               578 drivers/usb/gadget/udc/omap_udc.c 	unsigned packets = req->req.length - req->req.actual;
req               585 drivers/usb/gadget/udc/omap_udc.c 	req->dma_bytes = packets * ep->ep.maxpacket;
req               591 drivers/usb/gadget/udc/omap_udc.c 		OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual,
req               606 drivers/usb/gadget/udc/omap_udc.c finish_out_dma(struct omap_ep *ep, struct omap_req *req, int status, int one)
req               611 drivers/usb/gadget/udc/omap_udc.c 		ep->dma_counter = (u16) (req->req.dma + req->req.actual);
req               612 drivers/usb/gadget/udc/omap_udc.c 	count = dma_dest_len(ep, req->req.dma + req->req.actual);
req               613 drivers/usb/gadget/udc/omap_udc.c 	count += req->req.actual;
req               616 drivers/usb/gadget/udc/omap_udc.c 	if (count <= req->req.length)
req               617 drivers/usb/gadget/udc/omap_udc.c 		req->req.actual = count;
req               619 drivers/usb/gadget/udc/omap_udc.c 	if (count != req->dma_bytes || status)
req               623 drivers/usb/gadget/udc/omap_udc.c 	else if (req->req.actual < req->req.length)
req               630 drivers/usb/gadget/udc/omap_udc.c 	done(ep, req, status);
req               637 drivers/usb/gadget/udc/omap_udc.c 	struct omap_req	*req;
req               645 drivers/usb/gadget/udc/omap_udc.c 			req = container_of(ep->queue.next,
req               647 drivers/usb/gadget/udc/omap_udc.c 			finish_in_dma(ep, req, 0);
req               652 drivers/usb/gadget/udc/omap_udc.c 			req = container_of(ep->queue.next,
req               654 drivers/usb/gadget/udc/omap_udc.c 			next_in_dma(ep, req);
req               664 drivers/usb/gadget/udc/omap_udc.c 			req = container_of(ep->queue.next,
req               666 drivers/usb/gadget/udc/omap_udc.c 			finish_out_dma(ep, req, 0, dman_stat & UDC_DMA_RX_SB);
req               671 drivers/usb/gadget/udc/omap_udc.c 			req = container_of(ep->queue.next,
req               673 drivers/usb/gadget/udc/omap_udc.c 			next_out_dma(ep, req);
req               787 drivers/usb/gadget/udc/omap_udc.c 		struct omap_req	*req;
req               788 drivers/usb/gadget/udc/omap_udc.c 		req = container_of(ep->queue.next, struct omap_req, queue);
req               790 drivers/usb/gadget/udc/omap_udc.c 			(is_in ? next_in_dma : next_out_dma)(ep, req);
req               793 drivers/usb/gadget/udc/omap_udc.c 			(is_in ? write_fifo : read_fifo)(ep, req);
req               808 drivers/usb/gadget/udc/omap_udc.c 	struct omap_req	*req;
req               813 drivers/usb/gadget/udc/omap_udc.c 		req = container_of(ep->queue.next, struct omap_req, queue);
req               815 drivers/usb/gadget/udc/omap_udc.c 		req = NULL;
req               822 drivers/usb/gadget/udc/omap_udc.c 			ep->dma_channel - 1, req);
req               833 drivers/usb/gadget/udc/omap_udc.c 		if (req) {
req               834 drivers/usb/gadget/udc/omap_udc.c 			finish_in_dma(ep, req, -ECONNRESET);
req               850 drivers/usb/gadget/udc/omap_udc.c 		if (req)
req               851 drivers/usb/gadget/udc/omap_udc.c 			finish_out_dma(ep, req, -ECONNRESET, 0);
req               866 drivers/usb/gadget/udc/omap_udc.c 	struct omap_req	*req = container_of(_req, struct omap_req, req);
req               872 drivers/usb/gadget/udc/omap_udc.c 	if (!_req || !req->req.complete || !req->req.buf
req               873 drivers/usb/gadget/udc/omap_udc.c 			|| !list_empty(&req->queue)) {
req               882 drivers/usb/gadget/udc/omap_udc.c 		if (req->req.length > ep->ep.maxpacket)
req               894 drivers/usb/gadget/udc/omap_udc.c 			&& (req->req.length % ep->ep.maxpacket) != 0) {
req               904 drivers/usb/gadget/udc/omap_udc.c 		usb_gadget_map_request(&udc->gadget, &req->req,
req               912 drivers/usb/gadget/udc/omap_udc.c 	req->req.status = -EINPROGRESS;
req               913 drivers/usb/gadget/udc/omap_udc.c 	req->req.actual = 0;
req               933 drivers/usb/gadget/udc/omap_udc.c 			if (!req->req.length) {
req               961 drivers/usb/gadget/udc/omap_udc.c 				done(ep, req, 0);
req               962 drivers/usb/gadget/udc/omap_udc.c 				req = NULL;
req               981 drivers/usb/gadget/udc/omap_udc.c 			(is_in ? next_in_dma : next_out_dma)(ep, req);
req               982 drivers/usb/gadget/udc/omap_udc.c 		else if (req) {
req               983 drivers/usb/gadget/udc/omap_udc.c 			if ((is_in ? write_fifo : read_fifo)(ep, req) == 1)
req               984 drivers/usb/gadget/udc/omap_udc.c 				req = NULL;
req               996 drivers/usb/gadget/udc/omap_udc.c 	if (req != NULL)
req               997 drivers/usb/gadget/udc/omap_udc.c 		list_add_tail(&req->queue, &ep->queue);
req              1006 drivers/usb/gadget/udc/omap_udc.c 	struct omap_req	*req;
req              1015 drivers/usb/gadget/udc/omap_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
req              1016 drivers/usb/gadget/udc/omap_udc.c 		if (&req->req == _req)
req              1019 drivers/usb/gadget/udc/omap_udc.c 	if (&req->req != _req) {
req              1024 drivers/usb/gadget/udc/omap_udc.c 	if (use_dma && ep->dma_channel && ep->queue.next == &req->queue) {
req              1033 drivers/usb/gadget/udc/omap_udc.c 		done(ep, req, -ECONNRESET);
req              1329 drivers/usb/gadget/udc/omap_udc.c 	struct omap_req	*req;
req              1342 drivers/usb/gadget/udc/omap_udc.c 		req = list_entry(ep->queue.next, struct omap_req, queue);
req              1343 drivers/usb/gadget/udc/omap_udc.c 		done(ep, req, status);
req              1392 drivers/usb/gadget/udc/omap_udc.c 	struct omap_req	*req = NULL;
req              1418 drivers/usb/gadget/udc/omap_udc.c 		req = container_of(ep0->queue.next, struct omap_req, queue);
req              1432 drivers/usb/gadget/udc/omap_udc.c 				if (req)
req              1433 drivers/usb/gadget/udc/omap_udc.c 					stat = write_fifo(ep0, req);
req              1435 drivers/usb/gadget/udc/omap_udc.c 				if (!req && udc->ep0_pending) {
req              1445 drivers/usb/gadget/udc/omap_udc.c 				if (req)
req              1446 drivers/usb/gadget/udc/omap_udc.c 					done(ep0, req, 0);
req              1448 drivers/usb/gadget/udc/omap_udc.c 			req = NULL;
req              1470 drivers/usb/gadget/udc/omap_udc.c 				stat = read_fifo(ep0, req);
req              1471 drivers/usb/gadget/udc/omap_udc.c 				if (!req || stat < 0) {
req              1481 drivers/usb/gadget/udc/omap_udc.c 					done(ep0, req, 0);
req              1493 drivers/usb/gadget/udc/omap_udc.c 				if (req)
req              1494 drivers/usb/gadget/udc/omap_udc.c 					done(ep0, req, 0);
req              1870 drivers/usb/gadget/udc/omap_udc.c 			struct omap_req	*req;
req              1873 drivers/usb/gadget/udc/omap_udc.c 			req = container_of(ep->queue.next,
req              1875 drivers/usb/gadget/udc/omap_udc.c 			(void) read_fifo(ep, req);
req              1893 drivers/usb/gadget/udc/omap_udc.c 	struct omap_req	*req;
req              1914 drivers/usb/gadget/udc/omap_udc.c 				req = container_of(ep->queue.next,
req              1916 drivers/usb/gadget/udc/omap_udc.c 				stat = read_fifo(ep, req);
req              1948 drivers/usb/gadget/udc/omap_udc.c 				req = container_of(ep->queue.next,
req              1950 drivers/usb/gadget/udc/omap_udc.c 				(void) write_fifo(ep, req);
req              1977 drivers/usb/gadget/udc/omap_udc.c 		struct omap_req	*req;
req              1981 drivers/usb/gadget/udc/omap_udc.c 		req = list_entry(ep->queue.next, struct omap_req, queue);
req              1993 drivers/usb/gadget/udc/omap_udc.c 				write_fifo(ep, req);
req              2007 drivers/usb/gadget/udc/omap_udc.c 				read_fifo(ep, req);
req              2144 drivers/usb/gadget/udc/omap_udc.c 	struct omap_req	*req;
req              2194 drivers/usb/gadget/udc/omap_udc.c 		list_for_each_entry(req, &ep->queue, queue) {
req              2195 drivers/usb/gadget/udc/omap_udc.c 			unsigned	length = req->req.actual;
req              2200 drivers/usb/gadget/udc/omap_udc.c 					(ep, req->req.dma + length);
req              2204 drivers/usb/gadget/udc/omap_udc.c 					&req->req, length,
req              2205 drivers/usb/gadget/udc/omap_udc.c 					req->req.length, req->req.buf);
req               133 drivers/usb/gadget/udc/omap_udc.h 	struct usb_request		req;
req               395 drivers/usb/gadget/udc/pch_udc.c 	struct usb_request		req;
req              1431 drivers/usb/gadget/udc/pch_udc.c static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
req              1439 drivers/usb/gadget/udc/pch_udc.c 	list_del_init(&req->queue);
req              1442 drivers/usb/gadget/udc/pch_udc.c 	if (req->req.status == -EINPROGRESS)
req              1443 drivers/usb/gadget/udc/pch_udc.c 		req->req.status = status;
req              1445 drivers/usb/gadget/udc/pch_udc.c 		status = req->req.status;
req              1448 drivers/usb/gadget/udc/pch_udc.c 	if (req->dma_mapped) {
req              1449 drivers/usb/gadget/udc/pch_udc.c 		if (req->dma == DMA_ADDR_INVALID) {
req              1451 drivers/usb/gadget/udc/pch_udc.c 				dma_unmap_single(&dev->pdev->dev, req->req.dma,
req              1452 drivers/usb/gadget/udc/pch_udc.c 						 req->req.length,
req              1455 drivers/usb/gadget/udc/pch_udc.c 				dma_unmap_single(&dev->pdev->dev, req->req.dma,
req              1456 drivers/usb/gadget/udc/pch_udc.c 						 req->req.length,
req              1458 drivers/usb/gadget/udc/pch_udc.c 			req->req.dma = DMA_ADDR_INVALID;
req              1461 drivers/usb/gadget/udc/pch_udc.c 				dma_unmap_single(&dev->pdev->dev, req->dma,
req              1462 drivers/usb/gadget/udc/pch_udc.c 						 req->req.length,
req              1465 drivers/usb/gadget/udc/pch_udc.c 				dma_unmap_single(&dev->pdev->dev, req->dma,
req              1466 drivers/usb/gadget/udc/pch_udc.c 						 req->req.length,
req              1468 drivers/usb/gadget/udc/pch_udc.c 				memcpy(req->req.buf, req->buf, req->req.length);
req              1470 drivers/usb/gadget/udc/pch_udc.c 			kfree(req->buf);
req              1471 drivers/usb/gadget/udc/pch_udc.c 			req->dma = DMA_ADDR_INVALID;
req              1473 drivers/usb/gadget/udc/pch_udc.c 		req->dma_mapped = 0;
req              1479 drivers/usb/gadget/udc/pch_udc.c 	usb_gadget_giveback_request(&ep->ep, &req->req);
req              1490 drivers/usb/gadget/udc/pch_udc.c 	struct pch_udc_request	*req;
req              1494 drivers/usb/gadget/udc/pch_udc.c 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
req              1495 drivers/usb/gadget/udc/pch_udc.c 		complete_req(ep, req, -ESHUTDOWN);	/* Remove from list */
req              1509 drivers/usb/gadget/udc/pch_udc.c 				   struct pch_udc_request *req)
req              1511 drivers/usb/gadget/udc/pch_udc.c 	struct pch_udc_data_dma_desc *td = req->td_data;
req              1512 drivers/usb/gadget/udc/pch_udc.c 	unsigned i = req->chain_len;
req              1524 drivers/usb/gadget/udc/pch_udc.c 	req->chain_len = 1;
req              1540 drivers/usb/gadget/udc/pch_udc.c 				    struct pch_udc_request *req,
req              1544 drivers/usb/gadget/udc/pch_udc.c 	struct pch_udc_data_dma_desc *td = req->td_data, *last;
req              1545 drivers/usb/gadget/udc/pch_udc.c 	unsigned long bytes = req->req.length, i = 0;
req              1549 drivers/usb/gadget/udc/pch_udc.c 	if (req->chain_len > 1)
req              1550 drivers/usb/gadget/udc/pch_udc.c 		pch_udc_free_dma_chain(ep->dev, req);
req              1552 drivers/usb/gadget/udc/pch_udc.c 	if (req->dma == DMA_ADDR_INVALID)
req              1553 drivers/usb/gadget/udc/pch_udc.c 		td->dataptr = req->req.dma;
req              1555 drivers/usb/gadget/udc/pch_udc.c 		td->dataptr = req->dma;
req              1568 drivers/usb/gadget/udc/pch_udc.c 		td->dataptr = req->td_data->dataptr + i;
req              1572 drivers/usb/gadget/udc/pch_udc.c 	req->td_data_last = td;
req              1574 drivers/usb/gadget/udc/pch_udc.c 	td->next = req->td_data_phys;
req              1575 drivers/usb/gadget/udc/pch_udc.c 	req->chain_len = len;
req              1580 drivers/usb/gadget/udc/pch_udc.c 		req->chain_len = len;
req              1581 drivers/usb/gadget/udc/pch_udc.c 		pch_udc_free_dma_chain(ep->dev, req);
req              1583 drivers/usb/gadget/udc/pch_udc.c 	req->chain_len = 1;
req              1598 drivers/usb/gadget/udc/pch_udc.c static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
req              1604 drivers/usb/gadget/udc/pch_udc.c 	retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
req              1610 drivers/usb/gadget/udc/pch_udc.c 		req->td_data->status = (req->td_data->status &
req              1621 drivers/usb/gadget/udc/pch_udc.c static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
req              1626 drivers/usb/gadget/udc/pch_udc.c 	complete_req(ep, req, 0);
req              1648 drivers/usb/gadget/udc/pch_udc.c 					 struct pch_udc_request *req)
req              1653 drivers/usb/gadget/udc/pch_udc.c 	td_data = req->td_data;
req              1663 drivers/usb/gadget/udc/pch_udc.c 	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
req              1664 drivers/usb/gadget/udc/pch_udc.c 	req->dma_going = 1;
req              1752 drivers/usb/gadget/udc/pch_udc.c 	struct pch_udc_request		*req;
req              1759 drivers/usb/gadget/udc/pch_udc.c 	req = kzalloc(sizeof *req, gfp);
req              1760 drivers/usb/gadget/udc/pch_udc.c 	if (!req)
req              1762 drivers/usb/gadget/udc/pch_udc.c 	req->req.dma = DMA_ADDR_INVALID;
req              1763 drivers/usb/gadget/udc/pch_udc.c 	req->dma = DMA_ADDR_INVALID;
req              1764 drivers/usb/gadget/udc/pch_udc.c 	INIT_LIST_HEAD(&req->queue);
req              1766 drivers/usb/gadget/udc/pch_udc.c 		return &req->req;
req              1769 drivers/usb/gadget/udc/pch_udc.c 				  &req->td_data_phys);
req              1771 drivers/usb/gadget/udc/pch_udc.c 		kfree(req);
req              1777 drivers/usb/gadget/udc/pch_udc.c 	req->td_data = dma_desc;
req              1778 drivers/usb/gadget/udc/pch_udc.c 	req->td_data_last = dma_desc;
req              1779 drivers/usb/gadget/udc/pch_udc.c 	req->chain_len = 1;
req              1780 drivers/usb/gadget/udc/pch_udc.c 	return &req->req;
req              1793 drivers/usb/gadget/udc/pch_udc.c 	struct pch_udc_request	*req;
req              1799 drivers/usb/gadget/udc/pch_udc.c 	req = container_of(usbreq, struct pch_udc_request, req);
req              1801 drivers/usb/gadget/udc/pch_udc.c 	if (!list_empty(&req->queue))
req              1803 drivers/usb/gadget/udc/pch_udc.c 			__func__, usbep->name, req);
req              1804 drivers/usb/gadget/udc/pch_udc.c 	if (req->td_data != NULL) {
req              1805 drivers/usb/gadget/udc/pch_udc.c 		if (req->chain_len > 1)
req              1806 drivers/usb/gadget/udc/pch_udc.c 			pch_udc_free_dma_chain(ep->dev, req);
req              1807 drivers/usb/gadget/udc/pch_udc.c 		dma_pool_free(ep->dev->data_requests, req->td_data,
req              1808 drivers/usb/gadget/udc/pch_udc.c 			      req->td_data_phys);
req              1810 drivers/usb/gadget/udc/pch_udc.c 	kfree(req);
req              1830 drivers/usb/gadget/udc/pch_udc.c 	struct pch_udc_request	*req;
req              1839 drivers/usb/gadget/udc/pch_udc.c 	req = container_of(usbreq, struct pch_udc_request, req);
req              1840 drivers/usb/gadget/udc/pch_udc.c 	if (!list_empty(&req->queue))
req              1860 drivers/usb/gadget/udc/pch_udc.c 			req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
req              1861 drivers/usb/gadget/udc/pch_udc.c 			if (!req->buf) {
req              1866 drivers/usb/gadget/udc/pch_udc.c 				memcpy(req->buf, usbreq->buf, usbreq->length);
req              1867 drivers/usb/gadget/udc/pch_udc.c 				req->dma = dma_map_single(&dev->pdev->dev,
req              1868 drivers/usb/gadget/udc/pch_udc.c 							  req->buf,
req              1872 drivers/usb/gadget/udc/pch_udc.c 				req->dma = dma_map_single(&dev->pdev->dev,
req              1873 drivers/usb/gadget/udc/pch_udc.c 							  req->buf,
req              1877 drivers/usb/gadget/udc/pch_udc.c 		req->dma_mapped = 1;
req              1880 drivers/usb/gadget/udc/pch_udc.c 		retval = prepare_dma(ep, req, GFP_ATOMIC);
req              1886 drivers/usb/gadget/udc/pch_udc.c 	req->dma_done = 0;
req              1890 drivers/usb/gadget/udc/pch_udc.c 			process_zlp(ep, req);
req              1895 drivers/usb/gadget/udc/pch_udc.c 			pch_udc_start_rxrequest(ep, req);
req              1908 drivers/usb/gadget/udc/pch_udc.c 	if (req != NULL)
req              1909 drivers/usb/gadget/udc/pch_udc.c 		list_add_tail(&req->queue, &ep->queue);
req              1930 drivers/usb/gadget/udc/pch_udc.c 	struct pch_udc_request	*req;
req              1937 drivers/usb/gadget/udc/pch_udc.c 	req = container_of(usbreq, struct pch_udc_request, req);
req              1940 drivers/usb/gadget/udc/pch_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
req              1941 drivers/usb/gadget/udc/pch_udc.c 		if (&req->req == usbreq) {
req              1943 drivers/usb/gadget/udc/pch_udc.c 			if (!list_empty(&req->queue))
req              1944 drivers/usb/gadget/udc/pch_udc.c 				complete_req(ep, req, -ECONNRESET);
req              2085 drivers/usb/gadget/udc/pch_udc.c 	struct pch_udc_request *req;
req              2095 drivers/usb/gadget/udc/pch_udc.c 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
req              2096 drivers/usb/gadget/udc/pch_udc.c 	if (req->dma_going)
req              2098 drivers/usb/gadget/udc/pch_udc.c 	if (!req->td_data)
req              2101 drivers/usb/gadget/udc/pch_udc.c 	req->dma_going = 1;
req              2103 drivers/usb/gadget/udc/pch_udc.c 	td_data = req->td_data;
req              2111 drivers/usb/gadget/udc/pch_udc.c 	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
req              2124 drivers/usb/gadget/udc/pch_udc.c 	struct pch_udc_request *req;
req              2129 drivers/usb/gadget/udc/pch_udc.c 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
req              2130 drivers/usb/gadget/udc/pch_udc.c 	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
req              2133 drivers/usb/gadget/udc/pch_udc.c 	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
req              2137 drivers/usb/gadget/udc/pch_udc.c 		       (req->td_data_last->status & PCH_UDC_RXTX_STS),
req              2142 drivers/usb/gadget/udc/pch_udc.c 	req->req.actual = req->req.length;
req              2143 drivers/usb/gadget/udc/pch_udc.c 	req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
req              2144 drivers/usb/gadget/udc/pch_udc.c 	req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
req              2145 drivers/usb/gadget/udc/pch_udc.c 	complete_req(ep, req, 0);
req              2146 drivers/usb/gadget/udc/pch_udc.c 	req->dma_going = 0;
req              2164 drivers/usb/gadget/udc/pch_udc.c 	struct pch_udc_request *req;
req              2173 drivers/usb/gadget/udc/pch_udc.c 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
req              2176 drivers/usb/gadget/udc/pch_udc.c 	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
req              2178 drivers/usb/gadget/udc/pch_udc.c 		td = req->td_data_last;
req              2180 drivers/usb/gadget/udc/pch_udc.c 		td = req->td_data;
req              2186 drivers/usb/gadget/udc/pch_udc.c 				(req->td_data->status & PCH_UDC_RXTX_STS),
req              2195 drivers/usb/gadget/udc/pch_udc.c 		if (td == req->td_data_last) {
req              2203 drivers/usb/gadget/udc/pch_udc.c 	if (!count && (req->req.length == UDC_DMA_MAXPACKET))
req              2205 drivers/usb/gadget/udc/pch_udc.c 	req->td_data->status |= PCH_UDC_DMA_LAST;
req              2208 drivers/usb/gadget/udc/pch_udc.c 	req->dma_going = 0;
req              2209 drivers/usb/gadget/udc/pch_udc.c 	req->req.actual = count;
req              2210 drivers/usb/gadget/udc/pch_udc.c 	complete_req(ep, req, 0);
req              2213 drivers/usb/gadget/udc/pch_udc.c 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
req              2214 drivers/usb/gadget/udc/pch_udc.c 		pch_udc_start_rxrequest(ep, req);
req              2272 drivers/usb/gadget/udc/pch_udc.c 	struct pch_udc_request		*req = NULL;
req              2280 drivers/usb/gadget/udc/pch_udc.c 		req = list_entry(ep->queue.next, struct pch_udc_request,
req              2282 drivers/usb/gadget/udc/pch_udc.c 		if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
req              2284 drivers/usb/gadget/udc/pch_udc.c 			if (!req->dma_going)
req              2285 drivers/usb/gadget/udc/pch_udc.c 				pch_udc_start_rxrequest(ep, req);
req               513 drivers/usb/gadget/udc/pxa25x_udc.c 	struct pxa25x_request *req;
req               515 drivers/usb/gadget/udc/pxa25x_udc.c 	req = kzalloc(sizeof(*req), gfp_flags);
req               516 drivers/usb/gadget/udc/pxa25x_udc.c 	if (!req)
req               519 drivers/usb/gadget/udc/pxa25x_udc.c 	INIT_LIST_HEAD (&req->queue);
req               520 drivers/usb/gadget/udc/pxa25x_udc.c 	return &req->req;
req               530 drivers/usb/gadget/udc/pxa25x_udc.c 	struct pxa25x_request	*req;
req               532 drivers/usb/gadget/udc/pxa25x_udc.c 	req = container_of (_req, struct pxa25x_request, req);
req               533 drivers/usb/gadget/udc/pxa25x_udc.c 	WARN_ON(!list_empty (&req->queue));
req               534 drivers/usb/gadget/udc/pxa25x_udc.c 	kfree(req);
req               542 drivers/usb/gadget/udc/pxa25x_udc.c static void done(struct pxa25x_ep *ep, struct pxa25x_request *req, int status)
req               546 drivers/usb/gadget/udc/pxa25x_udc.c 	list_del_init(&req->queue);
req               548 drivers/usb/gadget/udc/pxa25x_udc.c 	if (likely (req->req.status == -EINPROGRESS))
req               549 drivers/usb/gadget/udc/pxa25x_udc.c 		req->req.status = status;
req               551 drivers/usb/gadget/udc/pxa25x_udc.c 		status = req->req.status;
req               555 drivers/usb/gadget/udc/pxa25x_udc.c 			ep->ep.name, &req->req, status,
req               556 drivers/usb/gadget/udc/pxa25x_udc.c 			req->req.actual, req->req.length);
req               560 drivers/usb/gadget/udc/pxa25x_udc.c 	usb_gadget_giveback_request(&ep->ep, &req->req);
req               571 drivers/usb/gadget/udc/pxa25x_udc.c write_packet(struct pxa25x_ep *ep, struct pxa25x_request *req, unsigned max)
req               576 drivers/usb/gadget/udc/pxa25x_udc.c 	buf = req->req.buf + req->req.actual;
req               580 drivers/usb/gadget/udc/pxa25x_udc.c 	length = min(req->req.length - req->req.actual, max);
req               581 drivers/usb/gadget/udc/pxa25x_udc.c 	req->req.actual += length;
req               596 drivers/usb/gadget/udc/pxa25x_udc.c write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
req               605 drivers/usb/gadget/udc/pxa25x_udc.c 		count = write_packet(ep, req, max);
req               611 drivers/usb/gadget/udc/pxa25x_udc.c 			if (likely(req->req.length != req->req.actual)
req               612 drivers/usb/gadget/udc/pxa25x_udc.c 					|| req->req.zero)
req               623 drivers/usb/gadget/udc/pxa25x_udc.c 			req->req.length - req->req.actual, req);
req               635 drivers/usb/gadget/udc/pxa25x_udc.c 			done (ep, req, 0);
req               663 drivers/usb/gadget/udc/pxa25x_udc.c write_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
req               669 drivers/usb/gadget/udc/pxa25x_udc.c 	count = write_packet(&dev->ep[0], req, EP0_FIFO_SIZE);
req               676 drivers/usb/gadget/udc/pxa25x_udc.c 		req->req.length - req->req.actual, req);
req               684 drivers/usb/gadget/udc/pxa25x_udc.c 		count = req->req.length;
req               685 drivers/usb/gadget/udc/pxa25x_udc.c 		done (ep, req, 0);
req               724 drivers/usb/gadget/udc/pxa25x_udc.c read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
req               738 drivers/usb/gadget/udc/pxa25x_udc.c 		buf = req->req.buf + req->req.actual;
req               740 drivers/usb/gadget/udc/pxa25x_udc.c 		bufferspace = req->req.length - req->req.actual;
req               745 drivers/usb/gadget/udc/pxa25x_udc.c 			req->req.actual += min (count, bufferspace);
req               752 drivers/usb/gadget/udc/pxa25x_udc.c 			req, req->req.actual, req->req.length);
req               761 drivers/usb/gadget/udc/pxa25x_udc.c 				if (req->req.status != -EOVERFLOW)
req               764 drivers/usb/gadget/udc/pxa25x_udc.c 				req->req.status = -EOVERFLOW;
req               776 drivers/usb/gadget/udc/pxa25x_udc.c 				req->req.status = -EHOSTUNREACH;
req               782 drivers/usb/gadget/udc/pxa25x_udc.c 		if (is_short || req->req.actual == req->req.length) {
req               783 drivers/usb/gadget/udc/pxa25x_udc.c 			done (ep, req, 0);
req               801 drivers/usb/gadget/udc/pxa25x_udc.c read_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
req               806 drivers/usb/gadget/udc/pxa25x_udc.c 	buf = req->req.buf + req->req.actual;
req               807 drivers/usb/gadget/udc/pxa25x_udc.c 	bufferspace = req->req.length - req->req.actual;
req               817 drivers/usb/gadget/udc/pxa25x_udc.c 			if (req->req.status != -EOVERFLOW)
req               819 drivers/usb/gadget/udc/pxa25x_udc.c 			req->req.status = -EOVERFLOW;
req               822 drivers/usb/gadget/udc/pxa25x_udc.c 			req->req.actual++;
req               830 drivers/usb/gadget/udc/pxa25x_udc.c 	if (req->req.actual >= req->req.length)
req               842 drivers/usb/gadget/udc/pxa25x_udc.c 	struct pxa25x_request	*req;
req               847 drivers/usb/gadget/udc/pxa25x_udc.c 	req = container_of(_req, struct pxa25x_request, req);
req               849 drivers/usb/gadget/udc/pxa25x_udc.c 			|| !list_empty(&req->queue))) {
req               871 drivers/usb/gadget/udc/pxa25x_udc.c 			&& req->req.length > usb_endpoint_maxp(ep->ep.desc)))
req               890 drivers/usb/gadget/udc/pxa25x_udc.c 				if (write_ep0_fifo(ep, req))
req               891 drivers/usb/gadget/udc/pxa25x_udc.c 					req = NULL;
req               903 drivers/usb/gadget/udc/pxa25x_udc.c 					done(ep, req, 0);
req               911 drivers/usb/gadget/udc/pxa25x_udc.c 						&& read_ep0_fifo(ep, req))) {
req               913 drivers/usb/gadget/udc/pxa25x_udc.c 					done(ep, req, 0);
req               914 drivers/usb/gadget/udc/pxa25x_udc.c 					req = NULL;
req               926 drivers/usb/gadget/udc/pxa25x_udc.c 					&& write_fifo(ep, req))
req               927 drivers/usb/gadget/udc/pxa25x_udc.c 				req = NULL;
req               929 drivers/usb/gadget/udc/pxa25x_udc.c 				&& read_fifo(ep, req)) {
req               930 drivers/usb/gadget/udc/pxa25x_udc.c 			req = NULL;
req               933 drivers/usb/gadget/udc/pxa25x_udc.c 		if (likely(req && ep->ep.desc))
req               938 drivers/usb/gadget/udc/pxa25x_udc.c 	if (likely(req != NULL))
req               939 drivers/usb/gadget/udc/pxa25x_udc.c 		list_add_tail(&req->queue, &ep->queue);
req               951 drivers/usb/gadget/udc/pxa25x_udc.c 	struct pxa25x_request *req;
req               955 drivers/usb/gadget/udc/pxa25x_udc.c 		req = list_entry(ep->queue.next,
req               958 drivers/usb/gadget/udc/pxa25x_udc.c 		done(ep, req, status);
req               969 drivers/usb/gadget/udc/pxa25x_udc.c 	struct pxa25x_request	*req;
req               979 drivers/usb/gadget/udc/pxa25x_udc.c 	list_for_each_entry (req, &ep->queue, queue) {
req               980 drivers/usb/gadget/udc/pxa25x_udc.c 		if (&req->req == _req)
req               983 drivers/usb/gadget/udc/pxa25x_udc.c 	if (&req->req != _req) {
req               988 drivers/usb/gadget/udc/pxa25x_udc.c 	done(ep, req, -ECONNRESET);
req              1302 drivers/usb/gadget/udc/pxa25x_udc.c 		struct pxa25x_request	*req;
req              1325 drivers/usb/gadget/udc/pxa25x_udc.c 		list_for_each_entry(req, &ep->queue, queue) {
req              1328 drivers/usb/gadget/udc/pxa25x_udc.c 					&req->req, req->req.actual,
req              1329 drivers/usb/gadget/udc/pxa25x_udc.c 					req->req.length, req->req.buf);
req              1632 drivers/usb/gadget/udc/pxa25x_udc.c 	struct pxa25x_request	*req;
req              1640 drivers/usb/gadget/udc/pxa25x_udc.c 		req = NULL;
req              1642 drivers/usb/gadget/udc/pxa25x_udc.c 		req = list_entry(ep->queue.next, struct pxa25x_request, queue);
req              1810 drivers/usb/gadget/udc/pxa25x_udc.c 			if (req)
req              1811 drivers/usb/gadget/udc/pxa25x_udc.c 				done(ep, req, 0);
req              1814 drivers/usb/gadget/udc/pxa25x_udc.c 			if (req) {
req              1816 drivers/usb/gadget/udc/pxa25x_udc.c 				(void) write_ep0_fifo(ep, req);
req              1822 drivers/usb/gadget/udc/pxa25x_udc.c 			if (req) {
req              1824 drivers/usb/gadget/udc/pxa25x_udc.c 				if (read_ep0_fifo(ep, req))
req              1825 drivers/usb/gadget/udc/pxa25x_udc.c 					done(ep, req, 0);
req              1830 drivers/usb/gadget/udc/pxa25x_udc.c 			if (req)
req              1831 drivers/usb/gadget/udc/pxa25x_udc.c 				done(ep, req, 0);
req              1836 drivers/usb/gadget/udc/pxa25x_udc.c 		if (req)
req              1837 drivers/usb/gadget/udc/pxa25x_udc.c 			done(ep, req, 0);
req              1854 drivers/usb/gadget/udc/pxa25x_udc.c 	struct pxa25x_request	*req;
req              1862 drivers/usb/gadget/udc/pxa25x_udc.c 			req = list_entry(ep->queue.next,
req              1865 drivers/usb/gadget/udc/pxa25x_udc.c 			req = NULL;
req              1877 drivers/usb/gadget/udc/pxa25x_udc.c 			if (req && likely ((udccs & UDCCS_BI_TFS) != 0))
req              1878 drivers/usb/gadget/udc/pxa25x_udc.c 				completed = write_fifo(ep, req);
req              1890 drivers/usb/gadget/udc/pxa25x_udc.c 			if (likely(req)) {
req              1891 drivers/usb/gadget/udc/pxa25x_udc.c 				completed = read_fifo(ep, req);
req                60 drivers/usb/gadget/udc/pxa25x_udc.h 	struct usb_request			req;
req               140 drivers/usb/gadget/udc/pxa27x_udc.c 	struct pxa27x_request *req;
req               158 drivers/usb/gadget/udc/pxa27x_udc.c 		list_for_each_entry(req, &ep->queue, queue) {
req               160 drivers/usb/gadget/udc/pxa27x_udc.c 				   &req->req, req->req.actual,
req               161 drivers/usb/gadget/udc/pxa27x_udc.c 				   req->req.length, req->req.buf);
req               577 drivers/usb/gadget/udc/pxa27x_udc.c 	struct pxa27x_request *req;
req               579 drivers/usb/gadget/udc/pxa27x_udc.c 	req = kzalloc(sizeof *req, gfp_flags);
req               580 drivers/usb/gadget/udc/pxa27x_udc.c 	if (!req)
req               583 drivers/usb/gadget/udc/pxa27x_udc.c 	INIT_LIST_HEAD(&req->queue);
req               584 drivers/usb/gadget/udc/pxa27x_udc.c 	req->in_use = 0;
req               585 drivers/usb/gadget/udc/pxa27x_udc.c 	req->udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
req               587 drivers/usb/gadget/udc/pxa27x_udc.c 	return &req->req;
req               599 drivers/usb/gadget/udc/pxa27x_udc.c 	struct pxa27x_request *req;
req               601 drivers/usb/gadget/udc/pxa27x_udc.c 	req = container_of(_req, struct pxa27x_request, req);
req               602 drivers/usb/gadget/udc/pxa27x_udc.c 	WARN_ON(!list_empty(&req->queue));
req               603 drivers/usb/gadget/udc/pxa27x_udc.c 	kfree(req);
req               616 drivers/usb/gadget/udc/pxa27x_udc.c static void ep_add_request(struct pxa_ep *ep, struct pxa27x_request *req)
req               618 drivers/usb/gadget/udc/pxa27x_udc.c 	if (unlikely(!req))
req               620 drivers/usb/gadget/udc/pxa27x_udc.c 	ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req,
req               621 drivers/usb/gadget/udc/pxa27x_udc.c 		req->req.length, udc_ep_readl(ep, UDCCSR));
req               623 drivers/usb/gadget/udc/pxa27x_udc.c 	req->in_use = 1;
req               624 drivers/usb/gadget/udc/pxa27x_udc.c 	list_add_tail(&req->queue, &ep->queue);
req               639 drivers/usb/gadget/udc/pxa27x_udc.c static void ep_del_request(struct pxa_ep *ep, struct pxa27x_request *req)
req               641 drivers/usb/gadget/udc/pxa27x_udc.c 	if (unlikely(!req))
req               643 drivers/usb/gadget/udc/pxa27x_udc.c 	ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req,
req               644 drivers/usb/gadget/udc/pxa27x_udc.c 		req->req.length, udc_ep_readl(ep, UDCCSR));
req               646 drivers/usb/gadget/udc/pxa27x_udc.c 	list_del_init(&req->queue);
req               647 drivers/usb/gadget/udc/pxa27x_udc.c 	req->in_use = 0;
req               663 drivers/usb/gadget/udc/pxa27x_udc.c static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status,
req               668 drivers/usb/gadget/udc/pxa27x_udc.c 	ep_del_request(ep, req);
req               669 drivers/usb/gadget/udc/pxa27x_udc.c 	if (likely(req->req.status == -EINPROGRESS))
req               670 drivers/usb/gadget/udc/pxa27x_udc.c 		req->req.status = status;
req               672 drivers/usb/gadget/udc/pxa27x_udc.c 		status = req->req.status;
req               676 drivers/usb/gadget/udc/pxa27x_udc.c 			&req->req, status,
req               677 drivers/usb/gadget/udc/pxa27x_udc.c 			req->req.actual, req->req.length);
req               682 drivers/usb/gadget/udc/pxa27x_udc.c 	usb_gadget_giveback_request(&req->udc_usb_ep->usb_ep, &req->req);
req               698 drivers/usb/gadget/udc/pxa27x_udc.c static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
req               702 drivers/usb/gadget/udc/pxa27x_udc.c 	req_done(ep, req, 0, pflags);
req               716 drivers/usb/gadget/udc/pxa27x_udc.c static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
req               720 drivers/usb/gadget/udc/pxa27x_udc.c 	ep_end_out_req(ep, req, pflags);
req               734 drivers/usb/gadget/udc/pxa27x_udc.c static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
req               738 drivers/usb/gadget/udc/pxa27x_udc.c 	req_done(ep, req, 0, pflags);
req               752 drivers/usb/gadget/udc/pxa27x_udc.c static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
req               756 drivers/usb/gadget/udc/pxa27x_udc.c 	ep_end_in_req(ep, req, pflags);
req               771 drivers/usb/gadget/udc/pxa27x_udc.c 	struct pxa27x_request	*req;
req               776 drivers/usb/gadget/udc/pxa27x_udc.c 		req = list_entry(ep->queue.next, struct pxa27x_request, queue);
req               777 drivers/usb/gadget/udc/pxa27x_udc.c 		req_done(ep, req, status, &flags);
req               793 drivers/usb/gadget/udc/pxa27x_udc.c static int read_packet(struct pxa_ep *ep, struct pxa27x_request *req)
req               799 drivers/usb/gadget/udc/pxa27x_udc.c 	bufferspace = req->req.length - req->req.actual;
req               801 drivers/usb/gadget/udc/pxa27x_udc.c 	buf = (u32 *)(req->req.buf + req->req.actual);
req               811 drivers/usb/gadget/udc/pxa27x_udc.c 	req->req.actual += count;
req               830 drivers/usb/gadget/udc/pxa27x_udc.c static int write_packet(struct pxa_ep *ep, struct pxa27x_request *req,
req               837 drivers/usb/gadget/udc/pxa27x_udc.c 	buf = (u32 *)(req->req.buf + req->req.actual);
req               840 drivers/usb/gadget/udc/pxa27x_udc.c 	length = min(req->req.length - req->req.actual, max);
req               841 drivers/usb/gadget/udc/pxa27x_udc.c 	req->req.actual += length;
req               872 drivers/usb/gadget/udc/pxa27x_udc.c static int read_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
req               877 drivers/usb/gadget/udc/pxa27x_udc.c 		count = read_packet(ep, req);
req               883 drivers/usb/gadget/udc/pxa27x_udc.c 			&req->req, req->req.actual, req->req.length);
req               886 drivers/usb/gadget/udc/pxa27x_udc.c 		if (is_short || req->req.actual == req->req.length) {
req               907 drivers/usb/gadget/udc/pxa27x_udc.c static int write_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
req               927 drivers/usb/gadget/udc/pxa27x_udc.c 		count = write_packet(ep, req, max);
req               936 drivers/usb/gadget/udc/pxa27x_udc.c 			if (likely(req->req.length > req->req.actual)
req               937 drivers/usb/gadget/udc/pxa27x_udc.c 					|| req->req.zero)
req               957 drivers/usb/gadget/udc/pxa27x_udc.c 			req->req.length - req->req.actual, &req->req);
req               973 drivers/usb/gadget/udc/pxa27x_udc.c static int read_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
req               978 drivers/usb/gadget/udc/pxa27x_udc.c 		count = read_packet(ep, req);
req               985 drivers/usb/gadget/udc/pxa27x_udc.c 			&req->req, req->req.actual, req->req.length);
req               987 drivers/usb/gadget/udc/pxa27x_udc.c 		if (is_short || req->req.actual >= req->req.length) {
req              1011 drivers/usb/gadget/udc/pxa27x_udc.c static int write_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
req              1016 drivers/usb/gadget/udc/pxa27x_udc.c 	count = write_packet(ep, req, EP0_FIFO_SIZE);
req              1028 drivers/usb/gadget/udc/pxa27x_udc.c 		req->req.length - req->req.actual,
req              1029 drivers/usb/gadget/udc/pxa27x_udc.c 		&req->req, udc_ep_readl(ep, UDCCSR));
req              1051 drivers/usb/gadget/udc/pxa27x_udc.c 	struct pxa27x_request	*req;
req              1059 drivers/usb/gadget/udc/pxa27x_udc.c 	req = container_of(_req, struct pxa27x_request, req);
req              1082 drivers/usb/gadget/udc/pxa27x_udc.c 			&& req->req.length > ep->fifo_size))
req              1099 drivers/usb/gadget/udc/pxa27x_udc.c 	if (req->in_use) {
req              1100 drivers/usb/gadget/udc/pxa27x_udc.c 		ep_err(ep, "refusing to queue req %p (already queued)\n", req);
req              1108 drivers/usb/gadget/udc/pxa27x_udc.c 	ep_add_request(ep, req);
req              1115 drivers/usb/gadget/udc/pxa27x_udc.c 				ep_end_in_req(ep, req, NULL);
req              1120 drivers/usb/gadget/udc/pxa27x_udc.c 				ep_del_request(ep, req);
req              1127 drivers/usb/gadget/udc/pxa27x_udc.c 				if (write_ep0_fifo(ep, req))
req              1128 drivers/usb/gadget/udc/pxa27x_udc.c 					ep0_end_in_req(ep, req, NULL);
req              1132 drivers/usb/gadget/udc/pxa27x_udc.c 				if (read_ep0_fifo(ep, req))
req              1133 drivers/usb/gadget/udc/pxa27x_udc.c 					ep0_end_out_req(ep, req, NULL);
req              1138 drivers/usb/gadget/udc/pxa27x_udc.c 			ep_del_request(ep, req);
req              1165 drivers/usb/gadget/udc/pxa27x_udc.c 	struct pxa27x_request	*req;
req              1179 drivers/usb/gadget/udc/pxa27x_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
req              1180 drivers/usb/gadget/udc/pxa27x_udc.c 		if (&req->req == _req) {
req              1188 drivers/usb/gadget/udc/pxa27x_udc.c 		req_done(ep, req, -ECONNRESET, NULL);
req              1819 drivers/usb/gadget/udc/pxa27x_udc.c 				struct pxa27x_request *req)
req              1938 drivers/usb/gadget/udc/pxa27x_udc.c 	struct pxa27x_request	*req = NULL;
req              1942 drivers/usb/gadget/udc/pxa27x_udc.c 		req = list_entry(ep->queue.next, struct pxa27x_request, queue);
req              1946 drivers/usb/gadget/udc/pxa27x_udc.c 		EP0_STNAME(udc), req, udccsr0, udc_ep_readl(ep, UDCBCR),
req              1973 drivers/usb/gadget/udc/pxa27x_udc.c 			handle_ep0_ctrl_req(udc, req);
req              1978 drivers/usb/gadget/udc/pxa27x_udc.c 		if (req && !ep_is_full(ep))
req              1979 drivers/usb/gadget/udc/pxa27x_udc.c 			completed = write_ep0_fifo(ep, req);
req              1981 drivers/usb/gadget/udc/pxa27x_udc.c 			ep0_end_in_req(ep, req, NULL);
req              1984 drivers/usb/gadget/udc/pxa27x_udc.c 		if (epout_has_pkt(ep) && req)
req              1985 drivers/usb/gadget/udc/pxa27x_udc.c 			completed = read_ep0_fifo(ep, req);
req              1987 drivers/usb/gadget/udc/pxa27x_udc.c 			ep0_end_out_req(ep, req, NULL);
req              2021 drivers/usb/gadget/udc/pxa27x_udc.c 	struct pxa27x_request	*req;
req              2038 drivers/usb/gadget/udc/pxa27x_udc.c 			req = list_entry(ep->queue.next,
req              2041 drivers/usb/gadget/udc/pxa27x_udc.c 			req = NULL;
req              2044 drivers/usb/gadget/udc/pxa27x_udc.c 				req, udccsr, loop++);
req              2049 drivers/usb/gadget/udc/pxa27x_udc.c 		if (!req)
req              2054 drivers/usb/gadget/udc/pxa27x_udc.c 				completed = write_fifo(ep, req);
req              2057 drivers/usb/gadget/udc/pxa27x_udc.c 				completed = read_fifo(ep, req);
req              2062 drivers/usb/gadget/udc/pxa27x_udc.c 				ep_end_in_req(ep, req, &flags);
req              2064 drivers/usb/gadget/udc/pxa27x_udc.c 				ep_end_out_req(ep, req, &flags);
req              2083 drivers/usb/gadget/udc/pxa27x_udc.c 	struct usb_ctrlrequest req ;
req              2091 drivers/usb/gadget/udc/pxa27x_udc.c 	req.bRequestType = 0;
req              2092 drivers/usb/gadget/udc/pxa27x_udc.c 	req.bRequest = USB_REQ_SET_CONFIGURATION;
req              2093 drivers/usb/gadget/udc/pxa27x_udc.c 	req.wValue = config;
req              2094 drivers/usb/gadget/udc/pxa27x_udc.c 	req.wIndex = 0;
req              2095 drivers/usb/gadget/udc/pxa27x_udc.c 	req.wLength = 0;
req              2098 drivers/usb/gadget/udc/pxa27x_udc.c 	udc->driver->setup(&udc->gadget, &req);
req              2113 drivers/usb/gadget/udc/pxa27x_udc.c 	struct usb_ctrlrequest  req;
req              2120 drivers/usb/gadget/udc/pxa27x_udc.c 	req.bRequestType = USB_RECIP_INTERFACE;
req              2121 drivers/usb/gadget/udc/pxa27x_udc.c 	req.bRequest = USB_REQ_SET_INTERFACE;
req              2122 drivers/usb/gadget/udc/pxa27x_udc.c 	req.wValue = alt;
req              2123 drivers/usb/gadget/udc/pxa27x_udc.c 	req.wIndex = iface;
req              2124 drivers/usb/gadget/udc/pxa27x_udc.c 	req.wLength = 0;
req              2127 drivers/usb/gadget/udc/pxa27x_udc.c 	udc->driver->setup(&udc->gadget, &req);
req               382 drivers/usb/gadget/udc/pxa27x_udc.h 	struct usb_request			req;
req                35 drivers/usb/gadget/udc/r8a66597-udc.c static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req);
req                37 drivers/usb/gadget/udc/r8a66597-udc.c 				struct r8a66597_request *req);
req                42 drivers/usb/gadget/udc/r8a66597-udc.c 		struct r8a66597_request *req, int status);
req               569 drivers/usb/gadget/udc/r8a66597-udc.c 				struct r8a66597_request *req)
req               576 drivers/usb/gadget/udc/r8a66597-udc.c 	if (req->req.length == 0) {
req               579 drivers/usb/gadget/udc/r8a66597-udc.c 		transfer_complete(ep, req, 0);
req               582 drivers/usb/gadget/udc/r8a66597-udc.c 		irq_ep0_write(ep, req);
req               631 drivers/usb/gadget/udc/r8a66597-udc.c 				struct r8a66597_request *req)
req               663 drivers/usb/gadget/udc/r8a66597-udc.c 	return usb_gadget_map_request(&r8a66597->gadget, &req->req, dma->dir);
req               668 drivers/usb/gadget/udc/r8a66597-udc.c 				struct r8a66597_request *req)
req               673 drivers/usb/gadget/udc/r8a66597-udc.c 	usb_gadget_unmap_request(&r8a66597->gadget, &req->req, ep->dma->dir);
req               686 drivers/usb/gadget/udc/r8a66597-udc.c 			 struct r8a66597_request *req)
req               688 drivers/usb/gadget/udc/r8a66597-udc.c 	BUG_ON(req->req.length == 0);
req               691 drivers/usb/gadget/udc/r8a66597-udc.c 	r8a66597_sudmac_write(r8a66597, req->req.dma, CH0BA);
req               692 drivers/usb/gadget/udc/r8a66597-udc.c 	r8a66597_sudmac_write(r8a66597, req->req.length, CH0BBC);
req               699 drivers/usb/gadget/udc/r8a66597-udc.c 				struct r8a66597_request *req)
req               708 drivers/usb/gadget/udc/r8a66597-udc.c 	if (req->req.length == 0) {
req               709 drivers/usb/gadget/udc/r8a66597-udc.c 		transfer_complete(ep, req, 0);
req               712 drivers/usb/gadget/udc/r8a66597-udc.c 		if (sudmac_alloc_channel(r8a66597, ep, req) < 0) {
req               721 drivers/usb/gadget/udc/r8a66597-udc.c 				irq_packet_write(ep, req);
req               728 drivers/usb/gadget/udc/r8a66597-udc.c 			sudmac_start(r8a66597, ep, req);
req               734 drivers/usb/gadget/udc/r8a66597-udc.c 				struct r8a66597_request *req)
req               750 drivers/usb/gadget/udc/r8a66597-udc.c 				DIV_ROUND_UP(req->req.length, ep->ep.maxpacket),
req               755 drivers/usb/gadget/udc/r8a66597-udc.c 		if (sudmac_alloc_channel(r8a66597, ep, req) < 0) {
req               762 drivers/usb/gadget/udc/r8a66597-udc.c 			sudmac_start(r8a66597, ep, req);
req               768 drivers/usb/gadget/udc/r8a66597-udc.c static void start_packet(struct r8a66597_ep *ep, struct r8a66597_request *req)
req               771 drivers/usb/gadget/udc/r8a66597-udc.c 		start_packet_write(ep, req);
req               773 drivers/usb/gadget/udc/r8a66597-udc.c 		start_packet_read(ep, req);
req               776 drivers/usb/gadget/udc/r8a66597-udc.c static void start_ep0(struct r8a66597_ep *ep, struct r8a66597_request *req)
req               784 drivers/usb/gadget/udc/r8a66597-udc.c 		start_ep0_write(ep, req);
req               787 drivers/usb/gadget/udc/r8a66597-udc.c 		start_packet_read(ep, req);
req               899 drivers/usb/gadget/udc/r8a66597-udc.c 		struct r8a66597_request *req, int status)
req               912 drivers/usb/gadget/udc/r8a66597-udc.c 	list_del_init(&req->queue);
req               914 drivers/usb/gadget/udc/r8a66597-udc.c 		req->req.status = -ESHUTDOWN;
req               916 drivers/usb/gadget/udc/r8a66597-udc.c 		req->req.status = status;
req               922 drivers/usb/gadget/udc/r8a66597-udc.c 		sudmac_free_channel(ep->r8a66597, ep, req);
req               925 drivers/usb/gadget/udc/r8a66597-udc.c 	usb_gadget_giveback_request(&ep->ep, &req->req);
req               929 drivers/usb/gadget/udc/r8a66597-udc.c 		req = get_request_from_ep(ep);
req               931 drivers/usb/gadget/udc/r8a66597-udc.c 			start_packet(ep, req);
req               935 drivers/usb/gadget/udc/r8a66597-udc.c static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req)
req               962 drivers/usb/gadget/udc/r8a66597-udc.c 	buf = req->req.buf + req->req.actual;
req               963 drivers/usb/gadget/udc/r8a66597-udc.c 	size = min(bufsize, req->req.length - req->req.actual);
req               966 drivers/usb/gadget/udc/r8a66597-udc.c 	if (req->req.buf) {
req               974 drivers/usb/gadget/udc/r8a66597-udc.c 	req->req.actual += size;
req               977 drivers/usb/gadget/udc/r8a66597-udc.c 	if ((!req->req.zero && (req->req.actual == req->req.length))
req               990 drivers/usb/gadget/udc/r8a66597-udc.c 				struct r8a66597_request *req)
req              1011 drivers/usb/gadget/udc/r8a66597-udc.c 	buf = req->req.buf + req->req.actual;
req              1012 drivers/usb/gadget/udc/r8a66597-udc.c 	size = min(bufsize, req->req.length - req->req.actual);
req              1015 drivers/usb/gadget/udc/r8a66597-udc.c 	if (req->req.buf) {
req              1025 drivers/usb/gadget/udc/r8a66597-udc.c 	req->req.actual += size;
req              1027 drivers/usb/gadget/udc/r8a66597-udc.c 	if ((!req->req.zero && (req->req.actual == req->req.length))
req              1039 drivers/usb/gadget/udc/r8a66597-udc.c 				struct r8a66597_request *req)
req              1052 drivers/usb/gadget/udc/r8a66597-udc.c 		req->req.status = -EPIPE;
req              1063 drivers/usb/gadget/udc/r8a66597-udc.c 	buf = req->req.buf + req->req.actual;
req              1064 drivers/usb/gadget/udc/r8a66597-udc.c 	req_len = req->req.length - req->req.actual;
req              1071 drivers/usb/gadget/udc/r8a66597-udc.c 	req->req.actual += size;
req              1074 drivers/usb/gadget/udc/r8a66597-udc.c 	if ((!req->req.zero && (req->req.actual == req->req.length))
req              1083 drivers/usb/gadget/udc/r8a66597-udc.c 	if (req->req.buf) {
req              1092 drivers/usb/gadget/udc/r8a66597-udc.c 		transfer_complete(ep, req, 0);
req              1100 drivers/usb/gadget/udc/r8a66597-udc.c 	struct r8a66597_request *req;
req              1107 drivers/usb/gadget/udc/r8a66597-udc.c 		req = get_request_from_ep(ep);
req              1108 drivers/usb/gadget/udc/r8a66597-udc.c 		irq_packet_read(ep, req);
req              1115 drivers/usb/gadget/udc/r8a66597-udc.c 				req = get_request_from_ep(ep);
req              1117 drivers/usb/gadget/udc/r8a66597-udc.c 					irq_packet_write(ep, req);
req              1119 drivers/usb/gadget/udc/r8a66597-udc.c 					irq_packet_read(ep, req);
req              1131 drivers/usb/gadget/udc/r8a66597-udc.c 	struct r8a66597_request *req;
req              1137 drivers/usb/gadget/udc/r8a66597-udc.c 		req = get_request_from_ep(ep);
req              1138 drivers/usb/gadget/udc/r8a66597-udc.c 		irq_ep0_write(ep, req);
req              1150 drivers/usb/gadget/udc/r8a66597-udc.c 					req = get_request_from_ep(ep);
req              1152 drivers/usb/gadget/udc/r8a66597-udc.c 						transfer_complete(ep, req, 0);
req              1209 drivers/usb/gadget/udc/r8a66597-udc.c 		struct r8a66597_request *req;
req              1223 drivers/usb/gadget/udc/r8a66597-udc.c 		req = get_request_from_ep(ep);
req              1228 drivers/usb/gadget/udc/r8a66597-udc.c 			start_packet(ep, req);
req              1371 drivers/usb/gadget/udc/r8a66597-udc.c 		struct r8a66597_request *req;
req              1373 drivers/usb/gadget/udc/r8a66597-udc.c 		req = get_request_from_ep(ep);
req              1374 drivers/usb/gadget/udc/r8a66597-udc.c 		transfer_complete(ep, req, 0);
req              1403 drivers/usb/gadget/udc/r8a66597-udc.c 	struct r8a66597_request *req;
req              1421 drivers/usb/gadget/udc/r8a66597-udc.c 	req = get_request_from_ep(ep);
req              1425 drivers/usb/gadget/udc/r8a66597-udc.c 	req->req.actual += len;
req              1431 drivers/usb/gadget/udc/r8a66597-udc.c 	if ((!req->req.zero && (req->req.actual == req->req.length))
req              1439 drivers/usb/gadget/udc/r8a66597-udc.c 			transfer_complete(ep, req, 0);
req              1561 drivers/usb/gadget/udc/r8a66597-udc.c 	struct r8a66597_request *req;
req              1568 drivers/usb/gadget/udc/r8a66597-udc.c 		req = get_request_from_ep(ep);
req              1570 drivers/usb/gadget/udc/r8a66597-udc.c 		transfer_complete(ep, req, -ECONNRESET);
req              1581 drivers/usb/gadget/udc/r8a66597-udc.c 	struct r8a66597_request *req;
req              1583 drivers/usb/gadget/udc/r8a66597-udc.c 	req = kzalloc(sizeof(struct r8a66597_request), gfp_flags);
req              1584 drivers/usb/gadget/udc/r8a66597-udc.c 	if (!req)
req              1587 drivers/usb/gadget/udc/r8a66597-udc.c 	INIT_LIST_HEAD(&req->queue);
req              1589 drivers/usb/gadget/udc/r8a66597-udc.c 	return &req->req;
req              1594 drivers/usb/gadget/udc/r8a66597-udc.c 	struct r8a66597_request *req;
req              1596 drivers/usb/gadget/udc/r8a66597-udc.c 	req = container_of(_req, struct r8a66597_request, req);
req              1597 drivers/usb/gadget/udc/r8a66597-udc.c 	kfree(req);
req              1604 drivers/usb/gadget/udc/r8a66597-udc.c 	struct r8a66597_request *req;
req              1609 drivers/usb/gadget/udc/r8a66597-udc.c 	req = container_of(_req, struct r8a66597_request, req);
req              1619 drivers/usb/gadget/udc/r8a66597-udc.c 	list_add_tail(&req->queue, &ep->queue);
req              1620 drivers/usb/gadget/udc/r8a66597-udc.c 	req->req.actual = 0;
req              1621 drivers/usb/gadget/udc/r8a66597-udc.c 	req->req.status = -EINPROGRESS;
req              1624 drivers/usb/gadget/udc/r8a66597-udc.c 		start_ep0(ep, req);
req              1627 drivers/usb/gadget/udc/r8a66597-udc.c 			start_packet(ep, req);
req              1638 drivers/usb/gadget/udc/r8a66597-udc.c 	struct r8a66597_request *req;
req              1642 drivers/usb/gadget/udc/r8a66597-udc.c 	req = container_of(_req, struct r8a66597_request, req);
req              1646 drivers/usb/gadget/udc/r8a66597-udc.c 		transfer_complete(ep, req, -ECONNRESET);
req                51 drivers/usb/gadget/udc/r8a66597-udc.h 	struct usb_request	req;
req               305 drivers/usb/gadget/udc/renesas_usb3.c 	struct usb_request	req;
req               376 drivers/usb/gadget/udc/renesas_usb3.c 					    struct renesas_usb3_request, req)
req               898 drivers/usb/gadget/udc/renesas_usb3.c 		usb3_ep->num, usb3_req->req.length, usb3_req->req.actual,
req               900 drivers/usb/gadget/udc/renesas_usb3.c 	usb3_req->req.status = status;
req               904 drivers/usb/gadget/udc/renesas_usb3.c 	usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req);
req              1046 drivers/usb/gadget/udc/renesas_usb3.c 	struct usb_request *req = &usb3_req->req;
req              1048 drivers/usb/gadget/udc/renesas_usb3.c 	if ((!req->zero && req->actual == req->length) ||
req              1049 drivers/usb/gadget/udc/renesas_usb3.c 	    (req->actual % usb3_ep->ep.maxpacket) || (req->length == 0))
req              1081 drivers/usb/gadget/udc/renesas_usb3.c 	int len = min_t(unsigned, usb3_req->req.length - usb3_req->req.actual,
req              1083 drivers/usb/gadget/udc/renesas_usb3.c 	u8 *buf = usb3_req->req.buf + usb3_req->req.actual;
req              1091 drivers/usb/gadget/udc/renesas_usb3.c 	usb3_req->req.actual += len;
req              1127 drivers/usb/gadget/udc/renesas_usb3.c 	int len = min_t(unsigned, usb3_req->req.length - usb3_req->req.actual,
req              1129 drivers/usb/gadget/udc/renesas_usb3.c 	u8 *buf = usb3_req->req.buf + usb3_req->req.actual;
req              1136 drivers/usb/gadget/udc/renesas_usb3.c 	usb3_req->req.actual += len;
req              1162 drivers/usb/gadget/udc/renesas_usb3.c 		if (!usb3_req->req.length)
req              1198 drivers/usb/gadget/udc/renesas_usb3.c 		if (usb3_req->req.length)
req              1253 drivers/usb/gadget/udc/renesas_usb3.c 	if (usb3_req->req.length > USB3_DMA_MAX_XFER_SIZE_ALL_PRDS) {
req              1255 drivers/usb/gadget/udc/renesas_usb3.c 			__func__, usb3_req->req.length);
req              1260 drivers/usb/gadget/udc/renesas_usb3.c 	if (!usb3_req->req.length)
req              1270 drivers/usb/gadget/udc/renesas_usb3.c 		if (usb_gadget_map_request(&usb3->gadget, &usb3_req->req,
req              1292 drivers/usb/gadget/udc/renesas_usb3.c 			usb_gadget_unmap_request(&usb3->gadget, &usb3_req->req,
req              1305 drivers/usb/gadget/udc/renesas_usb3.c 	u32 remain = usb3_req->req.length;
req              1306 drivers/usb/gadget/udc/renesas_usb3.c 	u32 dma = usb3_req->req.dma;
req              1363 drivers/usb/gadget/udc/renesas_usb3.c 	struct usb_request *req = &usb3_req->req;
req              1374 drivers/usb/gadget/udc/renesas_usb3.c 			len = req->length % USB3_DMA_MAX_XFER_SIZE;
req              1378 drivers/usb/gadget/udc/renesas_usb3.c 		req->actual += len - remain;
req              1559 drivers/usb/gadget/udc/renesas_usb3.c 						      struct usb_request *req))
req              1574 drivers/usb/gadget/udc/renesas_usb3.c 					     struct usb_request *req)
req              1753 drivers/usb/gadget/udc/renesas_usb3.c 					  struct usb_request *req)
req              1918 drivers/usb/gadget/udc/renesas_usb3.c 			__func__, usb3_req->req.length, usb3_req->req.actual);
req              2189 drivers/usb/gadget/udc/renesas_usb3.c 	return &usb3_req->req;
req               124 drivers/usb/gadget/udc/s3c-hsudc.c 	struct usb_request req;
req               163 drivers/usb/gadget/udc/s3c-hsudc.c static inline struct s3c_hsudc_req *our_req(struct usb_request *req)
req               165 drivers/usb/gadget/udc/s3c-hsudc.c 	return container_of(req, struct s3c_hsudc_req, req);
req               249 drivers/usb/gadget/udc/s3c-hsudc.c 	hsreq->req.status = status;
req               258 drivers/usb/gadget/udc/s3c-hsudc.c 	usb_gadget_giveback_request(&hsep->ep, &hsreq->req);
req               337 drivers/usb/gadget/udc/s3c-hsudc.c 	buf = hsreq->req.buf + hsreq->req.actual;
req               340 drivers/usb/gadget/udc/s3c-hsudc.c 	length = hsreq->req.length - hsreq->req.actual;
req               342 drivers/usb/gadget/udc/s3c-hsudc.c 	hsreq->req.actual += length;
req               351 drivers/usb/gadget/udc/s3c-hsudc.c 		if (hsreq->req.length != hsreq->req.actual || hsreq->req.zero)
req               389 drivers/usb/gadget/udc/s3c-hsudc.c 	buf = hsreq->req.buf + hsreq->req.actual;
req               391 drivers/usb/gadget/udc/s3c-hsudc.c 	buflen = hsreq->req.length - hsreq->req.actual;
req               396 drivers/usb/gadget/udc/s3c-hsudc.c 	hsreq->req.actual += min(rlen, buflen);
req               405 drivers/usb/gadget/udc/s3c-hsudc.c 			hsreq->req.status = -EOVERFLOW;
req               411 drivers/usb/gadget/udc/s3c-hsudc.c 	if (is_short || hsreq->req.actual == hsreq->req.length) {
req               611 drivers/usb/gadget/udc/s3c-hsudc.c 	hsreq.req.length = 2;
req               612 drivers/usb/gadget/udc/s3c-hsudc.c 	hsreq.req.buf = &reply;
req               613 drivers/usb/gadget/udc/s3c-hsudc.c 	hsreq.req.actual = 0;
req               614 drivers/usb/gadget/udc/s3c-hsudc.c 	hsreq.req.complete = NULL;
req               836 drivers/usb/gadget/udc/s3c-hsudc.c 	return &hsreq->req;
req               939 drivers/usb/gadget/udc/s3c-hsudc.c 		if (&hsreq->req == _req)
req               942 drivers/usb/gadget/udc/s3c-hsudc.c 	if (&hsreq->req != _req) {
req               235 drivers/usb/gadget/udc/s3c2410_udc.c 		struct s3c2410_request *req, int status)
req               239 drivers/usb/gadget/udc/s3c2410_udc.c 	list_del_init(&req->queue);
req               241 drivers/usb/gadget/udc/s3c2410_udc.c 	if (likely(req->req.status == -EINPROGRESS))
req               242 drivers/usb/gadget/udc/s3c2410_udc.c 		req->req.status = status;
req               244 drivers/usb/gadget/udc/s3c2410_udc.c 		status = req->req.status;
req               247 drivers/usb/gadget/udc/s3c2410_udc.c 	usb_gadget_giveback_request(&ep->ep, &req->req);
req               259 drivers/usb/gadget/udc/s3c2410_udc.c 		struct s3c2410_request *req;
req               260 drivers/usb/gadget/udc/s3c2410_udc.c 		req = list_entry(ep->queue.next, struct s3c2410_request,
req               262 drivers/usb/gadget/udc/s3c2410_udc.c 		s3c2410_udc_done(ep, req, status);
req               279 drivers/usb/gadget/udc/s3c2410_udc.c 		struct s3c2410_request *req,
req               282 drivers/usb/gadget/udc/s3c2410_udc.c 	unsigned len = min(req->req.length - req->req.actual, max);
req               283 drivers/usb/gadget/udc/s3c2410_udc.c 	u8 *buf = req->req.buf + req->req.actual;
req               288 drivers/usb/gadget/udc/s3c2410_udc.c 		req->req.actual, req->req.length, len, req->req.actual + len);
req               290 drivers/usb/gadget/udc/s3c2410_udc.c 	req->req.actual += len;
req               303 drivers/usb/gadget/udc/s3c2410_udc.c 		struct s3c2410_request *req)
req               333 drivers/usb/gadget/udc/s3c2410_udc.c 	count = s3c2410_udc_write_packet(fifo_reg, req, ep->ep.maxpacket);
req               338 drivers/usb/gadget/udc/s3c2410_udc.c 	else if (req->req.length != req->req.actual || req->req.zero)
req               347 drivers/usb/gadget/udc/s3c2410_udc.c 			idx, count, req->req.actual, req->req.length,
req               348 drivers/usb/gadget/udc/s3c2410_udc.c 			is_last, req->req.zero);
req               368 drivers/usb/gadget/udc/s3c2410_udc.c 		s3c2410_udc_done(ep, req, 0);
req               389 drivers/usb/gadget/udc/s3c2410_udc.c 		struct s3c2410_request *req, unsigned avail)
req               393 drivers/usb/gadget/udc/s3c2410_udc.c 	len = min(req->req.length - req->req.actual, avail);
req               394 drivers/usb/gadget/udc/s3c2410_udc.c 	req->req.actual += len;
req               404 drivers/usb/gadget/udc/s3c2410_udc.c 				 struct s3c2410_request *req)
req               438 drivers/usb/gadget/udc/s3c2410_udc.c 	if (!req->req.length)
req               441 drivers/usb/gadget/udc/s3c2410_udc.c 	buf = req->req.buf + req->req.actual;
req               442 drivers/usb/gadget/udc/s3c2410_udc.c 	bufferspace = req->req.length - req->req.actual;
req               458 drivers/usb/gadget/udc/s3c2410_udc.c 	fifo_count = s3c2410_udc_read_packet(fifo_reg, buf, req, avail);
req               467 drivers/usb/gadget/udc/s3c2410_udc.c 			req->req.status = -EOVERFLOW;
req               469 drivers/usb/gadget/udc/s3c2410_udc.c 		is_last = (req->req.length <= req->req.actual) ? 1 : 0;
req               492 drivers/usb/gadget/udc/s3c2410_udc.c 		s3c2410_udc_done(ep, req, 0);
req               727 drivers/usb/gadget/udc/s3c2410_udc.c 	struct s3c2410_request	*req;
req               731 drivers/usb/gadget/udc/s3c2410_udc.c 		req = NULL;
req               733 drivers/usb/gadget/udc/s3c2410_udc.c 		req = list_entry(ep->queue.next, struct s3c2410_request, queue);
req               768 drivers/usb/gadget/udc/s3c2410_udc.c 		if (!(ep0csr & S3C2410_UDC_EP0_CSR_IPKRDY) && req)
req               769 drivers/usb/gadget/udc/s3c2410_udc.c 			s3c2410_udc_write_fifo(ep, req);
req               774 drivers/usb/gadget/udc/s3c2410_udc.c 		if ((ep0csr & S3C2410_UDC_EP0_CSR_OPKRDY) && req)
req               775 drivers/usb/gadget/udc/s3c2410_udc.c 			s3c2410_udc_read_fifo(ep, req);
req               796 drivers/usb/gadget/udc/s3c2410_udc.c 	struct s3c2410_request	*req;
req               802 drivers/usb/gadget/udc/s3c2410_udc.c 		req = list_entry(ep->queue.next,
req               805 drivers/usb/gadget/udc/s3c2410_udc.c 		req = NULL;
req               813 drivers/usb/gadget/udc/s3c2410_udc.c 			idx, ep_csr1, req ? 1 : 0);
req               823 drivers/usb/gadget/udc/s3c2410_udc.c 		if (!(ep_csr1 & S3C2410_UDC_ICSR1_PKTRDY) && req)
req               824 drivers/usb/gadget/udc/s3c2410_udc.c 			s3c2410_udc_write_fifo(ep, req);
req               837 drivers/usb/gadget/udc/s3c2410_udc.c 		if ((ep_csr1 & S3C2410_UDC_OCSR1_PKTRDY) && req)
req               838 drivers/usb/gadget/udc/s3c2410_udc.c 			s3c2410_udc_read_fifo(ep, req);
req              1005 drivers/usb/gadget/udc/s3c2410_udc.c static inline struct s3c2410_request *to_s3c2410_req(struct usb_request *req)
req              1007 drivers/usb/gadget/udc/s3c2410_udc.c 	return container_of(req, struct s3c2410_request, req);
req              1131 drivers/usb/gadget/udc/s3c2410_udc.c 	struct s3c2410_request *req;
req              1138 drivers/usb/gadget/udc/s3c2410_udc.c 	req = kzalloc(sizeof(struct s3c2410_request), mem_flags);
req              1139 drivers/usb/gadget/udc/s3c2410_udc.c 	if (!req)
req              1142 drivers/usb/gadget/udc/s3c2410_udc.c 	INIT_LIST_HEAD(&req->queue);
req              1143 drivers/usb/gadget/udc/s3c2410_udc.c 	return &req->req;
req              1153 drivers/usb/gadget/udc/s3c2410_udc.c 	struct s3c2410_request	*req = to_s3c2410_req(_req);
req              1160 drivers/usb/gadget/udc/s3c2410_udc.c 	WARN_ON(!list_empty(&req->queue));
req              1161 drivers/usb/gadget/udc/s3c2410_udc.c 	kfree(req);
req              1170 drivers/usb/gadget/udc/s3c2410_udc.c 	struct s3c2410_request	*req = to_s3c2410_req(_req);
req              1191 drivers/usb/gadget/udc/s3c2410_udc.c 			|| !_req->buf || !list_empty(&req->queue))) {
req              1197 drivers/usb/gadget/udc/s3c2410_udc.c 				!list_empty(&req->queue));
req              1230 drivers/usb/gadget/udc/s3c2410_udc.c 							req)) {
req              1232 drivers/usb/gadget/udc/s3c2410_udc.c 					req = NULL;
req              1240 drivers/usb/gadget/udc/s3c2410_udc.c 							req))) {
req              1242 drivers/usb/gadget/udc/s3c2410_udc.c 					req = NULL;
req              1252 drivers/usb/gadget/udc/s3c2410_udc.c 				&& s3c2410_udc_write_fifo(ep, req)) {
req              1253 drivers/usb/gadget/udc/s3c2410_udc.c 			req = NULL;
req              1256 drivers/usb/gadget/udc/s3c2410_udc.c 				&& s3c2410_udc_read_fifo(ep, req)) {
req              1257 drivers/usb/gadget/udc/s3c2410_udc.c 			req = NULL;
req              1262 drivers/usb/gadget/udc/s3c2410_udc.c 	if (likely(req))
req              1263 drivers/usb/gadget/udc/s3c2410_udc.c 		list_add_tail(&req->queue, &ep->queue);
req              1280 drivers/usb/gadget/udc/s3c2410_udc.c 	struct s3c2410_request	*req = NULL;
req              1294 drivers/usb/gadget/udc/s3c2410_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
req              1295 drivers/usb/gadget/udc/s3c2410_udc.c 		if (&req->req == _req) {
req              1296 drivers/usb/gadget/udc/s3c2410_udc.c 			list_del_init(&req->queue);
req              1306 drivers/usb/gadget/udc/s3c2410_udc.c 			req, _ep->name, _req->length, _req->buf);
req              1308 drivers/usb/gadget/udc/s3c2410_udc.c 		s3c2410_udc_done(ep, req, -ECONNRESET);
req                53 drivers/usb/gadget/udc/s3c2410_udc.h 	struct usb_request		req;
req               510 drivers/usb/gadget/udc/snps_udc_core.c 	udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
req               522 drivers/usb/gadget/udc/snps_udc_core.c 	struct udc_request	*req;
req               532 drivers/usb/gadget/udc/snps_udc_core.c 	req = kzalloc(sizeof(struct udc_request), gfp);
req               533 drivers/usb/gadget/udc/snps_udc_core.c 	if (!req)
req               536 drivers/usb/gadget/udc/snps_udc_core.c 	req->req.dma = DMA_DONT_USE;
req               537 drivers/usb/gadget/udc/snps_udc_core.c 	INIT_LIST_HEAD(&req->queue);
req               542 drivers/usb/gadget/udc/snps_udc_core.c 						&req->td_phys);
req               544 drivers/usb/gadget/udc/snps_udc_core.c 			kfree(req);
req               550 drivers/usb/gadget/udc/snps_udc_core.c 				req, dma_desc,
req               551 drivers/usb/gadget/udc/snps_udc_core.c 				(unsigned long)req->td_phys);
req               557 drivers/usb/gadget/udc/snps_udc_core.c 		req->td_data = dma_desc;
req               558 drivers/usb/gadget/udc/snps_udc_core.c 		req->td_data_last = NULL;
req               559 drivers/usb/gadget/udc/snps_udc_core.c 		req->chain_len = 1;
req               562 drivers/usb/gadget/udc/snps_udc_core.c 	return &req->req;
req               566 drivers/usb/gadget/udc/snps_udc_core.c static void udc_free_dma_chain(struct udc *dev, struct udc_request *req)
req               568 drivers/usb/gadget/udc/snps_udc_core.c 	struct udc_data_dma *td = req->td_data;
req               574 drivers/usb/gadget/udc/snps_udc_core.c 	DBG(dev, "free chain req = %p\n", req);
req               577 drivers/usb/gadget/udc/snps_udc_core.c 	for (i = 1; i < req->chain_len; i++) {
req               590 drivers/usb/gadget/udc/snps_udc_core.c 	struct udc_request	*req;
req               596 drivers/usb/gadget/udc/snps_udc_core.c 	req = container_of(usbreq, struct udc_request, req);
req               597 drivers/usb/gadget/udc/snps_udc_core.c 	VDBG(ep->dev, "free_req req=%p\n", req);
req               598 drivers/usb/gadget/udc/snps_udc_core.c 	BUG_ON(!list_empty(&req->queue));
req               599 drivers/usb/gadget/udc/snps_udc_core.c 	if (req->td_data) {
req               600 drivers/usb/gadget/udc/snps_udc_core.c 		VDBG(ep->dev, "req->td_data=%p\n", req->td_data);
req               603 drivers/usb/gadget/udc/snps_udc_core.c 		if (req->chain_len > 1)
req               604 drivers/usb/gadget/udc/snps_udc_core.c 			udc_free_dma_chain(ep->dev, req);
req               606 drivers/usb/gadget/udc/snps_udc_core.c 		dma_pool_free(ep->dev->data_requests, req->td_data,
req               607 drivers/usb/gadget/udc/snps_udc_core.c 							req->td_phys);
req               609 drivers/usb/gadget/udc/snps_udc_core.c 	kfree(req);
req               613 drivers/usb/gadget/udc/snps_udc_core.c static void udc_init_bna_dummy(struct udc_request *req)
req               615 drivers/usb/gadget/udc/snps_udc_core.c 	if (req) {
req               617 drivers/usb/gadget/udc/snps_udc_core.c 		req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
req               619 drivers/usb/gadget/udc/snps_udc_core.c 		req->td_data->next = req->td_phys;
req               621 drivers/usb/gadget/udc/snps_udc_core.c 		req->td_data->status
req               622 drivers/usb/gadget/udc/snps_udc_core.c 			= AMD_ADDBITS(req->td_data->status,
req               627 drivers/usb/gadget/udc/snps_udc_core.c 			req->td_data, req->td_data->status);
req               635 drivers/usb/gadget/udc/snps_udc_core.c 	struct udc_request *req = NULL;
req               641 drivers/usb/gadget/udc/snps_udc_core.c 		req = container_of(_req, struct udc_request, req);
req               642 drivers/usb/gadget/udc/snps_udc_core.c 		ep->bna_dummy_req = req;
req               643 drivers/usb/gadget/udc/snps_udc_core.c 		udc_init_bna_dummy(req);
req               645 drivers/usb/gadget/udc/snps_udc_core.c 	return req;
req               650 drivers/usb/gadget/udc/snps_udc_core.c udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
req               658 drivers/usb/gadget/udc/snps_udc_core.c 	if (!req || !ep)
req               661 drivers/usb/gadget/udc/snps_udc_core.c 	req_buf = req->buf + req->actual;
req               663 drivers/usb/gadget/udc/snps_udc_core.c 	remaining = req->length - req->actual;
req               723 drivers/usb/gadget/udc/snps_udc_core.c udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
req               734 drivers/usb/gadget/udc/snps_udc_core.c 	buf_space = req->req.length - req->req.actual;
req               735 drivers/usb/gadget/udc/snps_udc_core.c 	buf = req->req.buf + req->req.actual;
req               741 drivers/usb/gadget/udc/snps_udc_core.c 			req->req.status = -EOVERFLOW;
req               745 drivers/usb/gadget/udc/snps_udc_core.c 	req->req.actual += bytes;
req               749 drivers/usb/gadget/udc/snps_udc_core.c 		|| ((req->req.actual == req->req.length) && !req->req.zero))
req               762 drivers/usb/gadget/udc/snps_udc_core.c 	struct udc_request *req,
req               766 drivers/usb/gadget/udc/snps_udc_core.c 	unsigned long bytes = req->req.length;
req               781 drivers/usb/gadget/udc/snps_udc_core.c 		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);
req               784 drivers/usb/gadget/udc/snps_udc_core.c 	len = req->req.length / ep->ep.maxpacket;
req               785 drivers/usb/gadget/udc/snps_udc_core.c 	if (req->req.length % ep->ep.maxpacket)
req               788 drivers/usb/gadget/udc/snps_udc_core.c 	if (len > req->chain_len) {
req               790 drivers/usb/gadget/udc/snps_udc_core.c 		if (req->chain_len > 1)
req               791 drivers/usb/gadget/udc/snps_udc_core.c 			udc_free_dma_chain(ep->dev, req);
req               792 drivers/usb/gadget/udc/snps_udc_core.c 		req->chain_len = len;
req               796 drivers/usb/gadget/udc/snps_udc_core.c 	td = req->td_data;
req               810 drivers/usb/gadget/udc/snps_udc_core.c 						req->td_data->next);
req               818 drivers/usb/gadget/udc/snps_udc_core.c 			td->bufptr = req->req.dma + i; /* assign buffer */
req               833 drivers/usb/gadget/udc/snps_udc_core.c 				req->td_data->next = dma_addr;
req               841 drivers/usb/gadget/udc/snps_udc_core.c 				req->td_data->status =
req               842 drivers/usb/gadget/udc/snps_udc_core.c 					AMD_ADDBITS(req->td_data->status,
req               870 drivers/usb/gadget/udc/snps_udc_core.c 		req->td_data_last = td;
req               877 drivers/usb/gadget/udc/snps_udc_core.c static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
req               884 drivers/usb/gadget/udc/snps_udc_core.c 			ep->num, req->td_data);
req               887 drivers/usb/gadget/udc/snps_udc_core.c 	req->td_data->bufptr = req->req.dma;
req               890 drivers/usb/gadget/udc/snps_udc_core.c 	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
req               895 drivers/usb/gadget/udc/snps_udc_core.c 		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
req               902 drivers/usb/gadget/udc/snps_udc_core.c 			if (req->req.length == ep->ep.maxpacket) {
req               904 drivers/usb/gadget/udc/snps_udc_core.c 				req->td_data->status =
req               905 drivers/usb/gadget/udc/snps_udc_core.c 					AMD_ADDBITS(req->td_data->status,
req               917 drivers/usb/gadget/udc/snps_udc_core.c 				use_dma_ppb, req->req.length,
req               923 drivers/usb/gadget/udc/snps_udc_core.c 		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
req               927 drivers/usb/gadget/udc/snps_udc_core.c 			req->td_data->status =
req               928 drivers/usb/gadget/udc/snps_udc_core.c 				AMD_ADDBITS(req->td_data->status,
req               929 drivers/usb/gadget/udc/snps_udc_core.c 						req->req.length,
req               932 drivers/usb/gadget/udc/snps_udc_core.c 			req->td_data->status =
req               933 drivers/usb/gadget/udc/snps_udc_core.c 				AMD_ADDBITS(req->td_data->status,
req               938 drivers/usb/gadget/udc/snps_udc_core.c 		req->td_data->status =
req               939 drivers/usb/gadget/udc/snps_udc_core.c 			AMD_ADDBITS(req->td_data->status,
req               945 drivers/usb/gadget/udc/snps_udc_core.c 		req->td_data->status =
req               946 drivers/usb/gadget/udc/snps_udc_core.c 			AMD_ADDBITS(req->td_data->status,
req               966 drivers/usb/gadget/udc/snps_udc_core.c complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
req               978 drivers/usb/gadget/udc/snps_udc_core.c 		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);
req               984 drivers/usb/gadget/udc/snps_udc_core.c 	if (req->req.status == -EINPROGRESS)
req               985 drivers/usb/gadget/udc/snps_udc_core.c 		req->req.status = sts;
req               988 drivers/usb/gadget/udc/snps_udc_core.c 	list_del_init(&req->queue);
req               991 drivers/usb/gadget/udc/snps_udc_core.c 		&req->req, req->req.length, ep->ep.name, sts);
req               994 drivers/usb/gadget/udc/snps_udc_core.c 	usb_gadget_giveback_request(&ep->ep, &req->req);
req              1000 drivers/usb/gadget/udc/snps_udc_core.c static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
req              1004 drivers/usb/gadget/udc/snps_udc_core.c 	td = req->td_data;
req              1013 drivers/usb/gadget/udc/snps_udc_core.c static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
req              1018 drivers/usb/gadget/udc/snps_udc_core.c 	td = req->td_data;
req              1060 drivers/usb/gadget/udc/snps_udc_core.c 	struct udc_request	*req;
req              1065 drivers/usb/gadget/udc/snps_udc_core.c 	req = container_of(usbreq, struct udc_request, req);
req              1068 drivers/usb/gadget/udc/snps_udc_core.c 			|| !list_empty(&req->queue))
req              1083 drivers/usb/gadget/udc/snps_udc_core.c 		VDBG(dev, "DMA map req %p\n", req);
req              1091 drivers/usb/gadget/udc/snps_udc_core.c 			req->td_data, usbreq->buf);
req              1096 drivers/usb/gadget/udc/snps_udc_core.c 	req->dma_done = 0;
req              1103 drivers/usb/gadget/udc/snps_udc_core.c 			complete_req(ep, req, 0);
req              1129 drivers/usb/gadget/udc/snps_udc_core.c 			retval = prep_dma(ep, req, GFP_ATOMIC);
req              1135 drivers/usb/gadget/udc/snps_udc_core.c 				req->td_data->status =
req              1136 drivers/usb/gadget/udc/snps_udc_core.c 					AMD_ADDBITS(req->td_data->status,
req              1161 drivers/usb/gadget/udc/snps_udc_core.c 						req->td_data,
req              1166 drivers/usb/gadget/udc/snps_udc_core.c 			writel(req->td_phys, &ep->regs->desptr);
req              1197 drivers/usb/gadget/udc/snps_udc_core.c 			retval = prep_dma(ep, req, GFP_ATOMIC);
req              1204 drivers/usb/gadget/udc/snps_udc_core.c 	if (req) {
req              1206 drivers/usb/gadget/udc/snps_udc_core.c 		list_add_tail(&req->queue, &ep->queue);
req              1211 drivers/usb/gadget/udc/snps_udc_core.c 			req->dma_going = 1;
req              1225 drivers/usb/gadget/udc/snps_udc_core.c 				if (udc_rxfifo_read(ep, req)) {
req              1227 drivers/usb/gadget/udc/snps_udc_core.c 					complete_req(ep, req, 0);
req              1243 drivers/usb/gadget/udc/snps_udc_core.c 	struct udc_request	*req;
req              1247 drivers/usb/gadget/udc/snps_udc_core.c 		req = list_entry(ep->queue.next,
req              1250 drivers/usb/gadget/udc/snps_udc_core.c 		complete_req(ep, req, -ESHUTDOWN);
req              1259 drivers/usb/gadget/udc/snps_udc_core.c 	struct udc_request	*req;
req              1268 drivers/usb/gadget/udc/snps_udc_core.c 	req = container_of(usbreq, struct udc_request, req);
req              1274 drivers/usb/gadget/udc/snps_udc_core.c 	if (ep->queue.next == &req->queue) {
req              1275 drivers/usb/gadget/udc/snps_udc_core.c 		if (ep->dma && req->dma_going) {
req              1289 drivers/usb/gadget/udc/snps_udc_core.c 				dma_sts = AMD_GETBITS(req->td_data->status,
req              1294 drivers/usb/gadget/udc/snps_udc_core.c 					udc_init_bna_dummy(ep->req);
req              1302 drivers/usb/gadget/udc/snps_udc_core.c 	complete_req(ep, req, -ECONNRESET);
req              2088 drivers/usb/gadget/udc/snps_udc_core.c 	struct udc_request	*req;
req              2125 drivers/usb/gadget/udc/snps_udc_core.c 		req = list_entry(ep->queue.next,
req              2128 drivers/usb/gadget/udc/snps_udc_core.c 		req = NULL;
req              2131 drivers/usb/gadget/udc/snps_udc_core.c 	VDBG(dev, "req = %p\n", req);
req              2136 drivers/usb/gadget/udc/snps_udc_core.c 		if (req && udc_rxfifo_read(ep, req)) {
req              2140 drivers/usb/gadget/udc/snps_udc_core.c 			complete_req(ep, req, 0);
req              2143 drivers/usb/gadget/udc/snps_udc_core.c 				req = list_entry(ep->queue.next,
req              2146 drivers/usb/gadget/udc/snps_udc_core.c 				req = NULL;
req              2150 drivers/usb/gadget/udc/snps_udc_core.c 	} else if (!ep->cancel_transfer && req) {
req              2155 drivers/usb/gadget/udc/snps_udc_core.c 			dma_done = AMD_GETBITS(req->td_data->status,
req              2165 drivers/usb/gadget/udc/snps_udc_core.c 				memcpy(req->td_data, ep->bna_dummy_req->td_data,
req              2168 drivers/usb/gadget/udc/snps_udc_core.c 				udc_init_bna_dummy(ep->req);
req              2170 drivers/usb/gadget/udc/snps_udc_core.c 			td = udc_get_last_dma_desc(req);
req              2177 drivers/usb/gadget/udc/snps_udc_core.c 				count = AMD_GETBITS(req->td_data->status,
req              2182 drivers/usb/gadget/udc/snps_udc_core.c 				VDBG(dev, "req->td_data=%p\n", req->td_data);
req              2187 drivers/usb/gadget/udc/snps_udc_core.c 					count = udc_get_ppbdu_rxbytes(req);
req              2192 drivers/usb/gadget/udc/snps_udc_core.c 					if (!count && req->req.length
req              2204 drivers/usb/gadget/udc/snps_udc_core.c 			tmp = req->req.length - req->req.actual;
req              2209 drivers/usb/gadget/udc/snps_udc_core.c 					req->req.status = -EOVERFLOW;
req              2213 drivers/usb/gadget/udc/snps_udc_core.c 			req->req.actual += count;
req              2214 drivers/usb/gadget/udc/snps_udc_core.c 			req->dma_going = 0;
req              2216 drivers/usb/gadget/udc/snps_udc_core.c 			complete_req(ep, req, 0);
req              2220 drivers/usb/gadget/udc/snps_udc_core.c 				req = list_entry(ep->queue.next,
req              2229 drivers/usb/gadget/udc/snps_udc_core.c 				if (req->dma_going == 0) {
req              2231 drivers/usb/gadget/udc/snps_udc_core.c 					if (prep_dma(ep, req, GFP_ATOMIC) != 0)
req              2234 drivers/usb/gadget/udc/snps_udc_core.c 					writel(req->td_phys,
req              2236 drivers/usb/gadget/udc/snps_udc_core.c 					req->dma_going = 1;
req              2303 drivers/usb/gadget/udc/snps_udc_core.c 	struct udc_request *req;
req              2341 drivers/usb/gadget/udc/snps_udc_core.c 			req = list_entry(ep->queue.next,
req              2348 drivers/usb/gadget/udc/snps_udc_core.c 				td = udc_get_last_dma_desc(req);
req              2350 drivers/usb/gadget/udc/snps_udc_core.c 					req->req.actual = req->req.length;
req              2353 drivers/usb/gadget/udc/snps_udc_core.c 				req->req.actual = req->req.length;
req              2356 drivers/usb/gadget/udc/snps_udc_core.c 			if (req->req.actual == req->req.length) {
req              2358 drivers/usb/gadget/udc/snps_udc_core.c 				complete_req(ep, req, 0);
req              2359 drivers/usb/gadget/udc/snps_udc_core.c 				req->dma_going = 0;
req              2381 drivers/usb/gadget/udc/snps_udc_core.c 			req = list_entry(ep->queue.next,
req              2386 drivers/usb/gadget/udc/snps_udc_core.c 				udc_txfifo_write(ep, &req->req);
req              2387 drivers/usb/gadget/udc/snps_udc_core.c 				len = req->req.length - req->req.actual;
req              2390 drivers/usb/gadget/udc/snps_udc_core.c 				req->req.actual += len;
req              2391 drivers/usb/gadget/udc/snps_udc_core.c 				if (req->req.actual == req->req.length
req              2394 drivers/usb/gadget/udc/snps_udc_core.c 					complete_req(ep, req, 0);
req              2397 drivers/usb/gadget/udc/snps_udc_core.c 			} else if (req && !req->dma_going) {
req              2399 drivers/usb/gadget/udc/snps_udc_core.c 					req, req->td_data);
req              2400 drivers/usb/gadget/udc/snps_udc_core.c 				if (req->td_data) {
req              2402 drivers/usb/gadget/udc/snps_udc_core.c 					req->dma_going = 1;
req              2408 drivers/usb/gadget/udc/snps_udc_core.c 					if (use_dma_ppb && req->req.length >
req              2410 drivers/usb/gadget/udc/snps_udc_core.c 						req->td_data->status &=
req              2416 drivers/usb/gadget/udc/snps_udc_core.c 					writel(req->td_phys, &ep->regs->desptr);
req              2419 drivers/usb/gadget/udc/snps_udc_core.c 					req->td_data->status =
req              2421 drivers/usb/gadget/udc/snps_udc_core.c 						req->td_data->status,
req              2671 drivers/usb/gadget/udc/snps_udc_core.c 	struct udc_request *req;
req              2707 drivers/usb/gadget/udc/snps_udc_core.c 				req = list_entry(ep->queue.next,
req              2712 drivers/usb/gadget/udc/snps_udc_core.c 					writel(req->td_phys, &ep->regs->desptr);
req              2714 drivers/usb/gadget/udc/snps_udc_core.c 					req->td_data->status =
req              2716 drivers/usb/gadget/udc/snps_udc_core.c 						req->td_data->status,
req              2728 drivers/usb/gadget/udc/snps_udc_core.c 					req->req.actual = req->req.length;
req              2731 drivers/usb/gadget/udc/snps_udc_core.c 					complete_req(ep, req, 0);
req              2735 drivers/usb/gadget/udc/snps_udc_core.c 					udc_txfifo_write(ep, &req->req);
req              2738 drivers/usb/gadget/udc/snps_udc_core.c 					len = req->req.length - req->req.actual;
req              2742 drivers/usb/gadget/udc/snps_udc_core.c 					req->req.actual += len;
req              2743 drivers/usb/gadget/udc/snps_udc_core.c 					if (req->req.actual == req->req.length
req              2746 drivers/usb/gadget/udc/snps_udc_core.c 						complete_req(ep, req, 0);
req               214 drivers/usb/gadget/udc/trace.h 	TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
req               215 drivers/usb/gadget/udc/trace.h 	TP_ARGS(ep, req, ret),
req               228 drivers/usb/gadget/udc/trace.h 		__field(struct usb_request *, req)
req               232 drivers/usb/gadget/udc/trace.h 		__entry->length = req->length;
req               233 drivers/usb/gadget/udc/trace.h 		__entry->actual = req->actual;
req               234 drivers/usb/gadget/udc/trace.h 		__entry->num_sgs = req->num_sgs;
req               235 drivers/usb/gadget/udc/trace.h 		__entry->num_mapped_sgs = req->num_mapped_sgs;
req               236 drivers/usb/gadget/udc/trace.h 		__entry->stream_id = req->stream_id;
req               237 drivers/usb/gadget/udc/trace.h 		__entry->no_interrupt = req->no_interrupt;
req               238 drivers/usb/gadget/udc/trace.h 		__entry->zero = req->zero;
req               239 drivers/usb/gadget/udc/trace.h 		__entry->short_not_ok = req->short_not_ok;
req               240 drivers/usb/gadget/udc/trace.h 		__entry->status = req->status;
req               242 drivers/usb/gadget/udc/trace.h 		__entry->req = req;
req               245 drivers/usb/gadget/udc/trace.h 		__get_str(name),__entry->req,  __entry->actual, __entry->length,
req               255 drivers/usb/gadget/udc/trace.h 	TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
req               256 drivers/usb/gadget/udc/trace.h 	TP_ARGS(ep, req, ret)
req               260 drivers/usb/gadget/udc/trace.h 	TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
req               261 drivers/usb/gadget/udc/trace.h 	TP_ARGS(ep, req, ret)
req               265 drivers/usb/gadget/udc/trace.h 	TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
req               266 drivers/usb/gadget/udc/trace.h 	TP_ARGS(ep, req, ret)
req               270 drivers/usb/gadget/udc/trace.h 	TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
req               271 drivers/usb/gadget/udc/trace.h 	TP_ARGS(ep, req, ret)
req               275 drivers/usb/gadget/udc/trace.h 	TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
req               276 drivers/usb/gadget/udc/trace.h 	TP_ARGS(ep, req, ret)
req               107 drivers/usb/gadget/udc/udc-xilinx.c #define to_xusb_req(req) container_of((req), struct xusb_req, usb_req)
req               182 drivers/usb/gadget/udc/udc-xilinx.c 	struct xusb_req *req;
req               375 drivers/usb/gadget/udc/udc-xilinx.c static int xudc_dma_send(struct xusb_ep *ep, struct xusb_req *req,
req               383 drivers/usb/gadget/udc/udc-xilinx.c 	src = req->usb_req.dma + req->usb_req.actual;
req               384 drivers/usb/gadget/udc/udc-xilinx.c 	if (req->usb_req.length)
req               429 drivers/usb/gadget/udc/udc-xilinx.c static int xudc_dma_receive(struct xusb_ep *ep, struct xusb_req *req,
req               437 drivers/usb/gadget/udc/udc-xilinx.c 	dst = req->usb_req.dma + req->usb_req.actual;
req               478 drivers/usb/gadget/udc/udc-xilinx.c static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req,
req               489 drivers/usb/gadget/udc/udc-xilinx.c 			rc = xudc_dma_send(ep, req, bufferptr, bufferlen);
req               491 drivers/usb/gadget/udc/udc-xilinx.c 			rc = xudc_dma_receive(ep, req, bufferptr, bufferlen);
req               546 drivers/usb/gadget/udc/udc-xilinx.c static void xudc_done(struct xusb_ep *ep, struct xusb_req *req, int status)
req               550 drivers/usb/gadget/udc/udc-xilinx.c 	list_del_init(&req->queue);
req               552 drivers/usb/gadget/udc/udc-xilinx.c 	if (req->usb_req.status == -EINPROGRESS)
req               553 drivers/usb/gadget/udc/udc-xilinx.c 		req->usb_req.status = status;
req               555 drivers/usb/gadget/udc/udc-xilinx.c 		status = req->usb_req.status;
req               559 drivers/usb/gadget/udc/udc-xilinx.c 			ep->ep_usb.name, req, status);
req               561 drivers/usb/gadget/udc/udc-xilinx.c 	if (udc->dma_enabled && ep->epnumber && req->usb_req.length)
req               562 drivers/usb/gadget/udc/udc-xilinx.c 		usb_gadget_unmap_request(&udc->gadget, &req->usb_req,
req               565 drivers/usb/gadget/udc/udc-xilinx.c 	if (req->usb_req.complete) {
req               567 drivers/usb/gadget/udc/udc-xilinx.c 		req->usb_req.complete(&ep->ep_usb, &req->usb_req);
req               581 drivers/usb/gadget/udc/udc-xilinx.c static int xudc_read_fifo(struct xusb_ep *ep, struct xusb_req *req)
req               606 drivers/usb/gadget/udc/udc-xilinx.c 	buf = req->usb_req.buf + req->usb_req.actual;
req               608 drivers/usb/gadget/udc/udc-xilinx.c 	bufferspace = req->usb_req.length - req->usb_req.actual;
req               617 drivers/usb/gadget/udc/udc-xilinx.c 		if (req->usb_req.status != -EOVERFLOW)
req               620 drivers/usb/gadget/udc/udc-xilinx.c 		req->usb_req.status = -EOVERFLOW;
req               621 drivers/usb/gadget/udc/udc-xilinx.c 		xudc_done(ep, req, -EOVERFLOW);
req               625 drivers/usb/gadget/udc/udc-xilinx.c 	ret = xudc_eptxrx(ep, req, buf, count);
req               628 drivers/usb/gadget/udc/udc-xilinx.c 		req->usb_req.actual += min(count, bufferspace);
req               630 drivers/usb/gadget/udc/udc-xilinx.c 			ep->ep_usb.name, count, is_short ? "/S" : "", req,
req               631 drivers/usb/gadget/udc/udc-xilinx.c 			req->usb_req.actual, req->usb_req.length);
req               634 drivers/usb/gadget/udc/udc-xilinx.c 		if ((req->usb_req.actual == req->usb_req.length) || is_short) {
req               635 drivers/usb/gadget/udc/udc-xilinx.c 			if (udc->dma_enabled && req->usb_req.length)
req               637 drivers/usb/gadget/udc/udc-xilinx.c 							req->usb_req.dma,
req               638 drivers/usb/gadget/udc/udc-xilinx.c 							req->usb_req.actual,
req               640 drivers/usb/gadget/udc/udc-xilinx.c 			xudc_done(ep, req, 0);
req               654 drivers/usb/gadget/udc/udc-xilinx.c 		xudc_done(ep, req, -ECONNRESET);
req               671 drivers/usb/gadget/udc/udc-xilinx.c static int xudc_write_fifo(struct xusb_ep *ep, struct xusb_req *req)
req               682 drivers/usb/gadget/udc/udc-xilinx.c 	buf = req->usb_req.buf + req->usb_req.actual;
req               684 drivers/usb/gadget/udc/udc-xilinx.c 	length = req->usb_req.length - req->usb_req.actual;
req               687 drivers/usb/gadget/udc/udc-xilinx.c 	ret = xudc_eptxrx(ep, req, buf, length);
req               690 drivers/usb/gadget/udc/udc-xilinx.c 		req->usb_req.actual += length;
req               694 drivers/usb/gadget/udc/udc-xilinx.c 			if (likely(req->usb_req.length !=
req               695 drivers/usb/gadget/udc/udc-xilinx.c 				   req->usb_req.actual) || req->usb_req.zero)
req               703 drivers/usb/gadget/udc/udc-xilinx.c 			req->usb_req.length - req->usb_req.actual, req);
req               706 drivers/usb/gadget/udc/udc-xilinx.c 			xudc_done(ep, req, 0);
req               716 drivers/usb/gadget/udc/udc-xilinx.c 		xudc_done(ep, req, -ECONNRESET);
req               731 drivers/usb/gadget/udc/udc-xilinx.c 	struct xusb_req *req;
req               734 drivers/usb/gadget/udc/udc-xilinx.c 		req = list_first_entry(&ep->queue, struct xusb_req, queue);
req               735 drivers/usb/gadget/udc/udc-xilinx.c 		xudc_done(ep, req, status);
req               966 drivers/usb/gadget/udc/udc-xilinx.c 	struct xusb_req *req;
req               968 drivers/usb/gadget/udc/udc-xilinx.c 	req = kzalloc(sizeof(*req), gfp_flags);
req               969 drivers/usb/gadget/udc/udc-xilinx.c 	if (!req)
req               972 drivers/usb/gadget/udc/udc-xilinx.c 	req->ep = ep;
req               973 drivers/usb/gadget/udc/udc-xilinx.c 	INIT_LIST_HEAD(&req->queue);
req               974 drivers/usb/gadget/udc/udc-xilinx.c 	return &req->usb_req;
req               984 drivers/usb/gadget/udc/udc-xilinx.c 	struct xusb_req *req = to_xusb_req(_req);
req               986 drivers/usb/gadget/udc/udc-xilinx.c 	kfree(req);
req               996 drivers/usb/gadget/udc/udc-xilinx.c static int __xudc_ep0_queue(struct xusb_ep *ep0, struct xusb_req *req)
req              1011 drivers/usb/gadget/udc/udc-xilinx.c 	req->usb_req.status = -EINPROGRESS;
req              1012 drivers/usb/gadget/udc/udc-xilinx.c 	req->usb_req.actual = 0;
req              1014 drivers/usb/gadget/udc/udc-xilinx.c 	list_add_tail(&req->queue, &ep0->queue);
req              1017 drivers/usb/gadget/udc/udc-xilinx.c 		prefetch(req->usb_req.buf);
req              1018 drivers/usb/gadget/udc/udc-xilinx.c 		length = req->usb_req.length;
req              1021 drivers/usb/gadget/udc/udc-xilinx.c 		length = req->usb_req.actual = min_t(u32, length,
req              1023 drivers/usb/gadget/udc/udc-xilinx.c 		memcpy(corebuf, req->usb_req.buf, length);
req              1050 drivers/usb/gadget/udc/udc-xilinx.c 	struct xusb_req *req	= to_xusb_req(_req);
req              1057 drivers/usb/gadget/udc/udc-xilinx.c 	ret = __xudc_ep0_queue(ep0, req);
req              1074 drivers/usb/gadget/udc/udc-xilinx.c 	struct xusb_req *req = to_xusb_req(_req);
req              1097 drivers/usb/gadget/udc/udc-xilinx.c 		ret = usb_gadget_map_request(&udc->gadget, &req->usb_req,
req              1110 drivers/usb/gadget/udc/udc-xilinx.c 			if (!xudc_write_fifo(ep, req))
req              1111 drivers/usb/gadget/udc/udc-xilinx.c 				req = NULL;
req              1114 drivers/usb/gadget/udc/udc-xilinx.c 			if (!xudc_read_fifo(ep, req))
req              1115 drivers/usb/gadget/udc/udc-xilinx.c 				req = NULL;
req              1119 drivers/usb/gadget/udc/udc-xilinx.c 	if (req != NULL)
req              1120 drivers/usb/gadget/udc/udc-xilinx.c 		list_add_tail(&req->queue, &ep->queue);
req              1136 drivers/usb/gadget/udc/udc-xilinx.c 	struct xusb_req *req	= to_xusb_req(_req);
req              1142 drivers/usb/gadget/udc/udc-xilinx.c 	list_for_each_entry(req, &ep->queue, queue) {
req              1143 drivers/usb/gadget/udc/udc-xilinx.c 		if (&req->usb_req == _req)
req              1146 drivers/usb/gadget/udc/udc-xilinx.c 	if (&req->usb_req != _req) {
req              1150 drivers/usb/gadget/udc/udc-xilinx.c 	xudc_done(ep, req, -ECONNRESET);
req              1576 drivers/usb/gadget/udc/udc-xilinx.c 	struct xusb_req *req	= udc->req;
req              1579 drivers/usb/gadget/udc/udc-xilinx.c 	req->usb_req.length = 0;
req              1580 drivers/usb/gadget/udc/udc-xilinx.c 	ret = __xudc_ep0_queue(ep0, req);
req              1597 drivers/usb/gadget/udc/udc-xilinx.c 	struct xusb_req *req	= udc->req;
req              1633 drivers/usb/gadget/udc/udc-xilinx.c 	req->usb_req.length = 2;
req              1634 drivers/usb/gadget/udc/udc-xilinx.c 	*(u16 *)req->usb_req.buf = cpu_to_le16(status);
req              1635 drivers/usb/gadget/udc/udc-xilinx.c 	ret = __xudc_ep0_queue(ep0, req);
req              1652 drivers/usb/gadget/udc/udc-xilinx.c 	struct xusb_req *req	= udc->req;
req              1720 drivers/usb/gadget/udc/udc-xilinx.c 	req->usb_req.length = 0;
req              1721 drivers/usb/gadget/udc/udc-xilinx.c 	ret = __xudc_ep0_queue(ep0, req);
req              1804 drivers/usb/gadget/udc/udc-xilinx.c 	struct xusb_req *req;
req              1809 drivers/usb/gadget/udc/udc-xilinx.c 	req = list_first_entry(&ep0->queue, struct xusb_req, queue);
req              1819 drivers/usb/gadget/udc/udc-xilinx.c 		req->usb_req.actual = req->usb_req.length;
req              1820 drivers/usb/gadget/udc/udc-xilinx.c 		xudc_done(ep0, req, 0);
req              1828 drivers/usb/gadget/udc/udc-xilinx.c 		buffer = req->usb_req.buf + req->usb_req.actual;
req              1829 drivers/usb/gadget/udc/udc-xilinx.c 		req->usb_req.actual = req->usb_req.actual + bytes_to_rx;
req              1832 drivers/usb/gadget/udc/udc-xilinx.c 		if (req->usb_req.length == req->usb_req.actual) {
req              1853 drivers/usb/gadget/udc/udc-xilinx.c 	struct xusb_req *req;
req              1862 drivers/usb/gadget/udc/udc-xilinx.c 	req = list_first_entry(&ep0->queue, struct xusb_req, queue);
req              1863 drivers/usb/gadget/udc/udc-xilinx.c 	bytes_to_tx = req->usb_req.length - req->usb_req.actual;
req              1884 drivers/usb/gadget/udc/udc-xilinx.c 		req->usb_req.actual = req->usb_req.length;
req              1885 drivers/usb/gadget/udc/udc-xilinx.c 		xudc_done(ep0, req, 0);
req              1904 drivers/usb/gadget/udc/udc-xilinx.c 			buffer = req->usb_req.buf + req->usb_req.actual;
req              1905 drivers/usb/gadget/udc/udc-xilinx.c 			req->usb_req.actual = req->usb_req.actual + length;
req              1949 drivers/usb/gadget/udc/udc-xilinx.c 	struct xusb_req *req;
req              1962 drivers/usb/gadget/udc/udc-xilinx.c 	req = list_first_entry(&ep->queue, struct xusb_req, queue);
req              1965 drivers/usb/gadget/udc/udc-xilinx.c 		xudc_write_fifo(ep, req);
req              1967 drivers/usb/gadget/udc/udc-xilinx.c 		xudc_read_fifo(ep, req);
req              2059 drivers/usb/gadget/udc/udc-xilinx.c 	udc->req = devm_kzalloc(&pdev->dev, sizeof(struct xusb_req),
req              2061 drivers/usb/gadget/udc/udc-xilinx.c 	if (!udc->req)
req              2068 drivers/usb/gadget/udc/udc-xilinx.c 	udc->req->usb_req.buf = buff;
req               133 drivers/usb/host/xhci-dbgcap.c static void xhci_dbc_giveback(struct dbc_request *req, int status)
req               137 drivers/usb/host/xhci-dbgcap.c 	struct dbc_ep		*dep = req->dep;
req               142 drivers/usb/host/xhci-dbgcap.c 	list_del_init(&req->list_pending);
req               143 drivers/usb/host/xhci-dbgcap.c 	req->trb_dma = 0;
req               144 drivers/usb/host/xhci-dbgcap.c 	req->trb = NULL;
req               146 drivers/usb/host/xhci-dbgcap.c 	if (req->status == -EINPROGRESS)
req               147 drivers/usb/host/xhci-dbgcap.c 		req->status = status;
req               149 drivers/usb/host/xhci-dbgcap.c 	trace_xhci_dbc_giveback_request(req);
req               152 drivers/usb/host/xhci-dbgcap.c 			 req->dma,
req               153 drivers/usb/host/xhci-dbgcap.c 			 req->length,
req               158 drivers/usb/host/xhci-dbgcap.c 	req->complete(xhci, req);
req               162 drivers/usb/host/xhci-dbgcap.c static void xhci_dbc_flush_single_request(struct dbc_request *req)
req               164 drivers/usb/host/xhci-dbgcap.c 	union xhci_trb	*trb = req->trb;
req               172 drivers/usb/host/xhci-dbgcap.c 	xhci_dbc_giveback(req, -ESHUTDOWN);
req               177 drivers/usb/host/xhci-dbgcap.c 	struct dbc_request	*req, *tmp;
req               179 drivers/usb/host/xhci-dbgcap.c 	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
req               180 drivers/usb/host/xhci-dbgcap.c 		xhci_dbc_flush_single_request(req);
req               192 drivers/usb/host/xhci-dbgcap.c 	struct dbc_request	*req;
req               194 drivers/usb/host/xhci-dbgcap.c 	req = kzalloc(sizeof(*req), gfp_flags);
req               195 drivers/usb/host/xhci-dbgcap.c 	if (!req)
req               198 drivers/usb/host/xhci-dbgcap.c 	req->dep = dep;
req               199 drivers/usb/host/xhci-dbgcap.c 	INIT_LIST_HEAD(&req->list_pending);
req               200 drivers/usb/host/xhci-dbgcap.c 	INIT_LIST_HEAD(&req->list_pool);
req               201 drivers/usb/host/xhci-dbgcap.c 	req->direction = dep->direction;
req               203 drivers/usb/host/xhci-dbgcap.c 	trace_xhci_dbc_alloc_request(req);
req               205 drivers/usb/host/xhci-dbgcap.c 	return req;
req               209 drivers/usb/host/xhci-dbgcap.c dbc_free_request(struct dbc_ep *dep, struct dbc_request *req)
req               211 drivers/usb/host/xhci-dbgcap.c 	trace_xhci_dbc_free_request(req);
req               213 drivers/usb/host/xhci-dbgcap.c 	kfree(req);
req               240 drivers/usb/host/xhci-dbgcap.c 				  struct dbc_request *req)
req               249 drivers/usb/host/xhci-dbgcap.c 	num_trbs = count_trbs(req->dma, req->length);
req               254 drivers/usb/host/xhci-dbgcap.c 	addr	= req->dma;
req               257 drivers/usb/host/xhci-dbgcap.c 	length	= TRB_LEN(req->length);
req               265 drivers/usb/host/xhci-dbgcap.c 	req->trb = ring->enqueue;
req               266 drivers/usb/host/xhci-dbgcap.c 	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
req               289 drivers/usb/host/xhci-dbgcap.c dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
req               298 drivers/usb/host/xhci-dbgcap.c 	if (!req->length || !req->buf)
req               301 drivers/usb/host/xhci-dbgcap.c 	req->actual		= 0;
req               302 drivers/usb/host/xhci-dbgcap.c 	req->status		= -EINPROGRESS;
req               304 drivers/usb/host/xhci-dbgcap.c 	req->dma = dma_map_single(dev,
req               305 drivers/usb/host/xhci-dbgcap.c 				  req->buf,
req               306 drivers/usb/host/xhci-dbgcap.c 				  req->length,
req               308 drivers/usb/host/xhci-dbgcap.c 	if (dma_mapping_error(dev, req->dma)) {
req               313 drivers/usb/host/xhci-dbgcap.c 	ret = xhci_dbc_queue_bulk_tx(dep, req);
req               317 drivers/usb/host/xhci-dbgcap.c 				 req->dma,
req               318 drivers/usb/host/xhci-dbgcap.c 				 req->length,
req               323 drivers/usb/host/xhci-dbgcap.c 	list_add_tail(&req->list_pending, &dep->list_pending);
req               328 drivers/usb/host/xhci-dbgcap.c int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
req               337 drivers/usb/host/xhci-dbgcap.c 		ret = dbc_ep_do_queue(dep, req);
req               342 drivers/usb/host/xhci-dbgcap.c 	trace_xhci_dbc_queue_request(req);
req               598 drivers/usb/host/xhci-dbgcap.c 	struct dbc_request	*req = NULL, *r;
req               630 drivers/usb/host/xhci-dbgcap.c 			req = r;
req               635 drivers/usb/host/xhci-dbgcap.c 	if (!req) {
req               640 drivers/usb/host/xhci-dbgcap.c 	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);
req               643 drivers/usb/host/xhci-dbgcap.c 	req->actual = req->length - remain_length;
req               644 drivers/usb/host/xhci-dbgcap.c 	xhci_dbc_giveback(req, status);
req                92 drivers/usb/host/xhci-dbgcap.h 						    struct dbc_request *req);
req               203 drivers/usb/host/xhci-dbgcap.h void dbc_free_request(struct dbc_ep *dep, struct dbc_request *req);
req               204 drivers/usb/host/xhci-dbgcap.h int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req, gfp_t gfp_flags);
req                35 drivers/usb/host/xhci-dbgtty.c 	struct dbc_request	*req;
req                41 drivers/usb/host/xhci-dbgtty.c 		req = list_entry(pool->next, struct dbc_request, list_pool);
req                42 drivers/usb/host/xhci-dbgtty.c 		len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
req                47 drivers/usb/host/xhci-dbgtty.c 		req->length = len;
req                48 drivers/usb/host/xhci-dbgtty.c 		list_del(&req->list_pool);
req                51 drivers/usb/host/xhci-dbgtty.c 		status = dbc_ep_queue(port->out, req, GFP_ATOMIC);
req                55 drivers/usb/host/xhci-dbgtty.c 			list_add(&req->list_pool, pool);
req                70 drivers/usb/host/xhci-dbgtty.c 	struct dbc_request	*req;
req                78 drivers/usb/host/xhci-dbgtty.c 		req = list_entry(pool->next, struct dbc_request, list_pool);
req                79 drivers/usb/host/xhci-dbgtty.c 		list_del(&req->list_pool);
req                80 drivers/usb/host/xhci-dbgtty.c 		req->length = DBC_MAX_PACKET;
req                83 drivers/usb/host/xhci-dbgtty.c 		status = dbc_ep_queue(port->in, req, GFP_ATOMIC);
req                87 drivers/usb/host/xhci-dbgtty.c 			list_add(&req->list_pool, pool);
req                94 drivers/usb/host/xhci-dbgtty.c dbc_read_complete(struct xhci_hcd *xhci, struct dbc_request *req)
req               101 drivers/usb/host/xhci-dbgtty.c 	list_add_tail(&req->list_pool, &port->read_queue);
req               106 drivers/usb/host/xhci-dbgtty.c static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req)
req               113 drivers/usb/host/xhci-dbgtty.c 	list_add(&req->list_pool, &port->write_pool);
req               114 drivers/usb/host/xhci-dbgtty.c 	switch (req->status) {
req               122 drivers/usb/host/xhci-dbgtty.c 			  req->status);
req               128 drivers/usb/host/xhci-dbgtty.c static void xhci_dbc_free_req(struct dbc_ep *dep, struct dbc_request *req)
req               130 drivers/usb/host/xhci-dbgtty.c 	kfree(req->buf);
req               131 drivers/usb/host/xhci-dbgtty.c 	dbc_free_request(dep, req);
req               139 drivers/usb/host/xhci-dbgtty.c 	struct dbc_request	*req;
req               142 drivers/usb/host/xhci-dbgtty.c 		req = dbc_alloc_request(dep, GFP_KERNEL);
req               143 drivers/usb/host/xhci-dbgtty.c 		if (!req)
req               146 drivers/usb/host/xhci-dbgtty.c 		req->length = DBC_MAX_PACKET;
req               147 drivers/usb/host/xhci-dbgtty.c 		req->buf = kmalloc(req->length, GFP_KERNEL);
req               148 drivers/usb/host/xhci-dbgtty.c 		if (!req->buf) {
req               149 drivers/usb/host/xhci-dbgtty.c 			dbc_free_request(dep, req);
req               153 drivers/usb/host/xhci-dbgtty.c 		req->complete = fn;
req               154 drivers/usb/host/xhci-dbgtty.c 		list_add_tail(&req->list_pool, head);
req               163 drivers/usb/host/xhci-dbgtty.c 	struct dbc_request	*req;
req               166 drivers/usb/host/xhci-dbgtty.c 		req = list_entry(head->next, struct dbc_request, list_pool);
req               167 drivers/usb/host/xhci-dbgtty.c 		list_del(&req->list_pool);
req               168 drivers/usb/host/xhci-dbgtty.c 		xhci_dbc_free_req(dep, req);
req               333 drivers/usb/host/xhci-dbgtty.c 	struct dbc_request	*req;
req               344 drivers/usb/host/xhci-dbgtty.c 		req = list_first_entry(queue, struct dbc_request, list_pool);
req               349 drivers/usb/host/xhci-dbgtty.c 		switch (req->status) {
req               357 drivers/usb/host/xhci-dbgtty.c 				req->status);
req               361 drivers/usb/host/xhci-dbgtty.c 		if (req->actual) {
req               362 drivers/usb/host/xhci-dbgtty.c 			char		*packet = req->buf;
req               363 drivers/usb/host/xhci-dbgtty.c 			unsigned int	n, size = req->actual;
req               383 drivers/usb/host/xhci-dbgtty.c 		list_move(&req->list_pool, &port->read_pool);
req               553 drivers/usb/host/xhci-trace.h 	TP_PROTO(struct dbc_request *req),
req               554 drivers/usb/host/xhci-trace.h 	TP_ARGS(req),
req               556 drivers/usb/host/xhci-trace.h 		__field(struct dbc_request *, req)
req               563 drivers/usb/host/xhci-trace.h 		__entry->req = req;
req               564 drivers/usb/host/xhci-trace.h 		__entry->dir = req->direction;
req               565 drivers/usb/host/xhci-trace.h 		__entry->actual = req->actual;
req               566 drivers/usb/host/xhci-trace.h 		__entry->length = req->length;
req               567 drivers/usb/host/xhci-trace.h 		__entry->status = req->status;
req               571 drivers/usb/host/xhci-trace.h 		__entry->req, __entry->actual,
req               577 drivers/usb/host/xhci-trace.h 	TP_PROTO(struct dbc_request *req),
req               578 drivers/usb/host/xhci-trace.h 	TP_ARGS(req)
req               582 drivers/usb/host/xhci-trace.h 	TP_PROTO(struct dbc_request *req),
req               583 drivers/usb/host/xhci-trace.h 	TP_ARGS(req)
req               587 drivers/usb/host/xhci-trace.h 	TP_PROTO(struct dbc_request *req),
req               588 drivers/usb/host/xhci-trace.h 	TP_ARGS(req)
req               592 drivers/usb/host/xhci-trace.h 	TP_PROTO(struct dbc_request *req),
req               593 drivers/usb/host/xhci-trace.h 	TP_ARGS(req)
req                27 drivers/usb/isp1760/isp1760-udc.c 	struct usb_request req;
req                43 drivers/usb/isp1760/isp1760-udc.c static inline struct isp1760_request *req_to_udc_req(struct usb_request *req)
req                45 drivers/usb/isp1760/isp1760-udc.c 	return container_of(req, struct isp1760_request, req);
req               124 drivers/usb/isp1760/isp1760-udc.c 					 struct isp1760_request *req,
req               131 drivers/usb/isp1760/isp1760-udc.c 		req, status);
req               133 drivers/usb/isp1760/isp1760-udc.c 	req->ep = NULL;
req               134 drivers/usb/isp1760/isp1760-udc.c 	req->req.status = status;
req               135 drivers/usb/isp1760/isp1760-udc.c 	req->req.complete(&ep->ep, &req->req);
req               177 drivers/usb/isp1760/isp1760-udc.c 				struct isp1760_request *req)
req               188 drivers/usb/isp1760/isp1760-udc.c 		__func__, len, req->req.actual, req->req.length);
req               190 drivers/usb/isp1760/isp1760-udc.c 	len = min(len, req->req.length - req->req.actual);
req               205 drivers/usb/isp1760/isp1760-udc.c 	buf = req->req.buf + req->req.actual;
req               216 drivers/usb/isp1760/isp1760-udc.c 	req->req.actual += len;
req               225 drivers/usb/isp1760/isp1760-udc.c 		__func__, req, req->req.actual, req->req.length, ep->maxpacket,
req               234 drivers/usb/isp1760/isp1760-udc.c 	if (req->req.actual == req->req.length || len < ep->maxpacket) {
req               235 drivers/usb/isp1760/isp1760-udc.c 		list_del(&req->queue);
req               243 drivers/usb/isp1760/isp1760-udc.c 				 struct isp1760_request *req)
req               246 drivers/usb/isp1760/isp1760-udc.c 	u32 *buf = req->req.buf + req->req.actual;
req               249 drivers/usb/isp1760/isp1760-udc.c 	req->packet_size = min(req->req.length - req->req.actual,
req               253 drivers/usb/isp1760/isp1760-udc.c 		__func__, req->packet_size, req->req.actual,
req               254 drivers/usb/isp1760/isp1760-udc.c 		req->req.length);
req               258 drivers/usb/isp1760/isp1760-udc.c 	if (req->packet_size)
req               259 drivers/usb/isp1760/isp1760-udc.c 		isp1760_udc_write(udc, DC_BUFLEN, req->packet_size);
req               267 drivers/usb/isp1760/isp1760-udc.c 	for (i = req->packet_size; i > 2; i -= 4, ++buf)
req               274 drivers/usb/isp1760/isp1760-udc.c 	if (!req->packet_size)
req               281 drivers/usb/isp1760/isp1760-udc.c 	struct isp1760_request *req;
req               308 drivers/usb/isp1760/isp1760-udc.c 	req = list_first_entry(&ep->queue, struct isp1760_request,
req               310 drivers/usb/isp1760/isp1760-udc.c 	complete = isp1760_udc_receive(ep, req);
req               315 drivers/usb/isp1760/isp1760-udc.c 		isp1760_udc_request_complete(ep, req, 0);
req               322 drivers/usb/isp1760/isp1760-udc.c 	struct isp1760_request *req;
req               352 drivers/usb/isp1760/isp1760-udc.c 	req = list_first_entry(&ep->queue, struct isp1760_request,
req               354 drivers/usb/isp1760/isp1760-udc.c 	req->req.actual += req->packet_size;
req               356 drivers/usb/isp1760/isp1760-udc.c 	need_zlp = req->req.actual == req->req.length &&
req               357 drivers/usb/isp1760/isp1760-udc.c 		   !(req->req.length % ep->maxpacket) &&
req               358 drivers/usb/isp1760/isp1760-udc.c 		   req->packet_size && req->req.zero;
req               362 drivers/usb/isp1760/isp1760-udc.c 		 req, req->req.actual, req->req.length, ep->maxpacket,
req               363 drivers/usb/isp1760/isp1760-udc.c 		 req->packet_size, req->req.zero, need_zlp);
req               369 drivers/usb/isp1760/isp1760-udc.c 	if (req->req.actual == req->req.length && !need_zlp) {
req               370 drivers/usb/isp1760/isp1760-udc.c 		complete = req;
req               371 drivers/usb/isp1760/isp1760-udc.c 		list_del(&req->queue);
req               377 drivers/usb/isp1760/isp1760-udc.c 			req = list_first_entry(&ep->queue,
req               380 drivers/usb/isp1760/isp1760-udc.c 			req = NULL;
req               389 drivers/usb/isp1760/isp1760-udc.c 	if (req)
req               390 drivers/usb/isp1760/isp1760-udc.c 		isp1760_udc_transmit(ep, req);
req               433 drivers/usb/isp1760/isp1760-udc.c 			struct isp1760_request *req;
req               435 drivers/usb/isp1760/isp1760-udc.c 			req = list_first_entry(&ep->queue,
req               437 drivers/usb/isp1760/isp1760-udc.c 			isp1760_udc_transmit(ep, req);
req               451 drivers/usb/isp1760/isp1760-udc.c 				  const struct usb_ctrlrequest *req)
req               456 drivers/usb/isp1760/isp1760-udc.c 	if (req->wLength != cpu_to_le16(2) || req->wValue != cpu_to_le16(0))
req               459 drivers/usb/isp1760/isp1760-udc.c 	switch (req->bRequestType) {
req               469 drivers/usb/isp1760/isp1760-udc.c 		ep = isp1760_udc_find_ep(udc, le16_to_cpu(req->wIndex));
req               521 drivers/usb/isp1760/isp1760-udc.c 				       struct usb_ctrlrequest *req)
req               525 drivers/usb/isp1760/isp1760-udc.c 	switch (req->bRequest) {
req               527 drivers/usb/isp1760/isp1760-udc.c 		return isp1760_udc_get_status(udc, req);
req               530 drivers/usb/isp1760/isp1760-udc.c 		switch (req->bRequestType) {
req               537 drivers/usb/isp1760/isp1760-udc.c 			u16 index = le16_to_cpu(req->wIndex);
req               540 drivers/usb/isp1760/isp1760-udc.c 			if (req->wLength != cpu_to_le16(0) ||
req               541 drivers/usb/isp1760/isp1760-udc.c 			    req->wValue != cpu_to_le16(USB_ENDPOINT_HALT))
req               574 drivers/usb/isp1760/isp1760-udc.c 		switch (req->bRequestType) {
req               581 drivers/usb/isp1760/isp1760-udc.c 			u16 index = le16_to_cpu(req->wIndex);
req               584 drivers/usb/isp1760/isp1760-udc.c 			if (req->wLength != cpu_to_le16(0) ||
req               585 drivers/usb/isp1760/isp1760-udc.c 			    req->wValue != cpu_to_le16(USB_ENDPOINT_HALT))
req               609 drivers/usb/isp1760/isp1760-udc.c 		if (req->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
req               612 drivers/usb/isp1760/isp1760-udc.c 		return isp1760_udc_set_address(udc, le16_to_cpu(req->wValue));
req               615 drivers/usb/isp1760/isp1760-udc.c 		if (req->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
req               622 drivers/usb/isp1760/isp1760-udc.c 		stall = udc->driver->setup(&udc->gadget, req) < 0;
req               626 drivers/usb/isp1760/isp1760-udc.c 		usb_gadget_set_state(&udc->gadget, req->wValue ?
req               638 drivers/usb/isp1760/isp1760-udc.c 		return udc->driver->setup(&udc->gadget, req) < 0;
req               647 drivers/usb/isp1760/isp1760-udc.c 	} req;
req               656 drivers/usb/isp1760/isp1760-udc.c 	if (count != sizeof(req)) {
req               666 drivers/usb/isp1760/isp1760-udc.c 	req.data[0] = isp1760_udc_read(udc, DC_DATAPORT);
req               667 drivers/usb/isp1760/isp1760-udc.c 	req.data[1] = isp1760_udc_read(udc, DC_DATAPORT);
req               676 drivers/usb/isp1760/isp1760-udc.c 	if (!req.r.wLength)
req               678 drivers/usb/isp1760/isp1760-udc.c 	else if (req.r.bRequestType & USB_DIR_IN)
req               683 drivers/usb/isp1760/isp1760-udc.c 	udc->ep0_dir = req.r.bRequestType & USB_DIR_IN;
req               684 drivers/usb/isp1760/isp1760-udc.c 	udc->ep0_length = le16_to_cpu(req.r.wLength);
req               690 drivers/usb/isp1760/isp1760-udc.c 		__func__, req.r.bRequestType, req.r.bRequest,
req               691 drivers/usb/isp1760/isp1760-udc.c 		le16_to_cpu(req.r.wValue), le16_to_cpu(req.r.wIndex),
req               692 drivers/usb/isp1760/isp1760-udc.c 		le16_to_cpu(req.r.wLength));
req               694 drivers/usb/isp1760/isp1760-udc.c 	if ((req.r.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
req               695 drivers/usb/isp1760/isp1760-udc.c 		stall = isp1760_ep0_setup_standard(udc, &req.r);
req               697 drivers/usb/isp1760/isp1760-udc.c 		stall = udc->driver->setup(&udc->gadget, &req.r) < 0;
req               772 drivers/usb/isp1760/isp1760-udc.c 	struct isp1760_request *req, *nreq;
req               798 drivers/usb/isp1760/isp1760-udc.c 	list_for_each_entry_safe(req, nreq, &req_list, queue) {
req               799 drivers/usb/isp1760/isp1760-udc.c 		list_del(&req->queue);
req               800 drivers/usb/isp1760/isp1760-udc.c 		isp1760_udc_request_complete(uep, req, -ESHUTDOWN);
req               809 drivers/usb/isp1760/isp1760-udc.c 	struct isp1760_request *req;
req               811 drivers/usb/isp1760/isp1760-udc.c 	req = kzalloc(sizeof(*req), gfp_flags);
req               812 drivers/usb/isp1760/isp1760-udc.c 	if (!req)
req               815 drivers/usb/isp1760/isp1760-udc.c 	return &req->req;
req               820 drivers/usb/isp1760/isp1760-udc.c 	struct isp1760_request *req = req_to_udc_req(_req);
req               822 drivers/usb/isp1760/isp1760-udc.c 	kfree(req);
req               828 drivers/usb/isp1760/isp1760-udc.c 	struct isp1760_request *req = req_to_udc_req(_req);
req               844 drivers/usb/isp1760/isp1760-udc.c 	req->ep = uep;
req               851 drivers/usb/isp1760/isp1760-udc.c 				__func__, _req->length, req);
req               859 drivers/usb/isp1760/isp1760-udc.c 				__func__, req);
req               861 drivers/usb/isp1760/isp1760-udc.c 			list_add_tail(&req->queue, &uep->queue);
req               862 drivers/usb/isp1760/isp1760-udc.c 			isp1760_udc_transmit(uep, req);
req               866 drivers/usb/isp1760/isp1760-udc.c 			list_add_tail(&req->queue, &uep->queue);
req               884 drivers/usb/isp1760/isp1760-udc.c 		list_add_tail(&req->queue, &uep->queue);
req               886 drivers/usb/isp1760/isp1760-udc.c 			isp1760_udc_transmit(uep, req);
req               888 drivers/usb/isp1760/isp1760-udc.c 			complete = isp1760_udc_receive(uep, req);
req               898 drivers/usb/isp1760/isp1760-udc.c 		req->ep = NULL;
req               903 drivers/usb/isp1760/isp1760-udc.c 		isp1760_udc_request_complete(uep, req, 0);
req               910 drivers/usb/isp1760/isp1760-udc.c 	struct isp1760_request *req = req_to_udc_req(_req);
req               919 drivers/usb/isp1760/isp1760-udc.c 	if (req->ep != uep)
req               920 drivers/usb/isp1760/isp1760-udc.c 		req = NULL;
req               922 drivers/usb/isp1760/isp1760-udc.c 		list_del(&req->queue);
req               926 drivers/usb/isp1760/isp1760-udc.c 	if (!req)
req               929 drivers/usb/isp1760/isp1760-udc.c 	isp1760_udc_request_complete(uep, req, -ECONNRESET);
req                38 drivers/usb/misc/isight_firmware.c 	int llen, len, req, ret = 0;
req                68 drivers/usb/misc/isight_firmware.c 		req = (data[2] << 8 | data[3]);
req                76 drivers/usb/misc/isight_firmware.c 		for (; len > 0; req += 50) {
req                90 drivers/usb/misc/isight_firmware.c 			    (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, req, 0,
req               590 drivers/usb/misc/usbtest.c 	struct usb_sg_request *req;
req               597 drivers/usb/misc/usbtest.c 	usb_sg_cancel(timeout->req);
req               604 drivers/usb/misc/usbtest.c 	struct usb_sg_request	*req,
req               612 drivers/usb/misc/usbtest.c 		.req = req,
req               618 drivers/usb/misc/usbtest.c 		retval = usb_sg_init(req, udev, pipe,
req               628 drivers/usb/misc/usbtest.c 		usb_sg_wait(req);
req               632 drivers/usb/misc/usbtest.c 			retval = req->status;
req              1231 drivers/usb/misc/usbtest.c 		struct usb_ctrlrequest	req;
req              1244 drivers/usb/misc/usbtest.c 		memset(&req, 0, sizeof(req));
req              1245 drivers/usb/misc/usbtest.c 		req.bRequest = USB_REQ_GET_DESCRIPTOR;
req              1246 drivers/usb/misc/usbtest.c 		req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
req              1250 drivers/usb/misc/usbtest.c 			req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
req              1254 drivers/usb/misc/usbtest.c 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
req              1258 drivers/usb/misc/usbtest.c 			req.bRequest = USB_REQ_GET_INTERFACE;
req              1259 drivers/usb/misc/usbtest.c 			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
req              1265 drivers/usb/misc/usbtest.c 			req.bRequest = USB_REQ_GET_STATUS;
req              1266 drivers/usb/misc/usbtest.c 			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
req              1271 drivers/usb/misc/usbtest.c 			req.bRequest = USB_REQ_GET_STATUS;
req              1272 drivers/usb/misc/usbtest.c 			req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
req              1276 drivers/usb/misc/usbtest.c 			req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8);
req              1282 drivers/usb/misc/usbtest.c 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
req              1287 drivers/usb/misc/usbtest.c 			req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8);
req              1295 drivers/usb/misc/usbtest.c 			req.bRequest = USB_REQ_CLEAR_FEATURE;
req              1296 drivers/usb/misc/usbtest.c 			req.bRequestType = USB_RECIP_ENDPOINT;
req              1304 drivers/usb/misc/usbtest.c 			req.bRequest = USB_REQ_GET_STATUS;
req              1305 drivers/usb/misc/usbtest.c 			req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
req              1310 drivers/usb/misc/usbtest.c 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
req              1316 drivers/usb/misc/usbtest.c 			req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
req              1323 drivers/usb/misc/usbtest.c 			req.wValue = cpu_to_le16(USB_DT_STRING << 8);
req              1330 drivers/usb/misc/usbtest.c 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
req              1339 drivers/usb/misc/usbtest.c 			req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
req              1355 drivers/usb/misc/usbtest.c 			req.wValue = cpu_to_le16(USB_DT_BOS << 8);
req              1368 drivers/usb/misc/usbtest.c 		req.wLength = cpu_to_le16(len);
req              1376 drivers/usb/misc/usbtest.c 		reqp->setup = req;
req              2178 drivers/usb/misc/usbtest.c 	struct usb_sg_request	req;
req              2282 drivers/usb/misc/usbtest.c 				&req, sg, param->sglen);
req              2301 drivers/usb/misc/usbtest.c 				&req, sg, param->sglen);
req              2319 drivers/usb/misc/usbtest.c 				&req, sg, param->sglen);
req              2337 drivers/usb/misc/usbtest.c 				&req, sg, param->sglen);
req               380 drivers/usb/mtu3/mtu3.h static inline struct mtu3_request *to_mtu3_request(struct usb_request *req)
req               382 drivers/usb/mtu3/mtu3.h 	return req ? container_of(req, struct mtu3_request, request) : NULL;
req               424 drivers/usb/mtu3/mtu3.h void mtu3_free_request(struct usb_ep *ep, struct usb_request *req);
req               426 drivers/usb/mtu3/mtu3.h 		struct usb_request *req, int status);
req                14 drivers/usb/mtu3/mtu3_gadget.c 		     struct usb_request *req, int status)
req                22 drivers/usb/mtu3/mtu3_gadget.c 	mreq = to_mtu3_request(req);
req                35 drivers/usb/mtu3/mtu3_gadget.c 		usb_gadget_unmap_request(&mtu->g, req, mep->is_in);
req                38 drivers/usb/mtu3/mtu3_gadget.c 		req, req->status, mreq->request.actual, mreq->request.length);
req               255 drivers/usb/mtu3/mtu3_gadget.c void mtu3_free_request(struct usb_ep *ep, struct usb_request *req)
req               257 drivers/usb/mtu3/mtu3_gadget.c 	struct mtu3_request *mreq = to_mtu3_request(req);
req               264 drivers/usb/mtu3/mtu3_gadget.c 		struct usb_request *req, gfp_t gfp_flags)
req               272 drivers/usb/mtu3/mtu3_gadget.c 	if (!ep || !req)
req               275 drivers/usb/mtu3/mtu3_gadget.c 	if (!req->buf)
req               280 drivers/usb/mtu3/mtu3_gadget.c 	mreq = to_mtu3_request(req);
req               290 drivers/usb/mtu3/mtu3_gadget.c 	if (req->length > GPD_BUF_SIZE ||
req               291 drivers/usb/mtu3/mtu3_gadget.c 	    (mtu->gen2cp && req->length > GPD_BUF_SIZE_EL)) {
req               295 drivers/usb/mtu3/mtu3_gadget.c 			req->length);
req               302 drivers/usb/mtu3/mtu3_gadget.c 			req, ep->name);
req               309 drivers/usb/mtu3/mtu3_gadget.c 	ret = usb_gadget_map_request(&mtu->g, req, mep->is_in);
req               333 drivers/usb/mtu3/mtu3_gadget.c static int mtu3_gadget_dequeue(struct usb_ep *ep, struct usb_request *req)
req               336 drivers/usb/mtu3/mtu3_gadget.c 	struct mtu3_request *mreq = to_mtu3_request(req);
req               342 drivers/usb/mtu3/mtu3_gadget.c 	if (!ep || !req || mreq->mep != mep)
req               345 drivers/usb/mtu3/mtu3_gadget.c 	dev_dbg(mtu->dev, "%s : req=%p\n", __func__, req);
req               355 drivers/usb/mtu3/mtu3_gadget.c 		dev_dbg(mtu->dev, "req=%p not queued to %s\n", req, ep->name);
req               361 drivers/usb/mtu3/mtu3_gadget.c 	mtu3_req_complete(mep, req, -ECONNRESET);
req                57 drivers/usb/mtu3/mtu3_gadget_ep0.c static void ep0_req_giveback(struct mtu3 *mtu, struct usb_request *req)
req                59 drivers/usb/mtu3/mtu3_gadget_ep0.c 	mtu3_req_complete(mtu->ep0, req, 0);
req               158 drivers/usb/mtu3/mtu3_gadget_ep0.c static void ep0_dummy_complete(struct usb_ep *ep, struct usb_request *req)
req               161 drivers/usb/mtu3/mtu3_gadget_ep0.c static void ep0_set_sel_complete(struct usb_ep *ep, struct usb_request *req)
req               167 drivers/usb/mtu3/mtu3_gadget_ep0.c 	memcpy(&sel, req->buf, sizeof(sel));
req               169 drivers/usb/mtu3/mtu3_gadget_ep0.c 	mreq = to_mtu3_request(req);
req               504 drivers/usb/mtu3/mtu3_gadget_ep0.c 	struct usb_request *req;
req               514 drivers/usb/mtu3/mtu3_gadget_ep0.c 	req = &mreq->request;
req               517 drivers/usb/mtu3/mtu3_gadget_ep0.c 	if (req) {
req               518 drivers/usb/mtu3/mtu3_gadget_ep0.c 		void *buf = req->buf + req->actual;
req               519 drivers/usb/mtu3/mtu3_gadget_ep0.c 		unsigned int len = req->length - req->actual;
req               524 drivers/usb/mtu3/mtu3_gadget_ep0.c 			req->status = -EOVERFLOW;
req               528 drivers/usb/mtu3/mtu3_gadget_ep0.c 		req->actual += count;
req               532 drivers/usb/mtu3/mtu3_gadget_ep0.c 		if (count < maxp || req->actual == req->length) {
req               539 drivers/usb/mtu3/mtu3_gadget_ep0.c 			req = NULL;
req               549 drivers/usb/mtu3/mtu3_gadget_ep0.c 	if (req)
req               550 drivers/usb/mtu3/mtu3_gadget_ep0.c 		ep0_req_giveback(mtu, req);
req               558 drivers/usb/mtu3/mtu3_gadget_ep0.c 	struct usb_request *req;
req               570 drivers/usb/mtu3/mtu3_gadget_ep0.c 	req = &mreq->request;
req               573 drivers/usb/mtu3/mtu3_gadget_ep0.c 	src = (u8 *)req->buf + req->actual;
req               574 drivers/usb/mtu3/mtu3_gadget_ep0.c 	count = min(maxp, req->length - req->actual);
req               579 drivers/usb/mtu3/mtu3_gadget_ep0.c 		 __func__, req->actual, req->length, count, maxp, req->zero);
req               581 drivers/usb/mtu3/mtu3_gadget_ep0.c 	req->actual += count;
req               584 drivers/usb/mtu3/mtu3_gadget_ep0.c 		|| ((req->actual == req->length) && !req->zero))
req               828 drivers/usb/mtu3/mtu3_gadget_ep0.c 	struct usb_request *req, gfp_t gfp)
req               836 drivers/usb/mtu3/mtu3_gadget_ep0.c 	if (!ep || !req)
req               841 drivers/usb/mtu3/mtu3_gadget_ep0.c 	mreq = to_mtu3_request(req);
req               849 drivers/usb/mtu3/mtu3_gadget_ep0.c static int mtu3_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
req               248 drivers/usb/mtu3/mtu3_qmu.c 	struct usb_request *req = &mreq->request;
req               254 drivers/usb/mtu3/mtu3_qmu.c 	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
req               255 drivers/usb/mtu3/mtu3_qmu.c 	ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
req               256 drivers/usb/mtu3/mtu3_qmu.c 	gpd->dw3_info = cpu_to_le32(GPD_DATA_LEN(mtu, req->length));
req               269 drivers/usb/mtu3/mtu3_qmu.c 	if (req->zero) {
req               289 drivers/usb/mtu3/mtu3_qmu.c 	struct usb_request *req = &mreq->request;
req               295 drivers/usb/mtu3/mtu3_qmu.c 	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
req               296 drivers/usb/mtu3/mtu3_qmu.c 	ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
req               297 drivers/usb/mtu3/mtu3_qmu.c 	gpd->dw0_info = cpu_to_le32(GPD_RX_BUF_LEN(mtu, req->length));
req               511 drivers/usb/mtu3/mtu3_qmu.c 	struct usb_request *req = NULL;
req               529 drivers/usb/mtu3/mtu3_qmu.c 		req = &mreq->request;
req               531 drivers/usb/mtu3/mtu3_qmu.c 		req->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
req               533 drivers/usb/mtu3/mtu3_qmu.c 		mtu3_req_complete(mep, req, 0);
req               151 drivers/usb/mtu3/mtu3_trace.h 	TP_PROTO(struct mtu3_request *req),
req               152 drivers/usb/mtu3/mtu3_trace.h 	TP_ARGS(req)
req               156 drivers/usb/mtu3/mtu3_trace.h 	TP_PROTO(struct mtu3_request *req),
req               157 drivers/usb/mtu3/mtu3_trace.h 	TP_ARGS(req)
req               161 drivers/usb/mtu3/mtu3_trace.h 	TP_PROTO(struct mtu3_request *req),
req               162 drivers/usb/mtu3/mtu3_trace.h 	TP_ARGS(req)
req               166 drivers/usb/mtu3/mtu3_trace.h 	TP_PROTO(struct mtu3_request *req),
req               167 drivers/usb/mtu3/mtu3_trace.h 	TP_ARGS(req)
req               171 drivers/usb/mtu3/mtu3_trace.h 	TP_PROTO(struct mtu3_request *req),
req               172 drivers/usb/mtu3/mtu3_trace.h 	TP_ARGS(req)
req                27 drivers/usb/musb/musb_gadget.c #define is_buffer_mapped(req) (is_dma_capable() && \
req                28 drivers/usb/musb/musb_gadget.c 					(req->map_state != UN_MAPPED))
req               129 drivers/usb/musb/musb_gadget.c 	struct musb_request	*req;
req               133 drivers/usb/musb/musb_gadget.c 	req = to_musb_request(request);
req               135 drivers/usb/musb/musb_gadget.c 	list_del(&req->list);
req               136 drivers/usb/musb/musb_gadget.c 	if (req->request.status == -EINPROGRESS)
req               137 drivers/usb/musb/musb_gadget.c 		req->request.status = status;
req               138 drivers/usb/musb/musb_gadget.c 	musb = req->musb;
req               144 drivers/usb/musb/musb_gadget.c 		unmap_dma_buffer(req, musb);
req               146 drivers/usb/musb/musb_gadget.c 	trace_musb_req_gb(req);
req               147 drivers/usb/musb/musb_gadget.c 	usb_gadget_giveback_request(&req->ep->end_point, &req->request);
req               161 drivers/usb/musb/musb_gadget.c 	struct musb_request	*req = NULL;
req               194 drivers/usb/musb/musb_gadget.c 		req = list_first_entry(&ep->req_list, struct musb_request, list);
req               195 drivers/usb/musb/musb_gadget.c 		musb_g_giveback(ep, &req->request, status);
req               223 drivers/usb/musb/musb_gadget.c static void txstate(struct musb *musb, struct musb_request *req)
req               225 drivers/usb/musb/musb_gadget.c 	u8			epnum = req->epnum;
req               232 drivers/usb/musb/musb_gadget.c 	musb_ep = req->ep;
req               250 drivers/usb/musb/musb_gadget.c 	request = &req->request;
req               271 drivers/usb/musb/musb_gadget.c 	if (is_buffer_mapped(req)) {
req               383 drivers/usb/musb/musb_gadget.c 		unmap_dma_buffer(req, musb);
req               409 drivers/usb/musb/musb_gadget.c 	struct musb_request	*req;
req               417 drivers/usb/musb/musb_gadget.c 	req = next_request(musb_ep);
req               418 drivers/usb/musb/musb_gadget.c 	request = &req->request;
req               456 drivers/usb/musb/musb_gadget.c 		trace_musb_req_tx(req);
req               501 drivers/usb/musb/musb_gadget.c 			req = musb_ep->desc ? next_request(musb_ep) : NULL;
req               502 drivers/usb/musb/musb_gadget.c 			if (!req) {
req               509 drivers/usb/musb/musb_gadget.c 		txstate(musb, req);
req               518 drivers/usb/musb/musb_gadget.c static void rxstate(struct musb *musb, struct musb_request *req)
req               520 drivers/usb/musb/musb_gadget.c 	const u8		epnum = req->epnum;
req               521 drivers/usb/musb/musb_gadget.c 	struct usb_request	*request = &req->request;
req               556 drivers/usb/musb/musb_gadget.c 	if (is_cppi_enabled(musb) && is_buffer_mapped(req)) {
req               598 drivers/usb/musb/musb_gadget.c 			if (!is_buffer_mapped(req))
req               753 drivers/usb/musb/musb_gadget.c 			unmap_dma_buffer(req, musb);
req               790 drivers/usb/musb/musb_gadget.c 	struct musb_request	*req;
req               805 drivers/usb/musb/musb_gadget.c 	req = next_request(musb_ep);
req               806 drivers/usb/musb/musb_gadget.c 	if (!req)
req               809 drivers/usb/musb/musb_gadget.c 	trace_musb_req_rx(req);
req               810 drivers/usb/musb/musb_gadget.c 	request = &req->request;
req               891 drivers/usb/musb/musb_gadget.c 		req = next_request(musb_ep);
req               892 drivers/usb/musb/musb_gadget.c 		if (!req)
req               900 drivers/usb/musb/musb_gadget.c 	rxstate(musb, req);
req              1149 drivers/usb/musb/musb_gadget.c void musb_free_request(struct usb_ep *ep, struct usb_request *req)
req              1151 drivers/usb/musb/musb_gadget.c 	struct musb_request *request = to_musb_request(req);
req              1169 drivers/usb/musb/musb_gadget.c void musb_ep_restart(struct musb *musb, struct musb_request *req)
req              1171 drivers/usb/musb/musb_gadget.c 	trace_musb_req_start(req);
req              1172 drivers/usb/musb/musb_gadget.c 	musb_ep_select(musb->mregs, req->epnum);
req              1173 drivers/usb/musb/musb_gadget.c 	if (req->tx)
req              1174 drivers/usb/musb/musb_gadget.c 		txstate(musb, req);
req              1176 drivers/usb/musb/musb_gadget.c 		rxstate(musb, req);
req              1181 drivers/usb/musb/musb_gadget.c 	struct musb_request *req = data;
req              1183 drivers/usb/musb/musb_gadget.c 	musb_ep_restart(musb, req);
req              1188 drivers/usb/musb/musb_gadget.c static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
req              1197 drivers/usb/musb/musb_gadget.c 	if (!ep || !req)
req              1199 drivers/usb/musb/musb_gadget.c 	if (!req->buf)
req              1205 drivers/usb/musb/musb_gadget.c 	request = to_musb_request(req);
req              1237 drivers/usb/musb/musb_gadget.c 				req, ep->name, "disabled");
req              1267 drivers/usb/musb/musb_gadget.c 	struct musb_request	*req = to_musb_request(request);
req              1273 drivers/usb/musb/musb_gadget.c 	if (!ep || !request || req->ep != musb_ep)
req              1276 drivers/usb/musb/musb_gadget.c 	trace_musb_req_deq(req);
req              1281 drivers/usb/musb/musb_gadget.c 		if (r == req)
req              1284 drivers/usb/musb/musb_gadget.c 	if (r != req) {
req              1292 drivers/usb/musb/musb_gadget.c 	if (musb_ep->req_list.next != &req->list || musb_ep->busy)
req                67 drivers/usb/musb/musb_gadget.h extern void musb_free_request(struct usb_ep *ep, struct usb_request *req);
req               175 drivers/usb/musb/musb_gadget_ep0.c static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req)
req               177 drivers/usb/musb/musb_gadget_ep0.c 	musb_g_giveback(&musb->endpoints[0].ep_in, req, 0);
req               468 drivers/usb/musb/musb_gadget_ep0.c 	struct usb_request	*req;
req               472 drivers/usb/musb/musb_gadget_ep0.c 	req = &request->request;
req               477 drivers/usb/musb/musb_gadget_ep0.c 	if (req) {
req               478 drivers/usb/musb/musb_gadget_ep0.c 		void		*buf = req->buf + req->actual;
req               479 drivers/usb/musb/musb_gadget_ep0.c 		unsigned	len = req->length - req->actual;
req               484 drivers/usb/musb/musb_gadget_ep0.c 			req->status = -EOVERFLOW;
req               489 drivers/usb/musb/musb_gadget_ep0.c 			req->actual += count;
req               492 drivers/usb/musb/musb_gadget_ep0.c 		if (count < 64 || req->actual == req->length) {
req               496 drivers/usb/musb/musb_gadget_ep0.c 			req = NULL;
req               504 drivers/usb/musb/musb_gadget_ep0.c 	if (req) {
req               506 drivers/usb/musb/musb_gadget_ep0.c 		musb_g_ep0_giveback(musb, req);
req               524 drivers/usb/musb/musb_gadget_ep0.c 	struct musb_request	*req = next_ep0_request(musb);
req               530 drivers/usb/musb/musb_gadget_ep0.c 	if (!req) {
req               536 drivers/usb/musb/musb_gadget_ep0.c 	request = &req->request;
req               579 drivers/usb/musb/musb_gadget_ep0.c musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
req               584 drivers/usb/musb/musb_gadget_ep0.c 	musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req);
req               590 drivers/usb/musb/musb_gadget_ep0.c 		req->bRequestType,
req               591 drivers/usb/musb/musb_gadget_ep0.c 		req->bRequest,
req               592 drivers/usb/musb/musb_gadget_ep0.c 		le16_to_cpu(req->wValue),
req               593 drivers/usb/musb/musb_gadget_ep0.c 		le16_to_cpu(req->wIndex),
req               594 drivers/usb/musb/musb_gadget_ep0.c 		le16_to_cpu(req->wLength));
req               611 drivers/usb/musb/musb_gadget_ep0.c 	if (req->wLength == 0) {
req               612 drivers/usb/musb/musb_gadget_ep0.c 		if (req->bRequestType & USB_DIR_IN)
req               615 drivers/usb/musb/musb_gadget_ep0.c 	} else if (req->bRequestType & USB_DIR_IN) {
req               747 drivers/usb/musb/musb_gadget_ep0.c 			struct musb_request	*req;
req               749 drivers/usb/musb/musb_gadget_ep0.c 			req = next_ep0_request(musb);
req               750 drivers/usb/musb/musb_gadget_ep0.c 			if (req)
req               751 drivers/usb/musb/musb_gadget_ep0.c 				musb_g_ep0_giveback(musb, &req->request);
req               910 drivers/usb/musb/musb_gadget_ep0.c 	struct musb_request	*req;
req               923 drivers/usb/musb/musb_gadget_ep0.c 	req = to_musb_request(r);
req               924 drivers/usb/musb/musb_gadget_ep0.c 	req->musb = musb;
req               925 drivers/usb/musb/musb_gadget_ep0.c 	req->request.actual = 0;
req               926 drivers/usb/musb/musb_gadget_ep0.c 	req->request.status = -EINPROGRESS;
req               927 drivers/usb/musb/musb_gadget_ep0.c 	req->tx = ep->is_in;
req               950 drivers/usb/musb/musb_gadget_ep0.c 	list_add_tail(&req->list, &ep->req_list);
req               954 drivers/usb/musb/musb_gadget_ep0.c 			req->request.length);
req               964 drivers/usb/musb/musb_gadget_ep0.c 		if (req->request.length)
req               988 drivers/usb/musb/musb_gadget_ep0.c static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
req               211 drivers/usb/musb/musb_trace.h 	TP_PROTO(struct musb_request *req),
req               212 drivers/usb/musb/musb_trace.h 	TP_ARGS(req),
req               214 drivers/usb/musb/musb_trace.h 		__field(struct usb_request *, req)
req               225 drivers/usb/musb/musb_trace.h 		__entry->req = &req->request;
req               226 drivers/usb/musb/musb_trace.h 		__entry->is_tx = req->tx;
req               227 drivers/usb/musb/musb_trace.h 		__entry->epnum = req->epnum;
req               228 drivers/usb/musb/musb_trace.h 		__entry->status = req->request.status;
req               229 drivers/usb/musb/musb_trace.h 		__entry->buf_len = req->request.length;
req               230 drivers/usb/musb/musb_trace.h 		__entry->actual_len = req->request.actual;
req               231 drivers/usb/musb/musb_trace.h 		__entry->zero = req->request.zero;
req               232 drivers/usb/musb/musb_trace.h 		__entry->short_not_ok = req->request.short_not_ok;
req               233 drivers/usb/musb/musb_trace.h 		__entry->no_interrupt = req->request.no_interrupt;
req               236 drivers/usb/musb/musb_trace.h 			__entry->req, __entry->epnum,
req               247 drivers/usb/musb/musb_trace.h 	TP_PROTO(struct musb_request *req),
req               248 drivers/usb/musb/musb_trace.h 	TP_ARGS(req)
req               252 drivers/usb/musb/musb_trace.h 	TP_PROTO(struct musb_request *req),
req               253 drivers/usb/musb/musb_trace.h 	TP_ARGS(req)
req               257 drivers/usb/musb/musb_trace.h 	TP_PROTO(struct musb_request *req),
req               258 drivers/usb/musb/musb_trace.h 	TP_ARGS(req)
req               262 drivers/usb/musb/musb_trace.h 	TP_PROTO(struct musb_request *req),
req               263 drivers/usb/musb/musb_trace.h 	TP_ARGS(req)
req               267 drivers/usb/musb/musb_trace.h 	TP_PROTO(struct musb_request *req),
req               268 drivers/usb/musb/musb_trace.h 	TP_ARGS(req)
req               272 drivers/usb/musb/musb_trace.h 	TP_PROTO(struct musb_request *req),
req               273 drivers/usb/musb/musb_trace.h 	TP_ARGS(req)
req               277 drivers/usb/musb/musb_trace.h 	TP_PROTO(struct musb_request *req),
req               278 drivers/usb/musb/musb_trace.h 	TP_ARGS(req)
req               282 drivers/usb/musb/musb_trace.h 	TP_PROTO(struct musb_request *req),
req               283 drivers/usb/musb/musb_trace.h 	TP_ARGS(req)
req               157 drivers/usb/renesas_usbhs/common.c void usbhs_usbreq_get_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req)
req               162 drivers/usb/renesas_usbhs/common.c 	req->bRequest		= (val >> 8) & 0xFF;
req               163 drivers/usb/renesas_usbhs/common.c 	req->bRequestType	= (val >> 0) & 0xFF;
req               165 drivers/usb/renesas_usbhs/common.c 	req->wValue	= cpu_to_le16(usbhs_read(priv, USBVAL));
req               166 drivers/usb/renesas_usbhs/common.c 	req->wIndex	= cpu_to_le16(usbhs_read(priv, USBINDX));
req               167 drivers/usb/renesas_usbhs/common.c 	req->wLength	= cpu_to_le16(usbhs_read(priv, USBLENG));
req               170 drivers/usb/renesas_usbhs/common.c void usbhs_usbreq_set_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req)
req               172 drivers/usb/renesas_usbhs/common.c 	usbhs_write(priv, USBREQ,  (req->bRequest << 8) | req->bRequestType);
req               173 drivers/usb/renesas_usbhs/common.c 	usbhs_write(priv, USBVAL,  le16_to_cpu(req->wValue));
req               174 drivers/usb/renesas_usbhs/common.c 	usbhs_write(priv, USBINDX, le16_to_cpu(req->wIndex));
req               175 drivers/usb/renesas_usbhs/common.c 	usbhs_write(priv, USBLENG, le16_to_cpu(req->wLength));
req               310 drivers/usb/renesas_usbhs/common.h void usbhs_usbreq_get_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req);
req               311 drivers/usb/renesas_usbhs/common.h void usbhs_usbreq_set_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req);
req                23 drivers/usb/renesas_usbhs/mod_gadget.c 	struct usb_request	req;
req                91 drivers/usb/renesas_usbhs/mod_gadget.c 	container_of(r, struct usbhsg_request, req)
req               130 drivers/usb/renesas_usbhs/mod_gadget.c 	ureq->req.status = status;
req               132 drivers/usb/renesas_usbhs/mod_gadget.c 	usb_gadget_giveback_request(&uep->ep, &ureq->req);
req               156 drivers/usb/renesas_usbhs/mod_gadget.c 	ureq->req.actual = pkt->actual;
req               171 drivers/usb/renesas_usbhs/mod_gadget.c 	struct usb_request *req = &ureq->req;
req               173 drivers/usb/renesas_usbhs/mod_gadget.c 	req->actual = 0;
req               174 drivers/usb/renesas_usbhs/mod_gadget.c 	req->status = -EINPROGRESS;
req               176 drivers/usb/renesas_usbhs/mod_gadget.c 		       req->buf, req->length, req->zero, -1);
req               181 drivers/usb/renesas_usbhs/mod_gadget.c 		req->length);
req               191 drivers/usb/renesas_usbhs/mod_gadget.c 	struct usb_request *req = &ureq->req;
req               200 drivers/usb/renesas_usbhs/mod_gadget.c 		WARN_ON(req->num_sgs);
req               202 drivers/usb/renesas_usbhs/mod_gadget.c 		ret = usb_gadget_map_request_by_dev(dma_dev, req, dir);
req               206 drivers/usb/renesas_usbhs/mod_gadget.c 		pkt->dma = req->dma;
req               208 drivers/usb/renesas_usbhs/mod_gadget.c 		usb_gadget_unmap_request_by_dev(dma_dev, req, dir);
req               302 drivers/usb/renesas_usbhs/mod_gadget.c 					 struct usb_request *req)
req               304 drivers/usb/renesas_usbhs/mod_gadget.c 	struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
req               308 drivers/usb/renesas_usbhs/mod_gadget.c 	usb_ep_free_request(ep, req);
req               317 drivers/usb/renesas_usbhs/mod_gadget.c 	struct usb_request *req;
req               321 drivers/usb/renesas_usbhs/mod_gadget.c 	req = usb_ep_alloc_request(&dcp->ep, GFP_ATOMIC);
req               322 drivers/usb/renesas_usbhs/mod_gadget.c 	if (!req) {
req               330 drivers/usb/renesas_usbhs/mod_gadget.c 		usb_ep_free_request(&dcp->ep, req);
req               338 drivers/usb/renesas_usbhs/mod_gadget.c 	req->complete	= __usbhsg_recip_send_complete;
req               339 drivers/usb/renesas_usbhs/mod_gadget.c 	req->buf	= buf;
req               340 drivers/usb/renesas_usbhs/mod_gadget.c 	req->length	= sizeof(*buf);
req               341 drivers/usb/renesas_usbhs/mod_gadget.c 	req->zero	= 0;
req               345 drivers/usb/renesas_usbhs/mod_gadget.c 	usbhsg_queue_push(dcp, usbhsg_req_to_ureq(req));
req               670 drivers/usb/renesas_usbhs/mod_gadget.c 	return &ureq->req;
req               674 drivers/usb/renesas_usbhs/mod_gadget.c 				   struct usb_request *req)
req               676 drivers/usb/renesas_usbhs/mod_gadget.c 	struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
req               682 drivers/usb/renesas_usbhs/mod_gadget.c static int usbhsg_ep_queue(struct usb_ep *ep, struct usb_request *req,
req               687 drivers/usb/renesas_usbhs/mod_gadget.c 	struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
req               701 drivers/usb/renesas_usbhs/mod_gadget.c static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
req               704 drivers/usb/renesas_usbhs/mod_gadget.c 	struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
req               738 drivers/usb/renesas_usbhs/mod_host.c 	struct usb_ctrlrequest *req;
req               740 drivers/usb/renesas_usbhs/mod_host.c 	req = (struct usb_ctrlrequest *)urb->setup_packet;
req               742 drivers/usb/renesas_usbhs/mod_host.c 	if ((DeviceOutRequest    == req->bRequestType << 8) &&
req               743 drivers/usb/renesas_usbhs/mod_host.c 	    (USB_REQ_SET_ADDRESS == req->bRequest))
req               754 drivers/usb/renesas_usbhs/mod_host.c 	struct usb_ctrlrequest req;
req               766 drivers/usb/renesas_usbhs/mod_host.c 	memcpy(&req, urb->setup_packet, sizeof(struct usb_ctrlrequest));
req               779 drivers/usb/renesas_usbhs/mod_host.c 		req.wValue = usbhsh_device_number(hpriv, udev);
req               780 drivers/usb/renesas_usbhs/mod_host.c 		dev_dbg(dev, "create new address - %d\n", req.wValue);
req               784 drivers/usb/renesas_usbhs/mod_host.c 	usbhs_usbreq_set_val(priv, &req);
req               525 drivers/usb/serial/cp210x.c static int cp210x_read_reg_block(struct usb_serial_port *port, u8 req,
req               544 drivers/usb/serial/cp210x.c 			req, REQTYPE_INTERFACE_TO_HOST, 0,
req               552 drivers/usb/serial/cp210x.c 				req, bufsize, result);
req               571 drivers/usb/serial/cp210x.c static int cp210x_read_u32_reg(struct usb_serial_port *port, u8 req, u32 *val)
req               576 drivers/usb/serial/cp210x.c 	err = cp210x_read_reg_block(port, req, &le32_val, sizeof(le32_val));
req               594 drivers/usb/serial/cp210x.c static int cp210x_read_u16_reg(struct usb_serial_port *port, u8 req, u16 *val)
req               599 drivers/usb/serial/cp210x.c 	err = cp210x_read_reg_block(port, req, &le16_val, sizeof(le16_val));
req               611 drivers/usb/serial/cp210x.c static int cp210x_read_u8_reg(struct usb_serial_port *port, u8 req, u8 *val)
req               613 drivers/usb/serial/cp210x.c 	return cp210x_read_reg_block(port, req, val, sizeof(*val));
req               654 drivers/usb/serial/cp210x.c static int cp210x_write_u16_reg(struct usb_serial_port *port, u8 req, u16 val)
req               661 drivers/usb/serial/cp210x.c 			req, REQTYPE_HOST_TO_INTERFACE, val,
req               666 drivers/usb/serial/cp210x.c 				req, result);
req               676 drivers/usb/serial/cp210x.c static int cp210x_write_reg_block(struct usb_serial_port *port, u8 req,
req               689 drivers/usb/serial/cp210x.c 			req, REQTYPE_HOST_TO_INTERFACE, 0,
req               699 drivers/usb/serial/cp210x.c 				req, bufsize, result);
req               710 drivers/usb/serial/cp210x.c static int cp210x_write_u32_reg(struct usb_serial_port *port, u8 req, u32 val)
req               716 drivers/usb/serial/cp210x.c 	return cp210x_write_reg_block(port, req, &le32_val, sizeof(le32_val));
req                17 drivers/usb/usbip/stub_rx.c 	struct usb_ctrlrequest *req;
req                19 drivers/usb/usbip/stub_rx.c 	req = (struct usb_ctrlrequest *) urb->setup_packet;
req                21 drivers/usb/usbip/stub_rx.c 	return (req->bRequest == USB_REQ_CLEAR_FEATURE) &&
req                22 drivers/usb/usbip/stub_rx.c 	       (req->bRequestType == USB_RECIP_ENDPOINT) &&
req                23 drivers/usb/usbip/stub_rx.c 	       (req->wValue == USB_ENDPOINT_HALT);
req                28 drivers/usb/usbip/stub_rx.c 	struct usb_ctrlrequest *req;
req                30 drivers/usb/usbip/stub_rx.c 	req = (struct usb_ctrlrequest *) urb->setup_packet;
req                32 drivers/usb/usbip/stub_rx.c 	return (req->bRequest == USB_REQ_SET_INTERFACE) &&
req                33 drivers/usb/usbip/stub_rx.c 		(req->bRequestType == USB_RECIP_INTERFACE);
req                38 drivers/usb/usbip/stub_rx.c 	struct usb_ctrlrequest *req;
req                40 drivers/usb/usbip/stub_rx.c 	req = (struct usb_ctrlrequest *) urb->setup_packet;
req                42 drivers/usb/usbip/stub_rx.c 	return (req->bRequest == USB_REQ_SET_CONFIGURATION) &&
req                43 drivers/usb/usbip/stub_rx.c 		(req->bRequestType == USB_RECIP_DEVICE);
req                48 drivers/usb/usbip/stub_rx.c 	struct usb_ctrlrequest *req;
req                52 drivers/usb/usbip/stub_rx.c 	req = (struct usb_ctrlrequest *) urb->setup_packet;
req                53 drivers/usb/usbip/stub_rx.c 	value = le16_to_cpu(req->wValue);
req                54 drivers/usb/usbip/stub_rx.c 	index = le16_to_cpu(req->wIndex);
req                56 drivers/usb/usbip/stub_rx.c 	if ((req->bRequest == USB_REQ_SET_FEATURE) &&
req                57 drivers/usb/usbip/stub_rx.c 	    (req->bRequestType == USB_RT_PORT) &&
req                67 drivers/usb/usbip/stub_rx.c 	struct usb_ctrlrequest *req;
req                73 drivers/usb/usbip/stub_rx.c 	req = (struct usb_ctrlrequest *) urb->setup_packet;
req                80 drivers/usb/usbip/stub_rx.c 	target_endp = le16_to_cpu(req->wIndex) & 0x000f;
req                83 drivers/usb/usbip/stub_rx.c 	target_dir = le16_to_cpu(req->wIndex) & 0x0080;
req               105 drivers/usb/usbip/stub_rx.c 	struct usb_ctrlrequest *req;
req               110 drivers/usb/usbip/stub_rx.c 	req = (struct usb_ctrlrequest *) urb->setup_packet;
req               111 drivers/usb/usbip/stub_rx.c 	alternate = le16_to_cpu(req->wValue);
req               112 drivers/usb/usbip/stub_rx.c 	interface = le16_to_cpu(req->wIndex);
req               134 drivers/usb/usbip/stub_rx.c 	struct usb_ctrlrequest *req;
req               138 drivers/usb/usbip/stub_rx.c 	req = (struct usb_ctrlrequest *) urb->setup_packet;
req               139 drivers/usb/usbip/stub_rx.c 	config = le16_to_cpu(req->wValue);
req                40 drivers/usb/usbip/vudc.h 	struct usb_request req;
req               130 drivers/usb/usbip/vudc.h 	return container_of(_req, struct vrequest, req);
req                75 drivers/usb/usbip/vudc_dev.c 	struct vrequest	*req;
req                78 drivers/usb/usbip/vudc_dev.c 		req = list_first_entry(&ep->req_queue, struct vrequest,
req                80 drivers/usb/usbip/vudc_dev.c 		list_del_init(&req->req_entry);
req                81 drivers/usb/usbip/vudc_dev.c 		req->req.status = -ESHUTDOWN;
req                84 drivers/usb/usbip/vudc_dev.c 		usb_gadget_giveback_request(&ep->ep, &req->req);
req               282 drivers/usb/usbip/vudc_dev.c 	struct vrequest *req;
req               287 drivers/usb/usbip/vudc_dev.c 	req = kzalloc(sizeof(*req), mem_flags);
req               288 drivers/usb/usbip/vudc_dev.c 	if (!req)
req               291 drivers/usb/usbip/vudc_dev.c 	INIT_LIST_HEAD(&req->req_entry);
req               293 drivers/usb/usbip/vudc_dev.c 	return &req->req;
req               298 drivers/usb/usbip/vudc_dev.c 	struct vrequest *req;
req               304 drivers/usb/usbip/vudc_dev.c 	req = to_vrequest(_req);
req               305 drivers/usb/usbip/vudc_dev.c 	kfree(req);
req               312 drivers/usb/usbip/vudc_dev.c 	struct vrequest *req;
req               320 drivers/usb/usbip/vudc_dev.c 	req = to_vrequest(_req);
req               327 drivers/usb/usbip/vudc_dev.c 	list_add_tail(&req->req_entry, &ep->req_queue);
req               336 drivers/usb/usbip/vudc_dev.c 	struct vrequest *req;
req               346 drivers/usb/usbip/vudc_dev.c 	req = to_vrequest(_req);
req               347 drivers/usb/usbip/vudc_dev.c 	udc = req->udc;
req               354 drivers/usb/usbip/vudc_dev.c 		if (&lst->req == _req) {
req                28 drivers/usb/usbip/vudc_sysfs.c 	struct usb_ctrlrequest req;
req                34 drivers/usb/usbip/vudc_sysfs.c 	req.bRequestType = USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
req                35 drivers/usb/usbip/vudc_sysfs.c 	req.bRequest = USB_REQ_GET_DESCRIPTOR;
req                36 drivers/usb/usbip/vudc_sysfs.c 	req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
req                37 drivers/usb/usbip/vudc_sysfs.c 	req.wIndex = cpu_to_le16(0);
req                38 drivers/usb/usbip/vudc_sysfs.c 	req.wLength = cpu_to_le16(sizeof(*ddesc));
req                41 drivers/usb/usbip/vudc_sysfs.c 	ret = udc->driver->setup(&(udc->gadget), &req);
req                50 drivers/usb/usbip/vudc_sysfs.c 	if (usb_req->req.length > sizeof(*ddesc)) {
req                55 drivers/usb/usbip/vudc_sysfs.c 	memcpy(ddesc, usb_req->req.buf, sizeof(*ddesc));
req                59 drivers/usb/usbip/vudc_sysfs.c 	usb_req->req.status = 0;
req                60 drivers/usb/usbip/vudc_sysfs.c 	usb_req->req.actual = usb_req->req.length;
req                61 drivers/usb/usbip/vudc_sysfs.c 	usb_gadget_giveback_request(&(ep0->ep), &(usb_req->req));
req               186 drivers/usb/usbip/vudc_transfer.c 	struct vrequest	*req;
req               190 drivers/usb/usbip/vudc_transfer.c 	list_for_each_entry(req, &ep->req_queue, req_entry) {
req               205 drivers/usb/usbip/vudc_transfer.c 		dev_len = req->req.length - req->req.actual;
req               223 drivers/usb/usbip/vudc_transfer.c 			rbuf_pos = req->req.buf + req->req.actual;
req               231 drivers/usb/usbip/vudc_transfer.c 			req->req.actual += len;
req               245 drivers/usb/usbip/vudc_transfer.c 				req->req.status = 0;
req               248 drivers/usb/usbip/vudc_transfer.c 				req->req.status = 0;
req               256 drivers/usb/usbip/vudc_transfer.c 					req->req.status = -EOVERFLOW;
req               258 drivers/usb/usbip/vudc_transfer.c 					req->req.status = 0;
req               264 drivers/usb/usbip/vudc_transfer.c 			if (req->req.length == req->req.actual) {
req               265 drivers/usb/usbip/vudc_transfer.c 				if (req->req.zero && to_host)
req               268 drivers/usb/usbip/vudc_transfer.c 					req->req.status = 0;
req               280 drivers/usb/usbip/vudc_transfer.c 		if (req->req.status != -EINPROGRESS) {
req               282 drivers/usb/usbip/vudc_transfer.c 			list_del_init(&req->req_entry);
req               284 drivers/usb/usbip/vudc_transfer.c 			usb_gadget_giveback_request(&ep->ep, &req->req);
req               221 drivers/vhost/scsi.c 	void *req;
req               886 drivers/vhost/scsi.c 	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
req               951 drivers/vhost/scsi.c 			vc.req = &v_req_pi;
req               956 drivers/vhost/scsi.c 			vc.req = &v_req;
req              1203 drivers/vhost/scsi.c 		vc.req = &v_req.type;
req              1206 drivers/vhost/scsi.c 		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
req              1220 drivers/vhost/scsi.c 			vc.req = &v_req.tmf;
req              1228 drivers/vhost/scsi.c 			vc.req = &v_req.an;
req              1251 drivers/vhost/scsi.c 		vc.req += typ_size;
req               387 drivers/video/fbdev/controlfb.c 	struct adb_request req;
req               391 drivers/video/fbdev/controlfb.c 		cuda_request(&req, NULL, 5, CUDA_PACKET, CUDA_GET_SET_IIC,
req               393 drivers/video/fbdev/controlfb.c 		while (!req.complete)
req                80 drivers/video/fbdev/omap/hwa742.c 	int		 (*handler)(struct hwa742_request *req);
req               230 drivers/video/fbdev/omap/hwa742.c 	struct hwa742_request *req;
req               240 drivers/video/fbdev/omap/hwa742.c 	req = list_entry(hwa742.free_req_list.next,
req               242 drivers/video/fbdev/omap/hwa742.c 	list_del(&req->entry);
req               245 drivers/video/fbdev/omap/hwa742.c 	INIT_LIST_HEAD(&req->entry);
req               246 drivers/video/fbdev/omap/hwa742.c 	req->flags = req_flags;
req               248 drivers/video/fbdev/omap/hwa742.c 	return req;
req               251 drivers/video/fbdev/omap/hwa742.c static inline void free_req(struct hwa742_request *req)
req               257 drivers/video/fbdev/omap/hwa742.c 	list_move(&req->entry, &hwa742.free_req_list);
req               258 drivers/video/fbdev/omap/hwa742.c 	if (!(req->flags & REQ_FROM_IRQ_POOL))
req               271 drivers/video/fbdev/omap/hwa742.c 		struct hwa742_request *req;
req               275 drivers/video/fbdev/omap/hwa742.c 		req = list_entry(hwa742.pending_req_list.next,
req               279 drivers/video/fbdev/omap/hwa742.c 		if (req->handler(req) == REQ_PENDING)
req               282 drivers/video/fbdev/omap/hwa742.c 		complete = req->complete;
req               283 drivers/video/fbdev/omap/hwa742.c 		complete_data = req->complete_data;
req               284 drivers/video/fbdev/omap/hwa742.c 		free_req(req);
req               312 drivers/video/fbdev/omap/hwa742.c 	struct hwa742_request	*req = (struct hwa742_request *)data;
req               316 drivers/video/fbdev/omap/hwa742.c 	complete = req->complete;
req               317 drivers/video/fbdev/omap/hwa742.c 	complete_data = req->complete_data;
req               319 drivers/video/fbdev/omap/hwa742.c 	free_req(req);
req               327 drivers/video/fbdev/omap/hwa742.c static int send_frame_handler(struct hwa742_request *req)
req               329 drivers/video/fbdev/omap/hwa742.c 	struct update_param *par = &req->par.update;
req               374 drivers/video/fbdev/omap/hwa742.c 	flags = req->par.update.flags;
req               392 drivers/video/fbdev/omap/hwa742.c 	hwa742.extif->transfer_area(w, h, request_complete, req);
req               403 drivers/video/fbdev/omap/hwa742.c 	req = alloc_req();			\
req               404 drivers/video/fbdev/omap/hwa742.c 	req->handler	= send_frame_handler;	\
req               405 drivers/video/fbdev/omap/hwa742.c 	req->complete	= send_frame_complete;	\
req               406 drivers/video/fbdev/omap/hwa742.c 	req->par.update.x = _x;			\
req               407 drivers/video/fbdev/omap/hwa742.c 	req->par.update.y = _y;			\
req               408 drivers/video/fbdev/omap/hwa742.c 	req->par.update.width  = _w;		\
req               409 drivers/video/fbdev/omap/hwa742.c 	req->par.update.height = _h;		\
req               410 drivers/video/fbdev/omap/hwa742.c 	req->par.update.color_mode = color_mode;\
req               411 drivers/video/fbdev/omap/hwa742.c 	req->par.update.flags	  = flags;	\
req               412 drivers/video/fbdev/omap/hwa742.c 	list_add_tail(&req->entry, req_head);	\
req               418 drivers/video/fbdev/omap/hwa742.c 	struct hwa742_request *req;
req               535 drivers/video/fbdev/omap/hwa742.c static int sync_handler(struct hwa742_request *req)
req               537 drivers/video/fbdev/omap/hwa742.c 	complete(req->par.sync);
req               544 drivers/video/fbdev/omap/hwa742.c 	struct hwa742_request *req;
req               547 drivers/video/fbdev/omap/hwa742.c 	req = alloc_req();
req               549 drivers/video/fbdev/omap/hwa742.c 	req->handler = sync_handler;
req               550 drivers/video/fbdev/omap/hwa742.c 	req->complete = NULL;
req               552 drivers/video/fbdev/omap/hwa742.c 	req->par.sync = &comp;
req               554 drivers/video/fbdev/omap/hwa742.c 	list_add(&req->entry, &req_list);
req               676 drivers/video/fbdev/sis/sis.h void			sis_malloc(struct sis_memreq *req);
req               677 drivers/video/fbdev/sis/sis.h void			sis_malloc_new(struct pci_dev *pdev, struct sis_memreq *req);
req              3412 drivers/video/fbdev/sis/sis_main.c sis_int_malloc(struct sis_video_info *ivideo, struct sis_memreq *req)
req              3417 drivers/video/fbdev/sis/sis_main.c 		poh = sisfb_poh_allocate(&ivideo->sisfb_heap, (u32)req->size);
req              3420 drivers/video/fbdev/sis/sis_main.c 		req->offset = req->size = 0;
req              3423 drivers/video/fbdev/sis/sis_main.c 		req->offset = poh->offset;
req              3424 drivers/video/fbdev/sis/sis_main.c 		req->size = poh->size;
req              3431 drivers/video/fbdev/sis/sis_main.c sis_malloc(struct sis_memreq *req)
req              3436 drivers/video/fbdev/sis/sis_main.c 		sis_int_malloc(ivideo, req);
req              3438 drivers/video/fbdev/sis/sis_main.c 		req->offset = req->size = 0;
req              3442 drivers/video/fbdev/sis/sis_main.c sis_malloc_new(struct pci_dev *pdev, struct sis_memreq *req)
req              3446 drivers/video/fbdev/sis/sis_main.c 	sis_int_malloc(ivideo, req);
req               261 drivers/video/fbdev/valkyriefb.c 	struct adb_request req;
req               265 drivers/video/fbdev/valkyriefb.c 		cuda_request(&req, NULL, 5, CUDA_PACKET, CUDA_GET_SET_IIC,
req               267 drivers/video/fbdev/valkyriefb.c 		while (!req.complete)
req                48 drivers/virt/vboxguest/vboxguest_core.c 	struct vmmdev_hypervisorinfo *req;
req                55 drivers/virt/vboxguest/vboxguest_core.c 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
req                57 drivers/virt/vboxguest/vboxguest_core.c 	if (!req)
req                60 drivers/virt/vboxguest/vboxguest_core.c 	req->hypervisor_start = 0;
req                61 drivers/virt/vboxguest/vboxguest_core.c 	req->hypervisor_size = 0;
req                62 drivers/virt/vboxguest/vboxguest_core.c 	rc = vbg_req_perform(gdev, req);
req                70 drivers/virt/vboxguest/vboxguest_core.c 	if (req->hypervisor_size == 0)
req                73 drivers/virt/vboxguest/vboxguest_core.c 	hypervisor_size = req->hypervisor_size;
req                75 drivers/virt/vboxguest/vboxguest_core.c 	size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;
req                98 drivers/virt/vboxguest/vboxguest_core.c 		req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
req                99 drivers/virt/vboxguest/vboxguest_core.c 		req->header.rc = VERR_INTERNAL_ERROR;
req               100 drivers/virt/vboxguest/vboxguest_core.c 		req->hypervisor_size = hypervisor_size;
req               101 drivers/virt/vboxguest/vboxguest_core.c 		req->hypervisor_start =
req               104 drivers/virt/vboxguest/vboxguest_core.c 		rc = vbg_req_perform(gdev, req);
req               122 drivers/virt/vboxguest/vboxguest_core.c 	vbg_req_free(req, sizeof(*req));
req               133 drivers/virt/vboxguest/vboxguest_core.c 	struct vmmdev_hypervisorinfo *req;
req               143 drivers/virt/vboxguest/vboxguest_core.c 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
req               145 drivers/virt/vboxguest/vboxguest_core.c 	if (!req)
req               148 drivers/virt/vboxguest/vboxguest_core.c 	req->hypervisor_start = 0;
req               149 drivers/virt/vboxguest/vboxguest_core.c 	req->hypervisor_size = 0;
req               151 drivers/virt/vboxguest/vboxguest_core.c 	rc = vbg_req_perform(gdev, req);
req               153 drivers/virt/vboxguest/vboxguest_core.c 	vbg_req_free(req, sizeof(*req));
req               238 drivers/virt/vboxguest/vboxguest_core.c 	struct vmmdev_guest_status *req;
req               241 drivers/virt/vboxguest/vboxguest_core.c 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
req               243 drivers/virt/vboxguest/vboxguest_core.c 	if (!req)
req               246 drivers/virt/vboxguest/vboxguest_core.c 	req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
req               248 drivers/virt/vboxguest/vboxguest_core.c 		req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
req               250 drivers/virt/vboxguest/vboxguest_core.c 		req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;
req               251 drivers/virt/vboxguest/vboxguest_core.c 	req->flags = 0;
req               253 drivers/virt/vboxguest/vboxguest_core.c 	rc = vbg_req_perform(gdev, req);
req               257 drivers/virt/vboxguest/vboxguest_core.c 	vbg_req_free(req, sizeof(*req));
req               270 drivers/virt/vboxguest/vboxguest_core.c 	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
req               280 drivers/virt/vboxguest/vboxguest_core.c 	req->header.size = sizeof(*req);
req               281 drivers/virt/vboxguest/vboxguest_core.c 	req->inflate = true;
req               282 drivers/virt/vboxguest/vboxguest_core.c 	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
req               291 drivers/virt/vboxguest/vboxguest_core.c 		req->phys_page[i] = page_to_phys(pages[i]);
req               294 drivers/virt/vboxguest/vboxguest_core.c 	rc = vbg_req_perform(gdev, req);
req               321 drivers/virt/vboxguest/vboxguest_core.c 	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
req               325 drivers/virt/vboxguest/vboxguest_core.c 	req->header.size = sizeof(*req);
req               326 drivers/virt/vboxguest/vboxguest_core.c 	req->inflate = false;
req               327 drivers/virt/vboxguest/vboxguest_core.c 	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
req               330 drivers/virt/vboxguest/vboxguest_core.c 		req->phys_page[i] = page_to_phys(pages[i]);
req               332 drivers/virt/vboxguest/vboxguest_core.c 	rc = vbg_req_perform(gdev, req);
req               354 drivers/virt/vboxguest/vboxguest_core.c 	struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
req               362 drivers/virt/vboxguest/vboxguest_core.c 	req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
req               363 drivers/virt/vboxguest/vboxguest_core.c 	rc = vbg_req_perform(gdev, req);
req               375 drivers/virt/vboxguest/vboxguest_core.c 			devm_kcalloc(gdev->dev, req->phys_mem_chunks,
req               380 drivers/virt/vboxguest/vboxguest_core.c 		gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
req               383 drivers/virt/vboxguest/vboxguest_core.c 	chunks = req->balloon_chunks;
req               432 drivers/virt/vboxguest/vboxguest_core.c 	struct vmmdev_heartbeat *req;
req               435 drivers/virt/vboxguest/vboxguest_core.c 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
req               437 drivers/virt/vboxguest/vboxguest_core.c 	if (!req)
req               440 drivers/virt/vboxguest/vboxguest_core.c 	req->enabled = enabled;
req               441 drivers/virt/vboxguest/vboxguest_core.c 	req->interval_ns = 0;
req               442 drivers/virt/vboxguest/vboxguest_core.c 	rc = vbg_req_perform(gdev, req);
req               443 drivers/virt/vboxguest/vboxguest_core.c 	do_div(req->interval_ns, 1000000); /* ns -> ms */
req               444 drivers/virt/vboxguest/vboxguest_core.c 	gdev->heartbeat_interval_ms = req->interval_ns;
req               445 drivers/virt/vboxguest/vboxguest_core.c 	vbg_req_free(req, sizeof(*req));
req               539 drivers/virt/vboxguest/vboxguest_core.c 	struct vmmdev_mask *req;
req               542 drivers/virt/vboxguest/vboxguest_core.c 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
req               544 drivers/virt/vboxguest/vboxguest_core.c 	if (!req)
req               547 drivers/virt/vboxguest/vboxguest_core.c 	req->not_mask = U32_MAX & ~fixed_events;
req               548 drivers/virt/vboxguest/vboxguest_core.c 	req->or_mask = fixed_events;
req               549 drivers/virt/vboxguest/vboxguest_core.c 	rc = vbg_req_perform(gdev, req);
req               553 drivers/virt/vboxguest/vboxguest_core.c 	vbg_req_free(req, sizeof(*req));
req               578 drivers/virt/vboxguest/vboxguest_core.c 	struct vmmdev_mask *req;
req               587 drivers/virt/vboxguest/vboxguest_core.c 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
req               590 drivers/virt/vboxguest/vboxguest_core.c 	if (!req) {
req               611 drivers/virt/vboxguest/vboxguest_core.c 	if (gdev->event_filter_host == or_mask || !req)
req               615 drivers/virt/vboxguest/vboxguest_core.c 	req->or_mask = or_mask;
req               616 drivers/virt/vboxguest/vboxguest_core.c 	req->not_mask = ~or_mask;
req               617 drivers/virt/vboxguest/vboxguest_core.c 	rc = vbg_req_perform(gdev, req);
req               633 drivers/virt/vboxguest/vboxguest_core.c 	vbg_req_free(req, sizeof(*req));
req               645 drivers/virt/vboxguest/vboxguest_core.c 	struct vmmdev_mask *req;
req               648 drivers/virt/vboxguest/vboxguest_core.c 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
req               650 drivers/virt/vboxguest/vboxguest_core.c 	if (!req)
req               653 drivers/virt/vboxguest/vboxguest_core.c 	req->not_mask = U32_MAX;
req               654 drivers/virt/vboxguest/vboxguest_core.c 	req->or_mask = 0;
req               655 drivers/virt/vboxguest/vboxguest_core.c 	rc = vbg_req_perform(gdev, req);
req               659 drivers/virt/vboxguest/vboxguest_core.c 	vbg_req_free(req, sizeof(*req));
req               680 drivers/virt/vboxguest/vboxguest_core.c 	struct vmmdev_mask *req;
req               689 drivers/virt/vboxguest/vboxguest_core.c 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
req               692 drivers/virt/vboxguest/vboxguest_core.c 	if (!req) {
req               713 drivers/virt/vboxguest/vboxguest_core.c 	if (gdev->guest_caps_host == or_mask || !req)
req               717 drivers/virt/vboxguest/vboxguest_core.c 	req->or_mask = or_mask;
req               718 drivers/virt/vboxguest/vboxguest_core.c 	req->not_mask = ~or_mask;
req               719 drivers/virt/vboxguest/vboxguest_core.c 	rc = vbg_req_perform(gdev, req);
req               735 drivers/virt/vboxguest/vboxguest_core.c 	vbg_req_free(req, sizeof(*req));
req               747 drivers/virt/vboxguest/vboxguest_core.c 	struct vmmdev_host_version *req;
req               750 drivers/virt/vboxguest/vboxguest_core.c 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
req               752 drivers/virt/vboxguest/vboxguest_core.c 	if (!req)
req               755 drivers/virt/vboxguest/vboxguest_core.c 	rc = vbg_req_perform(gdev, req);
req               763 drivers/virt/vboxguest/vboxguest_core.c 		 req->major, req->minor, req->build, req->revision);
req               764 drivers/virt/vboxguest/vboxguest_core.c 	gdev->host_features = req->features;
req               769 drivers/virt/vboxguest/vboxguest_core.c 	if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
req               775 drivers/virt/vboxguest/vboxguest_core.c 	vbg_req_free(req, sizeof(*req));
req              1110 drivers/virt/vboxguest/vboxguest_core.c 			   const struct vmmdev_request_header *req)
req              1115 drivers/virt/vboxguest/vboxguest_core.c 	switch (req->request_type) {
req              1162 drivers/virt/vboxguest/vboxguest_core.c 		guest_status = (const struct vmmdev_guest_status *)req;
req              1184 drivers/virt/vboxguest/vboxguest_core.c 			req->request_type);
req              1191 drivers/virt/vboxguest/vboxguest_core.c 			req->request_type);
req              1480 drivers/virt/vboxguest/vboxguest_core.c 	struct vmmdev_write_core_dump *req;
req              1485 drivers/virt/vboxguest/vboxguest_core.c 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
req              1487 drivers/virt/vboxguest/vboxguest_core.c 	if (!req)
req              1490 drivers/virt/vboxguest/vboxguest_core.c 	req->flags = dump->u.in.flags;
req              1491 drivers/virt/vboxguest/vboxguest_core.c 	dump->hdr.rc = vbg_req_perform(gdev, req);
req              1493 drivers/virt/vboxguest/vboxguest_core.c 	vbg_req_free(req, sizeof(*req));
req              1504 drivers/virt/vboxguest/vboxguest_core.c int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
req              1506 drivers/virt/vboxguest/vboxguest_core.c 	unsigned int req_no_size = req & ~IOCSIZE_MASK;
req              1522 drivers/virt/vboxguest/vboxguest_core.c 	    req == VBG_IOCTL_VMMDEV_REQUEST_BIG)
req              1529 drivers/virt/vboxguest/vboxguest_core.c 	switch (req) {
req              1563 drivers/virt/vboxguest/vboxguest_core.c 	vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
req              1576 drivers/virt/vboxguest/vboxguest_core.c 	struct vmmdev_mouse_status *req;
req              1579 drivers/virt/vboxguest/vboxguest_core.c 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
req              1581 drivers/virt/vboxguest/vboxguest_core.c 	if (!req)
req              1584 drivers/virt/vboxguest/vboxguest_core.c 	req->mouse_features = features;
req              1585 drivers/virt/vboxguest/vboxguest_core.c 	req->pointer_pos_x = 0;
req              1586 drivers/virt/vboxguest/vboxguest_core.c 	req->pointer_pos_y = 0;
req              1588 drivers/virt/vboxguest/vboxguest_core.c 	rc = vbg_req_perform(gdev, req);
req              1592 drivers/virt/vboxguest/vboxguest_core.c 	vbg_req_free(req, sizeof(*req));
req              1600 drivers/virt/vboxguest/vboxguest_core.c 	struct vmmdev_events *req = gdev->ack_events_req;
req              1610 drivers/virt/vboxguest/vboxguest_core.c 	req->header.rc = VERR_INTERNAL_ERROR;
req              1611 drivers/virt/vboxguest/vboxguest_core.c 	req->events = 0;
req              1612 drivers/virt/vboxguest/vboxguest_core.c 	rc = vbg_req_perform(gdev, req);
req              1618 drivers/virt/vboxguest/vboxguest_core.c 	events = req->events;
req               167 drivers/virt/vboxguest/vboxguest_core.h int  vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data);
req               177 drivers/virt/vboxguest/vboxguest_core.h void vbg_req_free(void *req, size_t len);
req               178 drivers/virt/vboxguest/vboxguest_core.h int vbg_req_perform(struct vbg_dev *gdev, void *req);
req               103 drivers/virt/vboxguest/vboxguest_linux.c static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
req               124 drivers/virt/vboxguest/vboxguest_linux.c 	if (_IOC_SIZE(req) && _IOC_SIZE(req) != size)
req               133 drivers/virt/vboxguest/vboxguest_linux.c 	is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) ||
req               134 drivers/virt/vboxguest/vboxguest_linux.c 			 req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
req               153 drivers/virt/vboxguest/vboxguest_linux.c 	ret = vbg_core_ioctl(session, req, buf);
req                68 drivers/virt/vboxguest/vboxguest_utils.c 	struct vmmdev_request_header *req;
req                71 drivers/virt/vboxguest/vboxguest_utils.c 	req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
req                72 drivers/virt/vboxguest/vboxguest_utils.c 	if (!req)
req                75 drivers/virt/vboxguest/vboxguest_utils.c 	memset(req, 0xaa, len);
req                77 drivers/virt/vboxguest/vboxguest_utils.c 	req->size = len;
req                78 drivers/virt/vboxguest/vboxguest_utils.c 	req->version = VMMDEV_REQUEST_HEADER_VERSION;
req                79 drivers/virt/vboxguest/vboxguest_utils.c 	req->request_type = req_type;
req                80 drivers/virt/vboxguest/vboxguest_utils.c 	req->rc = VERR_GENERAL_FAILURE;
req                81 drivers/virt/vboxguest/vboxguest_utils.c 	req->reserved1 = 0;
req                82 drivers/virt/vboxguest/vboxguest_utils.c 	req->requestor = requestor;
req                84 drivers/virt/vboxguest/vboxguest_utils.c 	return req;
req                87 drivers/virt/vboxguest/vboxguest_utils.c void vbg_req_free(void *req, size_t len)
req                89 drivers/virt/vboxguest/vboxguest_utils.c 	if (!req)
req                92 drivers/virt/vboxguest/vboxguest_utils.c 	free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
req                96 drivers/virt/vboxguest/vboxguest_utils.c int vbg_req_perform(struct vbg_dev *gdev, void *req)
req                98 drivers/virt/vboxguest/vboxguest_utils.c 	unsigned long phys_req = virt_to_phys(req);
req               107 drivers/virt/vboxguest/vboxguest_utils.c 	return ((struct vmmdev_request_header *)req)->rc;
req               879 drivers/visorbus/visorchipset.c 	struct parahotplug_request *req;
req               881 drivers/visorbus/visorchipset.c 	req = kmalloc(sizeof(*req), GFP_KERNEL);
req               882 drivers/visorbus/visorchipset.c 	if (!req)
req               884 drivers/visorbus/visorchipset.c 	req->id = parahotplug_next_id();
req               885 drivers/visorbus/visorchipset.c 	req->expiration = parahotplug_next_expiration();
req               886 drivers/visorbus/visorchipset.c 	req->msg = *msg;
req               887 drivers/visorbus/visorchipset.c 	return req;
req               894 drivers/visorbus/visorchipset.c static void parahotplug_request_destroy(struct parahotplug_request *req)
req               896 drivers/visorbus/visorchipset.c 	kfree(req);
req               918 drivers/visorbus/visorchipset.c 	struct parahotplug_request *req;
req               923 drivers/visorbus/visorchipset.c 		req = list_entry(pos, struct parahotplug_request, list);
req               924 drivers/visorbus/visorchipset.c 		if (req->id == id) {
req               931 drivers/visorbus/visorchipset.c 			req->msg.cmd.device_change_state.state.active = active;
req               932 drivers/visorbus/visorchipset.c 			if (req->msg.hdr.flags.response_expected)
req               934 drivers/visorbus/visorchipset.c 				       &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
req               935 drivers/visorbus/visorchipset.c 				       &req->msg.cmd.device_change_state.state);
req               936 drivers/visorbus/visorchipset.c 			parahotplug_request_destroy(req);
req              1037 drivers/visorbus/visorchipset.c static int parahotplug_request_kickoff(struct parahotplug_request *req)
req              1039 drivers/visorbus/visorchipset.c 	struct controlvm_message_packet *cmd = &req->msg.cmd;
req              1047 drivers/visorbus/visorchipset.c 	sprintf(env_id, "VISOR_PARAHOTPLUG_ID=%d", req->id);
req              1067 drivers/visorbus/visorchipset.c 	struct parahotplug_request *req;
req              1070 drivers/visorbus/visorchipset.c 	req = parahotplug_request_create(inmsg);
req              1071 drivers/visorbus/visorchipset.c 	if (!req)
req              1078 drivers/visorbus/visorchipset.c 		err = parahotplug_request_kickoff(req);
req              1083 drivers/visorbus/visorchipset.c 		parahotplug_request_destroy(req);
req              1092 drivers/visorbus/visorchipset.c 	list_add_tail(&req->list, &parahotplug_request_list);
req              1094 drivers/visorbus/visorchipset.c 	err = parahotplug_request_kickoff(req);
req              1483 drivers/visorbus/visorchipset.c 		struct parahotplug_request *req =
req              1486 drivers/visorbus/visorchipset.c 		if (!time_after_eq(jiffies, req->expiration))
req              1489 drivers/visorbus/visorchipset.c 		if (req->msg.hdr.flags.response_expected)
req              1491 drivers/visorbus/visorchipset.c 				&req->msg.hdr,
req              1493 drivers/visorbus/visorchipset.c 				&req->msg.cmd.device_change_state.state);
req              1494 drivers/visorbus/visorchipset.c 		parahotplug_request_destroy(req);
req               135 drivers/watchdog/hpwdt.c static int hpwdt_set_pretimeout(struct watchdog_device *wdd, unsigned int req)
req               139 drivers/watchdog/hpwdt.c 	dev_dbg(wdd->parent, "set_pretimeout = %d\n", req);
req               140 drivers/watchdog/hpwdt.c 	if (req) {
req               146 drivers/watchdog/hpwdt.c 	if (val != req)
req               168 drivers/watchdog/mei_wdt.c 	struct mei_wdt_start_request req;
req               169 drivers/watchdog/mei_wdt.c 	const size_t req_len = sizeof(req);
req               172 drivers/watchdog/mei_wdt.c 	memset(&req, 0, req_len);
req               173 drivers/watchdog/mei_wdt.c 	req.hdr.command = MEI_MANAGEMENT_CONTROL;
req               174 drivers/watchdog/mei_wdt.c 	req.hdr.bytecount = req_len - offsetof(struct mei_mc_hdr, subcommand);
req               175 drivers/watchdog/mei_wdt.c 	req.hdr.subcommand = MEI_MC_START_WD_TIMER_REQ;
req               176 drivers/watchdog/mei_wdt.c 	req.hdr.versionnumber = MEI_MC_VERSION_NUMBER;
req               177 drivers/watchdog/mei_wdt.c 	req.timeout = wdt->timeout;
req               179 drivers/watchdog/mei_wdt.c 	ret = mei_cldev_send(wdt->cldev, (u8 *)&req, req_len);
req               196 drivers/watchdog/mei_wdt.c 	struct mei_wdt_stop_request req;
req               197 drivers/watchdog/mei_wdt.c 	const size_t req_len = sizeof(req);
req               200 drivers/watchdog/mei_wdt.c 	memset(&req, 0, req_len);
req               201 drivers/watchdog/mei_wdt.c 	req.hdr.command = MEI_MANAGEMENT_CONTROL;
req               202 drivers/watchdog/mei_wdt.c 	req.hdr.bytecount = req_len - offsetof(struct mei_mc_hdr, subcommand);
req               203 drivers/watchdog/mei_wdt.c 	req.hdr.subcommand = MEI_MC_STOP_WD_TIMER_REQ;
req               204 drivers/watchdog/mei_wdt.c 	req.hdr.versionnumber = MEI_MC_VERSION_NUMBER;
req               206 drivers/watchdog/mei_wdt.c 	ret = mei_cldev_send(wdt->cldev, (u8 *)&req, req_len);
req               247 drivers/xen/pvcalls-back.c 		struct xen_pvcalls_request *req)
req               255 drivers/xen/pvcalls-back.c 	if (req->u.socket.domain != AF_INET ||
req               256 drivers/xen/pvcalls-back.c 	    req->u.socket.type != SOCK_STREAM ||
req               257 drivers/xen/pvcalls-back.c 	    (req->u.socket.protocol != IPPROTO_IP &&
req               258 drivers/xen/pvcalls-back.c 	     req->u.socket.protocol != AF_INET))
req               266 drivers/xen/pvcalls-back.c 	rsp->req_id = req->req_id;
req               267 drivers/xen/pvcalls-back.c 	rsp->cmd = req->cmd;
req               268 drivers/xen/pvcalls-back.c 	rsp->u.socket.id = req->u.socket.id;
req               377 drivers/xen/pvcalls-back.c 				struct xen_pvcalls_request *req)
req               384 drivers/xen/pvcalls-back.c 	struct sockaddr *sa = (struct sockaddr *)&req->u.connect.addr;
req               388 drivers/xen/pvcalls-back.c 	if (req->u.connect.len < sizeof(sa->sa_family) ||
req               389 drivers/xen/pvcalls-back.c 	    req->u.connect.len > sizeof(req->u.connect.addr) ||
req               396 drivers/xen/pvcalls-back.c 	ret = inet_stream_connect(sock, sa, req->u.connect.len, 0);
req               403 drivers/xen/pvcalls-back.c 					req->u.connect.id,
req               404 drivers/xen/pvcalls-back.c 					req->u.connect.ref,
req               405 drivers/xen/pvcalls-back.c 					req->u.connect.evtchn,
req               414 drivers/xen/pvcalls-back.c 	rsp->req_id = req->req_id;
req               415 drivers/xen/pvcalls-back.c 	rsp->cmd = req->cmd;
req               416 drivers/xen/pvcalls-back.c 	rsp->u.connect.id = req->u.connect.id;
req               466 drivers/xen/pvcalls-back.c 				struct xen_pvcalls_request *req)
req               478 drivers/xen/pvcalls-back.c 		if (map->id == req->u.release.id) {
req               486 drivers/xen/pvcalls-back.c 				    req->u.release.id);
req               496 drivers/xen/pvcalls-back.c 	rsp->req_id = req->req_id;
req               497 drivers/xen/pvcalls-back.c 	rsp->u.release.id = req->u.release.id;
req               498 drivers/xen/pvcalls-back.c 	rsp->cmd = req->cmd;
req               512 drivers/xen/pvcalls-back.c 	struct xen_pvcalls_request *req;
req               525 drivers/xen/pvcalls-back.c 	req = &mappass->reqcopy;
req               526 drivers/xen/pvcalls-back.c 	if (req->cmd != PVCALLS_ACCEPT) {
req               545 drivers/xen/pvcalls-back.c 					req->u.accept.id_new,
req               546 drivers/xen/pvcalls-back.c 					req->u.accept.ref,
req               547 drivers/xen/pvcalls-back.c 					req->u.accept.evtchn,
req               563 drivers/xen/pvcalls-back.c 	rsp->req_id = req->req_id;
req               564 drivers/xen/pvcalls-back.c 	rsp->cmd = req->cmd;
req               565 drivers/xen/pvcalls-back.c 	rsp->u.accept.id = req->u.accept.id;
req               608 drivers/xen/pvcalls-back.c 			     struct xen_pvcalls_request *req)
req               635 drivers/xen/pvcalls-back.c 	ret = inet_bind(map->sock, (struct sockaddr *)&req->u.bind.addr,
req               636 drivers/xen/pvcalls-back.c 			req->u.bind.len);
req               641 drivers/xen/pvcalls-back.c 	map->id = req->u.bind.id;
req               665 drivers/xen/pvcalls-back.c 	rsp->req_id = req->req_id;
req               666 drivers/xen/pvcalls-back.c 	rsp->cmd = req->cmd;
req               667 drivers/xen/pvcalls-back.c 	rsp->u.bind.id = req->u.bind.id;
req               673 drivers/xen/pvcalls-back.c 			       struct xen_pvcalls_request *req)
req               683 drivers/xen/pvcalls-back.c 	map = radix_tree_lookup(&fedata->socketpass_mappings, req->u.listen.id);
req               688 drivers/xen/pvcalls-back.c 	ret = inet_listen(map->sock, req->u.listen.backlog);
req               692 drivers/xen/pvcalls-back.c 	rsp->req_id = req->req_id;
req               693 drivers/xen/pvcalls-back.c 	rsp->cmd = req->cmd;
req               694 drivers/xen/pvcalls-back.c 	rsp->u.listen.id = req->u.listen.id;
req               700 drivers/xen/pvcalls-back.c 			       struct xen_pvcalls_request *req)
req               712 drivers/xen/pvcalls-back.c 		req->u.accept.id);
req               728 drivers/xen/pvcalls-back.c 	mappass->reqcopy = *req;
req               737 drivers/xen/pvcalls-back.c 	rsp->req_id = req->req_id;
req               738 drivers/xen/pvcalls-back.c 	rsp->cmd = req->cmd;
req               739 drivers/xen/pvcalls-back.c 	rsp->u.accept.id = req->u.accept.id;
req               745 drivers/xen/pvcalls-back.c 			     struct xen_pvcalls_request *req)
req               760 drivers/xen/pvcalls-back.c 				    req->u.poll.id);
req               775 drivers/xen/pvcalls-back.c 	mappass->reqcopy = *req;
req               793 drivers/xen/pvcalls-back.c 	rsp->req_id = req->req_id;
req               794 drivers/xen/pvcalls-back.c 	rsp->cmd = req->cmd;
req               795 drivers/xen/pvcalls-back.c 	rsp->u.poll.id = req->u.poll.id;
req               801 drivers/xen/pvcalls-back.c 				   struct xen_pvcalls_request *req)
req               805 drivers/xen/pvcalls-back.c 	switch (req->cmd) {
req               807 drivers/xen/pvcalls-back.c 		ret = pvcalls_back_socket(dev, req);
req               810 drivers/xen/pvcalls-back.c 		ret = pvcalls_back_connect(dev, req);
req               813 drivers/xen/pvcalls-back.c 		ret = pvcalls_back_release(dev, req);
req               816 drivers/xen/pvcalls-back.c 		ret = pvcalls_back_bind(dev, req);
req               819 drivers/xen/pvcalls-back.c 		ret = pvcalls_back_listen(dev, req);
req               822 drivers/xen/pvcalls-back.c 		ret = pvcalls_back_accept(dev, req);
req               825 drivers/xen/pvcalls-back.c 		ret = pvcalls_back_poll(dev, req);
req               835 drivers/xen/pvcalls-back.c 		rsp->req_id = req->req_id;
req               836 drivers/xen/pvcalls-back.c 		rsp->cmd = req->cmd;
req               847 drivers/xen/pvcalls-back.c 	struct xen_pvcalls_request req;
req               854 drivers/xen/pvcalls-back.c 					  &req);
req               856 drivers/xen/pvcalls-back.c 			if (!pvcalls_back_handle_cmd(dev, &req)) {
req               264 drivers/xen/pvcalls-front.c 	struct xen_pvcalls_request *req;
req               309 drivers/xen/pvcalls-front.c 	req = RING_GET_REQUEST(&bedata->ring, req_id);
req               310 drivers/xen/pvcalls-front.c 	req->req_id = req_id;
req               311 drivers/xen/pvcalls-front.c 	req->cmd = PVCALLS_SOCKET;
req               312 drivers/xen/pvcalls-front.c 	req->u.socket.id = (uintptr_t) map;
req               313 drivers/xen/pvcalls-front.c 	req->u.socket.domain = AF_INET;
req               314 drivers/xen/pvcalls-front.c 	req->u.socket.type = SOCK_STREAM;
req               315 drivers/xen/pvcalls-front.c 	req->u.socket.protocol = IPPROTO_IP;
req               417 drivers/xen/pvcalls-front.c 	struct xen_pvcalls_request *req;
req               450 drivers/xen/pvcalls-front.c 	req = RING_GET_REQUEST(&bedata->ring, req_id);
req               451 drivers/xen/pvcalls-front.c 	req->req_id = req_id;
req               452 drivers/xen/pvcalls-front.c 	req->cmd = PVCALLS_CONNECT;
req               453 drivers/xen/pvcalls-front.c 	req->u.connect.id = (uintptr_t)map;
req               454 drivers/xen/pvcalls-front.c 	req->u.connect.len = addr_len;
req               455 drivers/xen/pvcalls-front.c 	req->u.connect.flags = flags;
req               456 drivers/xen/pvcalls-front.c 	req->u.connect.ref = map->active.ref;
req               457 drivers/xen/pvcalls-front.c 	req->u.connect.evtchn = evtchn;
req               458 drivers/xen/pvcalls-front.c 	memcpy(req->u.connect.addr, addr, sizeof(*addr));
req               664 drivers/xen/pvcalls-front.c 	struct xen_pvcalls_request *req;
req               682 drivers/xen/pvcalls-front.c 	req = RING_GET_REQUEST(&bedata->ring, req_id);
req               683 drivers/xen/pvcalls-front.c 	req->req_id = req_id;
req               685 drivers/xen/pvcalls-front.c 	req->cmd = PVCALLS_BIND;
req               686 drivers/xen/pvcalls-front.c 	req->u.bind.id = (uintptr_t)map;
req               687 drivers/xen/pvcalls-front.c 	memcpy(req->u.bind.addr, addr, sizeof(*addr));
req               688 drivers/xen/pvcalls-front.c 	req->u.bind.len = addr_len;
req               717 drivers/xen/pvcalls-front.c 	struct xen_pvcalls_request *req;
req               737 drivers/xen/pvcalls-front.c 	req = RING_GET_REQUEST(&bedata->ring, req_id);
req               738 drivers/xen/pvcalls-front.c 	req->req_id = req_id;
req               739 drivers/xen/pvcalls-front.c 	req->cmd = PVCALLS_LISTEN;
req               740 drivers/xen/pvcalls-front.c 	req->u.listen.id = (uintptr_t) map;
req               741 drivers/xen/pvcalls-front.c 	req->u.listen.backlog = backlog;
req               767 drivers/xen/pvcalls-front.c 	struct xen_pvcalls_request *req;
req               844 drivers/xen/pvcalls-front.c 	req = RING_GET_REQUEST(&bedata->ring, req_id);
req               845 drivers/xen/pvcalls-front.c 	req->req_id = req_id;
req               846 drivers/xen/pvcalls-front.c 	req->cmd = PVCALLS_ACCEPT;
req               847 drivers/xen/pvcalls-front.c 	req->u.accept.id = (uintptr_t) map;
req               848 drivers/xen/pvcalls-front.c 	req->u.accept.ref = map2->active.ref;
req               849 drivers/xen/pvcalls-front.c 	req->u.accept.id_new = (uintptr_t) map2;
req               850 drivers/xen/pvcalls-front.c 	req->u.accept.evtchn = evtchn;
req               904 drivers/xen/pvcalls-front.c 	struct xen_pvcalls_request *req;
req               940 drivers/xen/pvcalls-front.c 	req = RING_GET_REQUEST(&bedata->ring, req_id);
req               941 drivers/xen/pvcalls-front.c 	req->req_id = req_id;
req               942 drivers/xen/pvcalls-front.c 	req->cmd = PVCALLS_POLL;
req               943 drivers/xen/pvcalls-front.c 	req->u.poll.id = (uintptr_t) map;
req              1003 drivers/xen/pvcalls-front.c 	struct xen_pvcalls_request *req;
req              1026 drivers/xen/pvcalls-front.c 	req = RING_GET_REQUEST(&bedata->ring, req_id);
req              1027 drivers/xen/pvcalls-front.c 	req->req_id = req_id;
req              1028 drivers/xen/pvcalls-front.c 	req->cmd = PVCALLS_RELEASE;
req              1029 drivers/xen/pvcalls-front.c 	req->u.release.id = (uintptr_t)map;
req               253 drivers/xen/xen-scsiback.c static unsigned long vaddr(struct vscsibk_pend *req, int seg)
req               255 drivers/xen/xen-scsiback.c 	return vaddr_page(req->pages[seg]);
req               269 drivers/xen/xen-scsiback.c static void scsiback_fast_flush_area(struct vscsibk_pend *req)
req               277 drivers/xen/xen-scsiback.c 	kfree(req->sgl);
req               278 drivers/xen/xen-scsiback.c 	req->sgl = NULL;
req               279 drivers/xen/xen-scsiback.c 	req->n_sg = 0;
req               281 drivers/xen/xen-scsiback.c 	if (!req->n_grants)
req               284 drivers/xen/xen-scsiback.c 	for (i = 0; i < req->n_grants; i++) {
req               285 drivers/xen/xen-scsiback.c 		handle = req->grant_handles[i];
req               288 drivers/xen/xen-scsiback.c 		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
req               290 drivers/xen/xen-scsiback.c 		req->grant_handles[i] = SCSIBACK_INVALID_HANDLE;
req               291 drivers/xen/xen-scsiback.c 		pages[invcount] = req->pages[i];
req               306 drivers/xen/xen-scsiback.c 	put_free_pages(req->pages, req->n_grants);
req               307 drivers/xen/xen-scsiback.c 	req->n_grants = 0;
req               656 drivers/xen/xen-scsiback.c 	struct vscsibk_pend *req;
req               665 drivers/xen/xen-scsiback.c 	req = &((struct vscsibk_pend *)se_sess->sess_cmd_map)[tag];
req               666 drivers/xen/xen-scsiback.c 	memset(req, 0, sizeof(*req));
req               667 drivers/xen/xen-scsiback.c 	req->se_cmd.map_tag = tag;
req               668 drivers/xen/xen-scsiback.c 	req->se_cmd.map_cpu = cpu;
req               671 drivers/xen/xen-scsiback.c 		req->grant_handles[i] = SCSIBACK_INVALID_HANDLE;
req               673 drivers/xen/xen-scsiback.c 	return req;
req               103 drivers/xen/xenbus/xenbus.h void xs_request_exit(struct xb_req_data *req);
req               135 drivers/xen/xenbus/xenbus.h void xenbus_dev_queue_reply(struct xb_req_data *req);
req               137 drivers/xen/xenbus/xenbus_comms.c 		dst = get_output_chunk(cons, prod, intf->req, &avail);
req               226 drivers/xen/xenbus/xenbus_comms.c 	struct xb_req_data *req;
req               300 drivers/xen/xenbus/xenbus_comms.c 		list_for_each_entry(req, &xs_reply_list, list) {
req               301 drivers/xen/xenbus/xenbus_comms.c 			if (req->msg.req_id == state.msg.req_id) {
req               302 drivers/xen/xenbus/xenbus_comms.c 				list_del(&req->list);
req               311 drivers/xen/xenbus/xenbus_comms.c 		if (req->state == xb_req_state_wait_reply) {
req               312 drivers/xen/xenbus/xenbus_comms.c 			req->msg.req_id = req->caller_req_id;
req               313 drivers/xen/xenbus/xenbus_comms.c 			req->msg.type = state.msg.type;
req               314 drivers/xen/xenbus/xenbus_comms.c 			req->msg.len = state.msg.len;
req               315 drivers/xen/xenbus/xenbus_comms.c 			req->body = state.body;
req               318 drivers/xen/xenbus/xenbus_comms.c 			req->state = xb_req_state_got_reply;
req               319 drivers/xen/xenbus/xenbus_comms.c 			req->cb(req);
req               321 drivers/xen/xenbus/xenbus_comms.c 			kfree(req);
req               341 drivers/xen/xenbus/xenbus_comms.c 		struct xb_req_data *req;
req               354 drivers/xen/xenbus/xenbus_comms.c 	if (!state.req) {
req               355 drivers/xen/xenbus/xenbus_comms.c 		state.req = list_first_entry(&xb_write_list,
req               361 drivers/xen/xenbus/xenbus_comms.c 	if (state.req->state == xb_req_state_aborted)
req               364 drivers/xen/xenbus/xenbus_comms.c 	while (state.idx < state.req->num_vecs) {
req               366 drivers/xen/xenbus/xenbus_comms.c 			base = &state.req->msg;
req               367 drivers/xen/xenbus/xenbus_comms.c 			len = sizeof(state.req->msg);
req               369 drivers/xen/xenbus/xenbus_comms.c 			base = state.req->vec[state.idx].iov_base;
req               370 drivers/xen/xenbus/xenbus_comms.c 			len = state.req->vec[state.idx].iov_len;
req               383 drivers/xen/xenbus/xenbus_comms.c 	list_del(&state.req->list);
req               384 drivers/xen/xenbus/xenbus_comms.c 	state.req->state = xb_req_state_wait_reply;
req               385 drivers/xen/xenbus/xenbus_comms.c 	list_add_tail(&state.req->list, &xs_reply_list);
req               386 drivers/xen/xenbus/xenbus_comms.c 	state.req = NULL;
req               394 drivers/xen/xenbus/xenbus_comms.c 	state.req->msg.type = XS_ERROR;
req               395 drivers/xen/xenbus/xenbus_comms.c 	state.req->err = err;
req               396 drivers/xen/xenbus/xenbus_comms.c 	list_del(&state.req->list);
req               397 drivers/xen/xenbus/xenbus_comms.c 	if (state.req->state == xb_req_state_aborted)
req               398 drivers/xen/xenbus/xenbus_comms.c 		kfree(state.req);
req               402 drivers/xen/xenbus/xenbus_comms.c 		state.req->state = xb_req_state_got_reply;
req               403 drivers/xen/xenbus/xenbus_comms.c 		wake_up(&state.req->wq);
req               408 drivers/xen/xenbus/xenbus_comms.c 	state.req = NULL;
req               363 drivers/xen/xenbus/xenbus_dev_frontend.c void xenbus_dev_queue_reply(struct xb_req_data *req)
req               365 drivers/xen/xenbus/xenbus_dev_frontend.c 	struct xenbus_file_priv *u = req->par;
req               370 drivers/xen/xenbus/xenbus_dev_frontend.c 	xs_request_exit(req);
req               374 drivers/xen/xenbus/xenbus_dev_frontend.c 	if (req->type == XS_TRANSACTION_START) {
req               378 drivers/xen/xenbus/xenbus_dev_frontend.c 		if (req->msg.type == XS_ERROR) {
req               382 drivers/xen/xenbus/xenbus_dev_frontend.c 			rc = kstrtou32(req->body, 10, &trans->handle.id);
req               386 drivers/xen/xenbus/xenbus_dev_frontend.c 	} else if (req->type == XS_TRANSACTION_END) {
req               387 drivers/xen/xenbus/xenbus_dev_frontend.c 		trans = xenbus_get_transaction(u, req->msg.tx_id);
req               397 drivers/xen/xenbus/xenbus_dev_frontend.c 	rc = queue_reply(&staging_q, &req->msg, sizeof(req->msg));
req               399 drivers/xen/xenbus/xenbus_dev_frontend.c 		rc = queue_reply(&staging_q, req->body, req->msg.len);
req               408 drivers/xen/xenbus/xenbus_dev_frontend.c 	kfree(req->body);
req               409 drivers/xen/xenbus/xenbus_dev_frontend.c 	kfree(req);
req               115 drivers/xen/xenbus/xenbus_xs.c static uint32_t xs_request_enter(struct xb_req_data *req)
req               119 drivers/xen/xenbus/xenbus_xs.c 	req->type = req->msg.type;
req               129 drivers/xen/xenbus/xenbus_xs.c 	if (req->type == XS_TRANSACTION_START && !req->user_req)
req               139 drivers/xen/xenbus/xenbus_xs.c void xs_request_exit(struct xb_req_data *req)
req               143 drivers/xen/xenbus/xenbus_xs.c 	if ((req->type == XS_TRANSACTION_START && req->msg.type == XS_ERROR) ||
req               144 drivers/xen/xenbus/xenbus_xs.c 	    (req->type == XS_TRANSACTION_END && !req->user_req &&
req               145 drivers/xen/xenbus/xenbus_xs.c 	     !WARN_ON_ONCE(req->msg.type == XS_ERROR &&
req               146 drivers/xen/xenbus/xenbus_xs.c 			   !strcmp(req->body, "ENOENT"))))
req               192 drivers/xen/xenbus/xenbus_xs.c static bool test_reply(struct xb_req_data *req)
req               194 drivers/xen/xenbus/xenbus_xs.c 	if (req->state == xb_req_state_got_reply || !xenbus_ok()) {
req               206 drivers/xen/xenbus/xenbus_xs.c static void *read_reply(struct xb_req_data *req)
req               209 drivers/xen/xenbus/xenbus_xs.c 		wait_event(req->wq, test_reply(req));
req               219 drivers/xen/xenbus/xenbus_xs.c 		if (req->err)
req               220 drivers/xen/xenbus/xenbus_xs.c 			return ERR_PTR(req->err);
req               222 drivers/xen/xenbus/xenbus_xs.c 	} while (req->state != xb_req_state_got_reply);
req               224 drivers/xen/xenbus/xenbus_xs.c 	return req->body;
req               227 drivers/xen/xenbus/xenbus_xs.c static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg)
req               231 drivers/xen/xenbus/xenbus_xs.c 	req->msg = *msg;
req               232 drivers/xen/xenbus/xenbus_xs.c 	req->err = 0;
req               233 drivers/xen/xenbus/xenbus_xs.c 	req->state = xb_req_state_queued;
req               234 drivers/xen/xenbus/xenbus_xs.c 	init_waitqueue_head(&req->wq);
req               237 drivers/xen/xenbus/xenbus_xs.c 	req->caller_req_id = req->msg.req_id;
req               238 drivers/xen/xenbus/xenbus_xs.c 	req->msg.req_id = xs_request_enter(req);
req               241 drivers/xen/xenbus/xenbus_xs.c 	list_add_tail(&req->list, &xb_write_list);
req               249 drivers/xen/xenbus/xenbus_xs.c static void *xs_wait_for_reply(struct xb_req_data *req, struct xsd_sockmsg *msg)
req               253 drivers/xen/xenbus/xenbus_xs.c 	ret = read_reply(req);
req               255 drivers/xen/xenbus/xenbus_xs.c 	xs_request_exit(req);
req               257 drivers/xen/xenbus/xenbus_xs.c 	msg->type = req->msg.type;
req               258 drivers/xen/xenbus/xenbus_xs.c 	msg->len = req->msg.len;
req               261 drivers/xen/xenbus/xenbus_xs.c 	if (req->state == xb_req_state_queued ||
req               262 drivers/xen/xenbus/xenbus_xs.c 	    req->state == xb_req_state_wait_reply)
req               263 drivers/xen/xenbus/xenbus_xs.c 		req->state = xb_req_state_aborted;
req               265 drivers/xen/xenbus/xenbus_xs.c 		kfree(req);
req               271 drivers/xen/xenbus/xenbus_xs.c static void xs_wake_up(struct xb_req_data *req)
req               273 drivers/xen/xenbus/xenbus_xs.c 	wake_up(&req->wq);
req               278 drivers/xen/xenbus/xenbus_xs.c 	struct xb_req_data *req;
req               281 drivers/xen/xenbus/xenbus_xs.c 	req = kmalloc(sizeof(*req) + sizeof(*vec), GFP_KERNEL);
req               282 drivers/xen/xenbus/xenbus_xs.c 	if (!req)
req               285 drivers/xen/xenbus/xenbus_xs.c 	vec = (struct kvec *)(req + 1);
req               289 drivers/xen/xenbus/xenbus_xs.c 	req->vec = vec;
req               290 drivers/xen/xenbus/xenbus_xs.c 	req->num_vecs = 1;
req               291 drivers/xen/xenbus/xenbus_xs.c 	req->cb = xenbus_dev_queue_reply;
req               292 drivers/xen/xenbus/xenbus_xs.c 	req->par = par;
req               293 drivers/xen/xenbus/xenbus_xs.c 	req->user_req = true;
req               295 drivers/xen/xenbus/xenbus_xs.c 	xs_send(req, msg);
req               308 drivers/xen/xenbus/xenbus_xs.c 	struct xb_req_data *req;
req               314 drivers/xen/xenbus/xenbus_xs.c 	req = kmalloc(sizeof(*req), GFP_NOIO | __GFP_HIGH);
req               315 drivers/xen/xenbus/xenbus_xs.c 	if (!req)
req               318 drivers/xen/xenbus/xenbus_xs.c 	req->vec = iovec;
req               319 drivers/xen/xenbus/xenbus_xs.c 	req->num_vecs = num_vecs;
req               320 drivers/xen/xenbus/xenbus_xs.c 	req->cb = xs_wake_up;
req               321 drivers/xen/xenbus/xenbus_xs.c 	req->user_req = false;
req               330 drivers/xen/xenbus/xenbus_xs.c 	xs_send(req, &msg);
req               332 drivers/xen/xenbus/xenbus_xs.c 	ret = xs_wait_for_reply(req, &msg);
req               911 drivers/xen/xenbus/xenbus_xs.c 	struct xb_req_data *req;
req               914 drivers/xen/xenbus/xenbus_xs.c 	list_for_each_entry(req, &xs_reply_list, list)
req               915 drivers/xen/xenbus/xenbus_xs.c 		wake_up(&req->wq);
req               916 drivers/xen/xenbus/xenbus_xs.c 	list_for_each_entry(req, &xb_write_list, list)
req               917 drivers/xen/xenbus/xenbus_xs.c 		wake_up(&req->wq);
req               164 fs/afs/dir.c   static bool afs_dir_check_pages(struct afs_vnode *dvnode, struct afs_read *req)
req               169 fs/afs/dir.c   	for (i = 0; i < req->nr_pages; i++)
req               170 fs/afs/dir.c   		if (!afs_dir_check_page(dvnode, req->pages[i], req->actual_len))
req               177 fs/afs/dir.c   		req->file_size, req->len, req->actual_len, req->remain);
req               179 fs/afs/dir.c   		req->pos, req->index, req->nr_pages, req->offset);
req               181 fs/afs/dir.c   	for (i = 0; i < req->nr_pages; i++) {
req               182 fs/afs/dir.c   		dbuf = kmap(req->pages[i]);
req               188 fs/afs/dir.c   		kunmap(req->pages[i]);
req               217 fs/afs/dir.c   	struct afs_read *req;
req               238 fs/afs/dir.c   	if (nr_inline > (PAGE_SIZE - sizeof(*req)) / sizeof(struct page *))
req               241 fs/afs/dir.c   	req = kzalloc(struct_size(req, array, nr_inline), GFP_KERNEL);
req               242 fs/afs/dir.c   	if (!req)
req               245 fs/afs/dir.c   	refcount_set(&req->usage, 1);
req               246 fs/afs/dir.c   	req->nr_pages = nr_pages;
req               247 fs/afs/dir.c   	req->actual_len = i_size; /* May change */
req               248 fs/afs/dir.c   	req->len = nr_pages * PAGE_SIZE; /* We can ask for more than there is */
req               249 fs/afs/dir.c   	req->data_version = dvnode->status.data_version; /* May change */
req               251 fs/afs/dir.c   		req->pages = req->array;
req               253 fs/afs/dir.c   		req->pages = kcalloc(nr_pages, sizeof(struct page *),
req               255 fs/afs/dir.c   		if (!req->pages)
req               267 fs/afs/dir.c   					  req->nr_pages - i,
req               268 fs/afs/dir.c   					  req->pages + i);
req               269 fs/afs/dir.c   		_debug("find %u at %u/%u", n, i, req->nr_pages);
req               277 fs/afs/dir.c   			req->pages[i] = __page_cache_alloc(gfp);
req               278 fs/afs/dir.c   			if (!req->pages[i])
req               280 fs/afs/dir.c   			ret = add_to_page_cache_lru(req->pages[i],
req               286 fs/afs/dir.c   			set_page_private(req->pages[i], 1);
req               287 fs/afs/dir.c   			SetPagePrivate(req->pages[i]);
req               288 fs/afs/dir.c   			unlock_page(req->pages[i]);
req               293 fs/afs/dir.c   	} while (i < req->nr_pages);
req               311 fs/afs/dir.c   		ret = afs_fetch_data(dvnode, key, req);
req               315 fs/afs/dir.c   		task_io_account_read(PAGE_SIZE * req->nr_pages);
req               317 fs/afs/dir.c   		if (req->len < req->file_size)
req               322 fs/afs/dir.c   		if (!afs_dir_check_pages(dvnode, req))
req               332 fs/afs/dir.c   	return req;
req               337 fs/afs/dir.c   	afs_put_read(req);
req               343 fs/afs/dir.c   	afs_put_read(req);
req               448 fs/afs/dir.c   	struct afs_read *req;
req               460 fs/afs/dir.c   	req = afs_read_dir(dvnode, key);
req               461 fs/afs/dir.c   	if (IS_ERR(req))
req               462 fs/afs/dir.c   		return PTR_ERR(req);
req               463 fs/afs/dir.c   	*_dir_version = req->data_version;
req               471 fs/afs/dir.c   	while (ctx->pos < req->actual_len) {
req               477 fs/afs/dir.c   		page = req->pages[blkoff / PAGE_SIZE];
req               508 fs/afs/dir.c   	afs_put_read(req);
req               189 fs/afs/file.c  void afs_put_read(struct afs_read *req)
req               193 fs/afs/file.c  	if (refcount_dec_and_test(&req->usage)) {
req               194 fs/afs/file.c  		if (req->pages) {
req               195 fs/afs/file.c  			for (i = 0; i < req->nr_pages; i++)
req               196 fs/afs/file.c  				if (req->pages[i])
req               197 fs/afs/file.c  					put_page(req->pages[i]);
req               198 fs/afs/file.c  			if (req->pages != req->array)
req               199 fs/afs/file.c  				kfree(req->pages);
req               201 fs/afs/file.c  		kfree(req);
req               276 fs/afs/file.c  	struct afs_read *req;
req               315 fs/afs/file.c  		req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
req               316 fs/afs/file.c  		if (!req)
req               323 fs/afs/file.c  		refcount_set(&req->usage, 1);
req               324 fs/afs/file.c  		req->pos = (loff_t)page->index << PAGE_SHIFT;
req               325 fs/afs/file.c  		req->len = PAGE_SIZE;
req               326 fs/afs/file.c  		req->nr_pages = 1;
req               327 fs/afs/file.c  		req->pages = req->array;
req               328 fs/afs/file.c  		req->pages[0] = page;
req               333 fs/afs/file.c  		ret = afs_fetch_data(vnode, key, req);
req               334 fs/afs/file.c  		afs_put_read(req);
req               414 fs/afs/file.c  static void afs_readpages_page_done(struct afs_read *req)
req               417 fs/afs/file.c  	struct afs_vnode *vnode = req->vnode;
req               419 fs/afs/file.c  	struct page *page = req->pages[req->index];
req               421 fs/afs/file.c  	req->pages[req->index] = NULL;
req               444 fs/afs/file.c  	struct afs_read *req;
req               465 fs/afs/file.c  	req = kzalloc(struct_size(req, array, n), GFP_NOFS);
req               466 fs/afs/file.c  	if (!req)
req               469 fs/afs/file.c  	refcount_set(&req->usage, 1);
req               470 fs/afs/file.c  	req->vnode = vnode;
req               471 fs/afs/file.c  	req->page_done = afs_readpages_page_done;
req               472 fs/afs/file.c  	req->pos = first->index;
req               473 fs/afs/file.c  	req->pos <<= PAGE_SHIFT;
req               474 fs/afs/file.c  	req->pages = req->array;
req               498 fs/afs/file.c  		req->pages[req->nr_pages++] = page;
req               499 fs/afs/file.c  		req->len += PAGE_SIZE;
req               500 fs/afs/file.c  	} while (req->nr_pages < n);
req               502 fs/afs/file.c  	if (req->nr_pages == 0) {
req               503 fs/afs/file.c  		kfree(req);
req               507 fs/afs/file.c  	ret = afs_fetch_data(vnode, key, req);
req               511 fs/afs/file.c  	task_io_account_read(PAGE_SIZE * req->nr_pages);
req               512 fs/afs/file.c  	afs_put_read(req);
req               523 fs/afs/file.c  	for (i = 0; i < req->nr_pages; i++) {
req               524 fs/afs/file.c  		page = req->pages[i];
req               534 fs/afs/file.c  	afs_put_read(req);
req               325 fs/afs/fsclient.c 	struct afs_read *req = call->read_request;
req               331 fs/afs/fsclient.c 	       call->unmarshall, iov_iter_count(&call->iter), req->actual_len);
req               335 fs/afs/fsclient.c 		req->actual_len = 0;
req               336 fs/afs/fsclient.c 		req->index = 0;
req               337 fs/afs/fsclient.c 		req->offset = req->pos & (PAGE_SIZE - 1);
req               354 fs/afs/fsclient.c 		req->actual_len = be64_to_cpu(call->tmp64);
req               355 fs/afs/fsclient.c 		_debug("DATA length: %llu", req->actual_len);
req               356 fs/afs/fsclient.c 		req->remain = min(req->len, req->actual_len);
req               357 fs/afs/fsclient.c 		if (req->remain == 0)
req               363 fs/afs/fsclient.c 		ASSERTCMP(req->index, <, req->nr_pages);
req               364 fs/afs/fsclient.c 		if (req->remain > PAGE_SIZE - req->offset)
req               365 fs/afs/fsclient.c 			size = PAGE_SIZE - req->offset;
req               367 fs/afs/fsclient.c 			size = req->remain;
req               369 fs/afs/fsclient.c 		call->bvec[0].bv_offset = req->offset;
req               370 fs/afs/fsclient.c 		call->bvec[0].bv_page = req->pages[req->index];
req               378 fs/afs/fsclient.c 		       iov_iter_count(&call->iter), req->remain);
req               383 fs/afs/fsclient.c 		req->remain -= call->bvec[0].bv_len;
req               384 fs/afs/fsclient.c 		req->offset += call->bvec[0].bv_len;
req               385 fs/afs/fsclient.c 		ASSERTCMP(req->offset, <=, PAGE_SIZE);
req               386 fs/afs/fsclient.c 		if (req->offset == PAGE_SIZE) {
req               387 fs/afs/fsclient.c 			req->offset = 0;
req               388 fs/afs/fsclient.c 			req->index++;
req               389 fs/afs/fsclient.c 			if (req->remain > 0)
req               393 fs/afs/fsclient.c 		ASSERTCMP(req->remain, ==, 0);
req               394 fs/afs/fsclient.c 		if (req->actual_len <= req->len)
req               398 fs/afs/fsclient.c 		afs_extract_discard(call, req->actual_len - req->len);
req               404 fs/afs/fsclient.c 		       iov_iter_count(&call->iter), req->actual_len - req->len);
req               428 fs/afs/fsclient.c 		req->data_version = call->out_scb->status.data_version;
req               429 fs/afs/fsclient.c 		req->file_size = call->out_scb->status.size;
req               437 fs/afs/fsclient.c 	for (; req->index < req->nr_pages; req->index++) {
req               438 fs/afs/fsclient.c 		if (req->offset < PAGE_SIZE)
req               439 fs/afs/fsclient.c 			zero_user_segment(req->pages[req->index],
req               440 fs/afs/fsclient.c 					  req->offset, PAGE_SIZE);
req               441 fs/afs/fsclient.c 		req->offset = 0;
req               444 fs/afs/fsclient.c 	if (req->page_done)
req               445 fs/afs/fsclient.c 		for (req->index = 0; req->index < req->nr_pages; req->index++)
req               446 fs/afs/fsclient.c 			req->page_done(req);
req               454 fs/afs/fsclient.c 	struct afs_read *req = call->read_request;
req               456 fs/afs/fsclient.c 	afs_put_read(req);
req               482 fs/afs/fsclient.c 			       struct afs_read *req)
req               498 fs/afs/fsclient.c 	call->read_request = req;
req               506 fs/afs/fsclient.c 	bp[4] = htonl(upper_32_bits(req->pos));
req               507 fs/afs/fsclient.c 	bp[5] = htonl(lower_32_bits(req->pos));
req               509 fs/afs/fsclient.c 	bp[7] = htonl(lower_32_bits(req->len));
req               511 fs/afs/fsclient.c 	refcount_inc(&req->usage);
req               524 fs/afs/fsclient.c 		      struct afs_read *req)
req               532 fs/afs/fsclient.c 		return yfs_fs_fetch_data(fc, scb, req);
req               534 fs/afs/fsclient.c 	if (upper_32_bits(req->pos) ||
req               535 fs/afs/fsclient.c 	    upper_32_bits(req->len) ||
req               536 fs/afs/fsclient.c 	    upper_32_bits(req->pos + req->len))
req               537 fs/afs/fsclient.c 		return afs_fs_fetch_data64(fc, scb, req);
req               548 fs/afs/fsclient.c 	call->read_request = req;
req               556 fs/afs/fsclient.c 	bp[4] = htonl(lower_32_bits(req->pos));
req               557 fs/afs/fsclient.c 	bp[5] = htonl(lower_32_bits(req->len));
req               559 fs/afs/fsclient.c 	refcount_inc(&req->usage);
req                31 fs/afs/write.c 	struct afs_read *req;
req                47 fs/afs/write.c 	req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
req                48 fs/afs/write.c 	if (!req)
req                51 fs/afs/write.c 	refcount_set(&req->usage, 1);
req                52 fs/afs/write.c 	req->pos = pos;
req                53 fs/afs/write.c 	req->len = len;
req                54 fs/afs/write.c 	req->nr_pages = 1;
req                55 fs/afs/write.c 	req->pages = req->array;
req                56 fs/afs/write.c 	req->pages[0] = page;
req                59 fs/afs/write.c 	ret = afs_fetch_data(vnode, key, req);
req                60 fs/afs/write.c 	afs_put_read(req);
req               442 fs/afs/yfsclient.c 	struct afs_read *req = call->read_request;
req               448 fs/afs/yfsclient.c 	       call->unmarshall, iov_iter_count(&call->iter), req->actual_len);
req               452 fs/afs/yfsclient.c 		req->actual_len = 0;
req               453 fs/afs/yfsclient.c 		req->index = 0;
req               454 fs/afs/yfsclient.c 		req->offset = req->pos & (PAGE_SIZE - 1);
req               466 fs/afs/yfsclient.c 		req->actual_len = be64_to_cpu(call->tmp64);
req               467 fs/afs/yfsclient.c 		_debug("DATA length: %llu", req->actual_len);
req               468 fs/afs/yfsclient.c 		req->remain = min(req->len, req->actual_len);
req               469 fs/afs/yfsclient.c 		if (req->remain == 0)
req               475 fs/afs/yfsclient.c 		ASSERTCMP(req->index, <, req->nr_pages);
req               476 fs/afs/yfsclient.c 		if (req->remain > PAGE_SIZE - req->offset)
req               477 fs/afs/yfsclient.c 			size = PAGE_SIZE - req->offset;
req               479 fs/afs/yfsclient.c 			size = req->remain;
req               481 fs/afs/yfsclient.c 		call->bvec[0].bv_offset = req->offset;
req               482 fs/afs/yfsclient.c 		call->bvec[0].bv_page = req->pages[req->index];
req               490 fs/afs/yfsclient.c 		       iov_iter_count(&call->iter), req->remain);
req               495 fs/afs/yfsclient.c 		req->remain -= call->bvec[0].bv_len;
req               496 fs/afs/yfsclient.c 		req->offset += call->bvec[0].bv_len;
req               497 fs/afs/yfsclient.c 		ASSERTCMP(req->offset, <=, PAGE_SIZE);
req               498 fs/afs/yfsclient.c 		if (req->offset == PAGE_SIZE) {
req               499 fs/afs/yfsclient.c 			req->offset = 0;
req               500 fs/afs/yfsclient.c 			req->index++;
req               501 fs/afs/yfsclient.c 			if (req->remain > 0)
req               505 fs/afs/yfsclient.c 		ASSERTCMP(req->remain, ==, 0);
req               506 fs/afs/yfsclient.c 		if (req->actual_len <= req->len)
req               510 fs/afs/yfsclient.c 		afs_extract_discard(call, req->actual_len - req->len);
req               516 fs/afs/yfsclient.c 		       iov_iter_count(&call->iter), req->actual_len - req->len);
req               543 fs/afs/yfsclient.c 		req->data_version = call->out_scb->status.data_version;
req               544 fs/afs/yfsclient.c 		req->file_size = call->out_scb->status.size;
req               553 fs/afs/yfsclient.c 	for (; req->index < req->nr_pages; req->index++) {
req               554 fs/afs/yfsclient.c 		if (req->offset < PAGE_SIZE)
req               555 fs/afs/yfsclient.c 			zero_user_segment(req->pages[req->index],
req               556 fs/afs/yfsclient.c 					  req->offset, PAGE_SIZE);
req               557 fs/afs/yfsclient.c 		req->offset = 0;
req               560 fs/afs/yfsclient.c 	if (req->page_done)
req               561 fs/afs/yfsclient.c 		for (req->index = 0; req->index < req->nr_pages; req->index++)
req               562 fs/afs/yfsclient.c 			req->page_done(req);
req               588 fs/afs/yfsclient.c 		      struct afs_read *req)
req               597 fs/afs/yfsclient.c 	       req->pos, req->len);
req               612 fs/afs/yfsclient.c 	call->read_request = req;
req               619 fs/afs/yfsclient.c 	bp = xdr_encode_u64(bp, req->pos);
req               620 fs/afs/yfsclient.c 	bp = xdr_encode_u64(bp, req->len);
req               623 fs/afs/yfsclient.c 	refcount_inc(&req->usage);
req               564 fs/aio.c       	struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);
req               565 fs/aio.c       	struct kioctx *ctx = req->ki_ctx;
req               568 fs/aio.c       	if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
req               572 fs/aio.c       	list_add_tail(&req->ki_list, &ctx->active_reqs);
req               573 fs/aio.c       	req->ki_cancel = cancel;
req               617 fs/aio.c       	struct aio_kiocb *req;
req               622 fs/aio.c       		req = list_first_entry(&ctx->active_reqs,
req               624 fs/aio.c       		req->ki_cancel(&req->rw);
req               625 fs/aio.c       		list_del_init(&req->ki_list);
req              1029 fs/aio.c       	struct aio_kiocb *req;
req              1031 fs/aio.c       	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
req              1032 fs/aio.c       	if (unlikely(!req))
req              1036 fs/aio.c       		kmem_cache_free(kiocb_cachep, req);
req              1041 fs/aio.c       	req->ki_ctx = ctx;
req              1042 fs/aio.c       	INIT_LIST_HEAD(&req->ki_list);
req              1043 fs/aio.c       	refcount_set(&req->ki_refcnt, 2);
req              1044 fs/aio.c       	req->ki_eventfd = NULL;
req              1045 fs/aio.c       	return req;
req              1446 fs/aio.c       static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
req              1450 fs/aio.c       	req->ki_complete = aio_complete_rw;
req              1451 fs/aio.c       	req->private = NULL;
req              1452 fs/aio.c       	req->ki_pos = iocb->aio_offset;
req              1453 fs/aio.c       	req->ki_flags = iocb_flags(req->ki_filp);
req              1455 fs/aio.c       		req->ki_flags |= IOCB_EVENTFD;
req              1456 fs/aio.c       	req->ki_hint = ki_hint_validate(file_write_hint(req->ki_filp));
req              1469 fs/aio.c       		req->ki_ioprio = iocb->aio_reqprio;
req              1471 fs/aio.c       		req->ki_ioprio = get_current_ioprio();
req              1473 fs/aio.c       	ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
req              1477 fs/aio.c       	req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
req              1501 fs/aio.c       static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
req              1517 fs/aio.c       		req->ki_complete(req, ret, 0);
req              1521 fs/aio.c       static int aio_read(struct kiocb *req, const struct iocb *iocb,
req              1529 fs/aio.c       	ret = aio_prep_rw(req, iocb);
req              1532 fs/aio.c       	file = req->ki_filp;
req              1542 fs/aio.c       	ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
req              1544 fs/aio.c       		aio_rw_done(req, call_read_iter(file, req, &iter));
req              1549 fs/aio.c       static int aio_write(struct kiocb *req, const struct iocb *iocb,
req              1557 fs/aio.c       	ret = aio_prep_rw(req, iocb);
req              1560 fs/aio.c       	file = req->ki_filp;
req              1570 fs/aio.c       	ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
req              1583 fs/aio.c       		req->ki_flags |= IOCB_WRITE;
req              1584 fs/aio.c       		aio_rw_done(req, call_write_iter(file, req, &iter));
req              1601 fs/aio.c       static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
req              1608 fs/aio.c       	if (unlikely(!req->file->f_op->fsync))
req              1611 fs/aio.c       	req->creds = prepare_creds();
req              1612 fs/aio.c       	if (!req->creds)
req              1615 fs/aio.c       	req->datasync = datasync;
req              1616 fs/aio.c       	INIT_WORK(&req->work, aio_fsync_work);
req              1617 fs/aio.c       	schedule_work(&req->work);
req              1623 fs/aio.c       	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
req              1624 fs/aio.c       	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
req              1631 fs/aio.c       	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
req              1632 fs/aio.c       	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
req              1633 fs/aio.c       	struct poll_table_struct pt = { ._key = req->events };
req              1637 fs/aio.c       	if (!READ_ONCE(req->cancelled))
req              1638 fs/aio.c       		mask = vfs_poll(req->file, &pt) & req->events;
req              1648 fs/aio.c       	if (!mask && !READ_ONCE(req->cancelled)) {
req              1649 fs/aio.c       		add_wait_queue(req->head, &req->wait);
req              1655 fs/aio.c       	req->done = true;
req              1665 fs/aio.c       	struct poll_iocb *req = &aiocb->poll;
req              1667 fs/aio.c       	spin_lock(&req->head->lock);
req              1668 fs/aio.c       	WRITE_ONCE(req->cancelled, true);
req              1669 fs/aio.c       	if (!list_empty(&req->wait.entry)) {
req              1670 fs/aio.c       		list_del_init(&req->wait.entry);
req              1673 fs/aio.c       	spin_unlock(&req->head->lock);
req              1681 fs/aio.c       	struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
req              1682 fs/aio.c       	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
req              1687 fs/aio.c       	if (mask && !(mask & req->events))
req              1690 fs/aio.c       	list_del_init(&req->wait.entry);
req              1703 fs/aio.c       		req->done = true;
req              1706 fs/aio.c       			INIT_WORK(&req->work, aio_poll_put_work);
req              1707 fs/aio.c       			schedule_work(&req->work);
req              1713 fs/aio.c       		schedule_work(&req->work);
req              1744 fs/aio.c       	struct poll_iocb *req = &aiocb->poll;
req              1756 fs/aio.c       	INIT_WORK(&req->work, aio_poll_complete_work);
req              1757 fs/aio.c       	req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
req              1759 fs/aio.c       	req->head = NULL;
req              1760 fs/aio.c       	req->done = false;
req              1761 fs/aio.c       	req->cancelled = false;
req              1764 fs/aio.c       	apt.pt._key = req->events;
req              1769 fs/aio.c       	INIT_LIST_HEAD(&req->wait.entry);
req              1770 fs/aio.c       	init_waitqueue_func_entry(&req->wait, aio_poll_wake);
req              1772 fs/aio.c       	mask = vfs_poll(req->file, &apt.pt) & req->events;
req              1774 fs/aio.c       	if (likely(req->head)) {
req              1775 fs/aio.c       		spin_lock(&req->head->lock);
req              1776 fs/aio.c       		if (unlikely(list_empty(&req->wait.entry))) {
req              1783 fs/aio.c       			list_del_init(&req->wait.entry);
req              1785 fs/aio.c       			WRITE_ONCE(req->cancelled, true);
req              1786 fs/aio.c       		} else if (!req->done) { /* actually waiting for an event */
req              1790 fs/aio.c       		spin_unlock(&req->head->lock);
req              1803 fs/aio.c       			   struct iocb __user *user_iocb, struct aio_kiocb *req,
req              1806 fs/aio.c       	req->ki_filp = fget(iocb->aio_fildes);
req              1807 fs/aio.c       	if (unlikely(!req->ki_filp))
req              1822 fs/aio.c       		req->ki_eventfd = eventfd;
req              1830 fs/aio.c       	req->ki_res.obj = (u64)(unsigned long)user_iocb;
req              1831 fs/aio.c       	req->ki_res.data = iocb->aio_data;
req              1832 fs/aio.c       	req->ki_res.res = 0;
req              1833 fs/aio.c       	req->ki_res.res2 = 0;
req              1837 fs/aio.c       		return aio_read(&req->rw, iocb, false, compat);
req              1839 fs/aio.c       		return aio_write(&req->rw, iocb, false, compat);
req              1841 fs/aio.c       		return aio_read(&req->rw, iocb, true, compat);
req              1843 fs/aio.c       		return aio_write(&req->rw, iocb, true, compat);
req              1845 fs/aio.c       		return aio_fsync(&req->fsync, iocb, false);
req              1847 fs/aio.c       		return aio_fsync(&req->fsync, iocb, true);
req              1849 fs/aio.c       		return aio_poll(req, iocb);
req              1859 fs/aio.c       	struct aio_kiocb *req;
req              1882 fs/aio.c       	req = aio_get_req(ctx);
req              1883 fs/aio.c       	if (unlikely(!req))
req              1886 fs/aio.c       	err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);
req              1889 fs/aio.c       	iocb_put(req);
req              1897 fs/aio.c       		iocb_destroy(req);
req               260 fs/ceph/addr.c static void finish_read(struct ceph_osd_request *req)
req               262 fs/ceph/addr.c 	struct inode *inode = req->r_inode;
req               264 fs/ceph/addr.c 	int rc = req->r_result <= 0 ? req->r_result : 0;
req               265 fs/ceph/addr.c 	int bytes = req->r_result >= 0 ? req->r_result : 0;
req               269 fs/ceph/addr.c 	dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes);
req               274 fs/ceph/addr.c 	osd_data = osd_req_op_extent_osd_data(req, 0);
req               315 fs/ceph/addr.c 	struct ceph_osd_request *req;
req               365 fs/ceph/addr.c 	req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len,
req               370 fs/ceph/addr.c 	if (IS_ERR(req)) {
req               371 fs/ceph/addr.c 		ret = PTR_ERR(req);
req               398 fs/ceph/addr.c 				osd_req_op_extent_update(req, 0, len);
req               405 fs/ceph/addr.c 	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
req               406 fs/ceph/addr.c 	req->r_callback = finish_read;
req               407 fs/ceph/addr.c 	req->r_inode = inode;
req               409 fs/ceph/addr.c 	dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len);
req               410 fs/ceph/addr.c 	ret = ceph_osdc_start_request(osdc, req, false);
req               413 fs/ceph/addr.c 	ceph_osdc_put_request(req);
req               429 fs/ceph/addr.c 	ceph_osdc_put_request(req);
req               693 fs/ceph/addr.c static void writepages_finish(struct ceph_osd_request *req)
req               695 fs/ceph/addr.c 	struct inode *inode = req->r_inode;
req               701 fs/ceph/addr.c 	int rc = req->r_result;
req               702 fs/ceph/addr.c 	struct ceph_snap_context *snapc = req->r_snapc;
req               727 fs/ceph/addr.c 	for (i = 0; i < req->r_num_ops; i++) {
req               728 fs/ceph/addr.c 		if (req->r_ops[i].op != CEPH_OSD_OP_WRITE)
req               731 fs/ceph/addr.c 		osd_data = osd_req_op_extent_osd_data(req, i);
req               767 fs/ceph/addr.c 	osd_data = osd_req_op_extent_osd_data(req, 0);
req               773 fs/ceph/addr.c 	ceph_osdc_put_request(req);
req               791 fs/ceph/addr.c 	struct ceph_osd_request *req = NULL;
req              1029 fs/ceph/addr.c 		req = ceph_osdc_new_request(&fsc->client->osdc,
req              1035 fs/ceph/addr.c 		if (IS_ERR(req)) {
req              1036 fs/ceph/addr.c 			req = ceph_osdc_new_request(&fsc->client->osdc,
req              1045 fs/ceph/addr.c 			BUG_ON(IS_ERR(req));
req              1050 fs/ceph/addr.c 		req->r_callback = writepages_finish;
req              1051 fs/ceph/addr.c 		req->r_inode = inode;
req              1060 fs/ceph/addr.c 				if (op_idx + 1 == req->r_num_ops)
req              1062 fs/ceph/addr.c 				osd_req_op_extent_dup_last(req, op_idx,
req              1066 fs/ceph/addr.c 				osd_req_op_extent_osd_data_pages(req, op_idx,
req              1069 fs/ceph/addr.c 				osd_req_op_extent_update(req, op_idx, len);
req              1094 fs/ceph/addr.c 		osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
req              1096 fs/ceph/addr.c 		osd_req_op_extent_update(req, op_idx, len);
req              1098 fs/ceph/addr.c 		BUG_ON(op_idx + 1 != req->r_num_ops);
req              1102 fs/ceph/addr.c 			BUG_ON(num_ops <= req->r_num_ops);
req              1103 fs/ceph/addr.c 			num_ops -= req->r_num_ops;
req              1120 fs/ceph/addr.c 			BUG_ON(num_ops != req->r_num_ops);
req              1126 fs/ceph/addr.c 		req->r_mtime = inode->i_mtime;
req              1127 fs/ceph/addr.c 		rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
req              1129 fs/ceph/addr.c 		req = NULL;
req              1186 fs/ceph/addr.c 	ceph_osdc_put_request(req);
req              1665 fs/ceph/addr.c 	struct ceph_osd_request *req;
req              1720 fs/ceph/addr.c 	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
req              1724 fs/ceph/addr.c 	if (IS_ERR(req)) {
req              1725 fs/ceph/addr.c 		err = PTR_ERR(req);
req              1729 fs/ceph/addr.c 	req->r_mtime = inode->i_mtime;
req              1730 fs/ceph/addr.c 	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
req              1732 fs/ceph/addr.c 		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
req              1733 fs/ceph/addr.c 	ceph_osdc_put_request(req);
req              1737 fs/ceph/addr.c 	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
req              1742 fs/ceph/addr.c 	if (IS_ERR(req)) {
req              1743 fs/ceph/addr.c 		err = PTR_ERR(req);
req              1747 fs/ceph/addr.c 	osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false);
req              1751 fs/ceph/addr.c 		err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
req              1764 fs/ceph/addr.c 		err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
req              1771 fs/ceph/addr.c 	req->r_mtime = inode->i_mtime;
req              1772 fs/ceph/addr.c 	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
req              1774 fs/ceph/addr.c 		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
req              1776 fs/ceph/addr.c 	ceph_osdc_put_request(req);
req                53 fs/ceph/debugfs.c 	struct ceph_mds_request *req;
req                61 fs/ceph/debugfs.c 		req = rb_entry(rp, struct ceph_mds_request, r_node);
req                63 fs/ceph/debugfs.c 		if (req->r_request && req->r_session)
req                64 fs/ceph/debugfs.c 			seq_printf(s, "%lld\tmds%d\t", req->r_tid,
req                65 fs/ceph/debugfs.c 				   req->r_session->s_mds);
req                66 fs/ceph/debugfs.c 		else if (!req->r_request)
req                67 fs/ceph/debugfs.c 			seq_printf(s, "%lld\t(no request)\t", req->r_tid);
req                69 fs/ceph/debugfs.c 			seq_printf(s, "%lld\t(no session)\t", req->r_tid);
req                71 fs/ceph/debugfs.c 		seq_printf(s, "%s", ceph_mds_op_name(req->r_op));
req                73 fs/ceph/debugfs.c 		if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
req                78 fs/ceph/debugfs.c 		if (req->r_inode) {
req                79 fs/ceph/debugfs.c 			seq_printf(s, " #%llx", ceph_ino(req->r_inode));
req                80 fs/ceph/debugfs.c 		} else if (req->r_dentry) {
req                81 fs/ceph/debugfs.c 			path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
req                85 fs/ceph/debugfs.c 			spin_lock(&req->r_dentry->d_lock);
req                87 fs/ceph/debugfs.c 				   ceph_ino(d_inode(req->r_dentry->d_parent)),
req                88 fs/ceph/debugfs.c 				   req->r_dentry,
req                90 fs/ceph/debugfs.c 			spin_unlock(&req->r_dentry->d_lock);
req                92 fs/ceph/debugfs.c 		} else if (req->r_path1) {
req                93 fs/ceph/debugfs.c 			seq_printf(s, " #%llx/%s", req->r_ino1.ino,
req                94 fs/ceph/debugfs.c 				   req->r_path1);
req                96 fs/ceph/debugfs.c 			seq_printf(s, " #%llx", req->r_ino1.ino);
req                99 fs/ceph/debugfs.c 		if (req->r_old_dentry) {
req               100 fs/ceph/debugfs.c 			path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen,
req               104 fs/ceph/debugfs.c 			spin_lock(&req->r_old_dentry->d_lock);
req               106 fs/ceph/debugfs.c 				   req->r_old_dentry_dir ?
req               107 fs/ceph/debugfs.c 				   ceph_ino(req->r_old_dentry_dir) : 0,
req               108 fs/ceph/debugfs.c 				   req->r_old_dentry,
req               110 fs/ceph/debugfs.c 			spin_unlock(&req->r_old_dentry->d_lock);
req               112 fs/ceph/debugfs.c 		} else if (req->r_path2 && req->r_op != CEPH_MDS_OP_SYMLINK) {
req               113 fs/ceph/debugfs.c 			if (req->r_ino2.ino)
req               114 fs/ceph/debugfs.c 				seq_printf(s, " #%llx/%s", req->r_ino2.ino,
req               115 fs/ceph/debugfs.c 					   req->r_path2);
req               117 fs/ceph/debugfs.c 				seq_printf(s, " %s", req->r_path2);
req               358 fs/ceph/dir.c  		struct ceph_mds_request *req;
req               380 fs/ceph/dir.c  		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
req               381 fs/ceph/dir.c  		if (IS_ERR(req))
req               382 fs/ceph/dir.c  			return PTR_ERR(req);
req               383 fs/ceph/dir.c  		err = ceph_alloc_readdir_reply_buffer(req, inode);
req               385 fs/ceph/dir.c  			ceph_mdsc_put_request(req);
req               389 fs/ceph/dir.c  		req->r_direct_mode = USE_AUTH_MDS;
req               391 fs/ceph/dir.c  			req->r_direct_hash = ceph_frag_value(frag);
req               392 fs/ceph/dir.c  			__set_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
req               393 fs/ceph/dir.c  			req->r_inode_drop = CEPH_CAP_FILE_EXCL;
req               396 fs/ceph/dir.c  			req->r_path2 = kstrdup(dfi->last_name, GFP_KERNEL);
req               397 fs/ceph/dir.c  			if (!req->r_path2) {
req               398 fs/ceph/dir.c  				ceph_mdsc_put_request(req);
req               402 fs/ceph/dir.c  			req->r_args.readdir.offset_hash =
req               406 fs/ceph/dir.c  		req->r_dir_release_cnt = dfi->dir_release_count;
req               407 fs/ceph/dir.c  		req->r_dir_ordered_cnt = dfi->dir_ordered_count;
req               408 fs/ceph/dir.c  		req->r_readdir_cache_idx = dfi->readdir_cache_idx;
req               409 fs/ceph/dir.c  		req->r_readdir_offset = dfi->next_offset;
req               410 fs/ceph/dir.c  		req->r_args.readdir.frag = cpu_to_le32(frag);
req               411 fs/ceph/dir.c  		req->r_args.readdir.flags =
req               414 fs/ceph/dir.c  		req->r_inode = inode;
req               416 fs/ceph/dir.c  		req->r_dentry = dget(file->f_path.dentry);
req               417 fs/ceph/dir.c  		err = ceph_mdsc_do_request(mdsc, NULL, req);
req               419 fs/ceph/dir.c  			ceph_mdsc_put_request(req);
req               425 fs/ceph/dir.c  		     (int)req->r_reply_info.dir_end,
req               426 fs/ceph/dir.c  		     (int)req->r_reply_info.dir_complete,
req               427 fs/ceph/dir.c  		     (int)req->r_reply_info.hash_order);
req               429 fs/ceph/dir.c  		rinfo = &req->r_reply_info;
req               433 fs/ceph/dir.c  				dfi->next_offset = req->r_readdir_offset;
req               442 fs/ceph/dir.c  		dfi->last_readdir = req;
req               444 fs/ceph/dir.c  		if (test_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags)) {
req               445 fs/ceph/dir.c  			dfi->readdir_cache_idx = req->r_readdir_cache_idx;
req               453 fs/ceph/dir.c  				dfi->dir_release_count = req->r_dir_release_cnt;
req               454 fs/ceph/dir.c  				dfi->dir_ordered_count = req->r_dir_ordered_cnt;
req               468 fs/ceph/dir.c  			unsigned next_offset = req->r_reply_info.dir_end ?
req               474 fs/ceph/dir.c  		} else if (req->r_reply_info.dir_end) {
req               665 fs/ceph/dir.c  int ceph_handle_snapdir(struct ceph_mds_request *req,
req               697 fs/ceph/dir.c  struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
req               703 fs/ceph/dir.c  		if (!req->r_reply_info.head->is_dentry) {
req               716 fs/ceph/dir.c  	else if (dentry != req->r_dentry)
req               717 fs/ceph/dir.c  		dentry = dget(req->r_dentry);   /* we got spliced */
req               738 fs/ceph/dir.c  	struct ceph_mds_request *req;
req               774 fs/ceph/dir.c  	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
req               775 fs/ceph/dir.c  	if (IS_ERR(req))
req               776 fs/ceph/dir.c  		return ERR_CAST(req);
req               777 fs/ceph/dir.c  	req->r_dentry = dget(dentry);
req               778 fs/ceph/dir.c  	req->r_num_caps = 2;
req               783 fs/ceph/dir.c  	req->r_args.getattr.mask = cpu_to_le32(mask);
req               785 fs/ceph/dir.c  	req->r_parent = dir;
req               786 fs/ceph/dir.c  	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req               787 fs/ceph/dir.c  	err = ceph_mdsc_do_request(mdsc, NULL, req);
req               788 fs/ceph/dir.c  	err = ceph_handle_snapdir(req, dentry, err);
req               789 fs/ceph/dir.c  	dentry = ceph_finish_lookup(req, dentry, err);
req               790 fs/ceph/dir.c  	ceph_mdsc_put_request(req);  /* will dput(dentry) */
req               827 fs/ceph/dir.c  	struct ceph_mds_request *req;
req               848 fs/ceph/dir.c  	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
req               849 fs/ceph/dir.c  	if (IS_ERR(req)) {
req               850 fs/ceph/dir.c  		err = PTR_ERR(req);
req               853 fs/ceph/dir.c  	req->r_dentry = dget(dentry);
req               854 fs/ceph/dir.c  	req->r_num_caps = 2;
req               855 fs/ceph/dir.c  	req->r_parent = dir;
req               856 fs/ceph/dir.c  	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req               857 fs/ceph/dir.c  	req->r_args.mknod.mode = cpu_to_le32(mode);
req               858 fs/ceph/dir.c  	req->r_args.mknod.rdev = cpu_to_le32(rdev);
req               859 fs/ceph/dir.c  	req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
req               860 fs/ceph/dir.c  	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
req               862 fs/ceph/dir.c  		req->r_pagelist = as_ctx.pagelist;
req               865 fs/ceph/dir.c  	err = ceph_mdsc_do_request(mdsc, dir, req);
req               866 fs/ceph/dir.c  	if (!err && !req->r_reply_info.head->is_dentry)
req               868 fs/ceph/dir.c  	ceph_mdsc_put_request(req);
req               889 fs/ceph/dir.c  	struct ceph_mds_request *req;
req               906 fs/ceph/dir.c  	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
req               907 fs/ceph/dir.c  	if (IS_ERR(req)) {
req               908 fs/ceph/dir.c  		err = PTR_ERR(req);
req               911 fs/ceph/dir.c  	req->r_path2 = kstrdup(dest, GFP_KERNEL);
req               912 fs/ceph/dir.c  	if (!req->r_path2) {
req               914 fs/ceph/dir.c  		ceph_mdsc_put_request(req);
req               917 fs/ceph/dir.c  	req->r_parent = dir;
req               918 fs/ceph/dir.c  	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req               919 fs/ceph/dir.c  	req->r_dentry = dget(dentry);
req               920 fs/ceph/dir.c  	req->r_num_caps = 2;
req               921 fs/ceph/dir.c  	req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
req               922 fs/ceph/dir.c  	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
req               923 fs/ceph/dir.c  	err = ceph_mdsc_do_request(mdsc, dir, req);
req               924 fs/ceph/dir.c  	if (!err && !req->r_reply_info.head->is_dentry)
req               926 fs/ceph/dir.c  	ceph_mdsc_put_request(req);
req               938 fs/ceph/dir.c  	struct ceph_mds_request *req;
req               969 fs/ceph/dir.c  	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
req               970 fs/ceph/dir.c  	if (IS_ERR(req)) {
req               971 fs/ceph/dir.c  		err = PTR_ERR(req);
req               975 fs/ceph/dir.c  	req->r_dentry = dget(dentry);
req               976 fs/ceph/dir.c  	req->r_num_caps = 2;
req               977 fs/ceph/dir.c  	req->r_parent = dir;
req               978 fs/ceph/dir.c  	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req               979 fs/ceph/dir.c  	req->r_args.mkdir.mode = cpu_to_le32(mode);
req               980 fs/ceph/dir.c  	req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
req               981 fs/ceph/dir.c  	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
req               983 fs/ceph/dir.c  		req->r_pagelist = as_ctx.pagelist;
req               986 fs/ceph/dir.c  	err = ceph_mdsc_do_request(mdsc, dir, req);
req               988 fs/ceph/dir.c  	    !req->r_reply_info.head->is_target &&
req               989 fs/ceph/dir.c  	    !req->r_reply_info.head->is_dentry)
req               991 fs/ceph/dir.c  	ceph_mdsc_put_request(req);
req              1006 fs/ceph/dir.c  	struct ceph_mds_request *req;
req              1014 fs/ceph/dir.c  	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
req              1015 fs/ceph/dir.c  	if (IS_ERR(req)) {
req              1017 fs/ceph/dir.c  		return PTR_ERR(req);
req              1019 fs/ceph/dir.c  	req->r_dentry = dget(dentry);
req              1020 fs/ceph/dir.c  	req->r_num_caps = 2;
req              1021 fs/ceph/dir.c  	req->r_old_dentry = dget(old_dentry);
req              1022 fs/ceph/dir.c  	req->r_parent = dir;
req              1023 fs/ceph/dir.c  	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req              1024 fs/ceph/dir.c  	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
req              1025 fs/ceph/dir.c  	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
req              1027 fs/ceph/dir.c  	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
req              1028 fs/ceph/dir.c  	err = ceph_mdsc_do_request(mdsc, dir, req);
req              1031 fs/ceph/dir.c  	} else if (!req->r_reply_info.head->is_dentry) {
req              1035 fs/ceph/dir.c  	ceph_mdsc_put_request(req);
req              1047 fs/ceph/dir.c  	struct ceph_mds_request *req;
req              1062 fs/ceph/dir.c  	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
req              1063 fs/ceph/dir.c  	if (IS_ERR(req)) {
req              1064 fs/ceph/dir.c  		err = PTR_ERR(req);
req              1067 fs/ceph/dir.c  	req->r_dentry = dget(dentry);
req              1068 fs/ceph/dir.c  	req->r_num_caps = 2;
req              1069 fs/ceph/dir.c  	req->r_parent = dir;
req              1070 fs/ceph/dir.c  	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req              1071 fs/ceph/dir.c  	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
req              1072 fs/ceph/dir.c  	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
req              1073 fs/ceph/dir.c  	req->r_inode_drop = ceph_drop_caps_for_unlink(inode);
req              1074 fs/ceph/dir.c  	err = ceph_mdsc_do_request(mdsc, dir, req);
req              1075 fs/ceph/dir.c  	if (!err && !req->r_reply_info.head->is_dentry)
req              1077 fs/ceph/dir.c  	ceph_mdsc_put_request(req);
req              1088 fs/ceph/dir.c  	struct ceph_mds_request *req;
req              1110 fs/ceph/dir.c  	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
req              1111 fs/ceph/dir.c  	if (IS_ERR(req))
req              1112 fs/ceph/dir.c  		return PTR_ERR(req);
req              1114 fs/ceph/dir.c  	req->r_dentry = dget(new_dentry);
req              1115 fs/ceph/dir.c  	req->r_num_caps = 2;
req              1116 fs/ceph/dir.c  	req->r_old_dentry = dget(old_dentry);
req              1117 fs/ceph/dir.c  	req->r_old_dentry_dir = old_dir;
req              1118 fs/ceph/dir.c  	req->r_parent = new_dir;
req              1119 fs/ceph/dir.c  	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req              1120 fs/ceph/dir.c  	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
req              1121 fs/ceph/dir.c  	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
req              1122 fs/ceph/dir.c  	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
req              1123 fs/ceph/dir.c  	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
req              1125 fs/ceph/dir.c  	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
req              1127 fs/ceph/dir.c  		req->r_inode_drop =
req              1130 fs/ceph/dir.c  	err = ceph_mdsc_do_request(mdsc, old_dir, req);
req              1131 fs/ceph/dir.c  	if (!err && !req->r_reply_info.head->is_dentry) {
req              1139 fs/ceph/dir.c  	ceph_mdsc_put_request(req);
req              1595 fs/ceph/dir.c  		struct ceph_mds_request *req;
req              1604 fs/ceph/dir.c  		req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
req              1605 fs/ceph/dir.c  		if (!IS_ERR(req)) {
req              1606 fs/ceph/dir.c  			req->r_dentry = dget(dentry);
req              1607 fs/ceph/dir.c  			req->r_num_caps = 2;
req              1608 fs/ceph/dir.c  			req->r_parent = dir;
req              1613 fs/ceph/dir.c  			req->r_args.getattr.mask = cpu_to_le32(mask);
req              1615 fs/ceph/dir.c  			err = ceph_mdsc_do_request(mdsc, NULL, req);
req              1619 fs/ceph/dir.c  				    d_inode(dentry) == req->r_target_inode)
req              1629 fs/ceph/dir.c  			ceph_mdsc_put_request(req);
req               134 fs/ceph/export.c 		struct ceph_mds_request *req;
req               137 fs/ceph/export.c 		req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPINO,
req               139 fs/ceph/export.c 		if (IS_ERR(req))
req               140 fs/ceph/export.c 			return ERR_CAST(req);
req               145 fs/ceph/export.c 		req->r_args.lookupino.mask = cpu_to_le32(mask);
req               147 fs/ceph/export.c 		req->r_ino1 = vino;
req               148 fs/ceph/export.c 		req->r_num_caps = 1;
req               149 fs/ceph/export.c 		err = ceph_mdsc_do_request(mdsc, NULL, req);
req               150 fs/ceph/export.c 		inode = req->r_target_inode;
req               153 fs/ceph/export.c 		ceph_mdsc_put_request(req);
req               189 fs/ceph/export.c 	struct ceph_mds_request *req;
req               212 fs/ceph/export.c 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPINO,
req               214 fs/ceph/export.c 	if (IS_ERR(req))
req               215 fs/ceph/export.c 		return ERR_CAST(req);
req               220 fs/ceph/export.c 	req->r_args.lookupino.mask = cpu_to_le32(mask);
req               222 fs/ceph/export.c 		req->r_args.lookupino.snapid = cpu_to_le64(vino.snap);
req               224 fs/ceph/export.c 			req->r_args.lookupino.parent =
req               226 fs/ceph/export.c 			req->r_args.lookupino.hash =
req               231 fs/ceph/export.c 	req->r_ino1 = vino;
req               232 fs/ceph/export.c 	req->r_num_caps = 1;
req               233 fs/ceph/export.c 	err = ceph_mdsc_do_request(mdsc, NULL, req);
req               234 fs/ceph/export.c 	inode = req->r_target_inode;
req               248 fs/ceph/export.c 	ceph_mdsc_put_request(req);
req               291 fs/ceph/export.c 	struct ceph_mds_request *req;
req               296 fs/ceph/export.c 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPPARENT,
req               298 fs/ceph/export.c 	if (IS_ERR(req))
req               299 fs/ceph/export.c 		return ERR_CAST(req);
req               302 fs/ceph/export.c 		req->r_inode = d_inode(child);
req               305 fs/ceph/export.c 		req->r_ino1 = (struct ceph_vino) {
req               314 fs/ceph/export.c 	req->r_args.getattr.mask = cpu_to_le32(mask);
req               316 fs/ceph/export.c 	req->r_num_caps = 1;
req               317 fs/ceph/export.c 	err = ceph_mdsc_do_request(mdsc, NULL, req);
req               319 fs/ceph/export.c 		ceph_mdsc_put_request(req);
req               323 fs/ceph/export.c 	inode = req->r_target_inode;
req               326 fs/ceph/export.c 	ceph_mdsc_put_request(req);
req               413 fs/ceph/export.c 	struct ceph_mds_request *req = NULL;
req               435 fs/ceph/export.c 		req = ceph_mdsc_create_request(fsc->mdsc, CEPH_MDS_OP_LSSNAP,
req               437 fs/ceph/export.c 		if (IS_ERR(req)) {
req               438 fs/ceph/export.c 			err = PTR_ERR(req);
req               439 fs/ceph/export.c 			req = NULL;
req               442 fs/ceph/export.c 		err = ceph_alloc_readdir_reply_buffer(req, inode);
req               446 fs/ceph/export.c 		req->r_direct_mode = USE_AUTH_MDS;
req               447 fs/ceph/export.c 		req->r_readdir_offset = next_offset;
req               448 fs/ceph/export.c 		req->r_args.readdir.flags =
req               451 fs/ceph/export.c 			req->r_path2 = last_name;
req               455 fs/ceph/export.c 		req->r_inode = dir;
req               457 fs/ceph/export.c 		req->r_dentry = dget(parent);
req               460 fs/ceph/export.c 		err = ceph_mdsc_do_request(fsc->mdsc, NULL, req);
req               466 fs/ceph/export.c 		rinfo = &req->r_reply_info;
req               491 fs/ceph/export.c 		ceph_mdsc_put_request(req);
req               492 fs/ceph/export.c 		req = NULL;
req               496 fs/ceph/export.c 	if (req)
req               497 fs/ceph/export.c 		ceph_mdsc_put_request(req);
req               508 fs/ceph/export.c 	struct ceph_mds_request *req;
req               516 fs/ceph/export.c 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPNAME,
req               518 fs/ceph/export.c 	if (IS_ERR(req))
req               519 fs/ceph/export.c 		return PTR_ERR(req);
req               523 fs/ceph/export.c 	req->r_inode = inode;
req               525 fs/ceph/export.c 	req->r_ino2 = ceph_vino(d_inode(parent));
req               526 fs/ceph/export.c 	req->r_parent = d_inode(parent);
req               527 fs/ceph/export.c 	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req               528 fs/ceph/export.c 	req->r_num_caps = 2;
req               529 fs/ceph/export.c 	err = ceph_mdsc_do_request(mdsc, NULL, req);
req               534 fs/ceph/export.c 		struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
req               544 fs/ceph/export.c 	ceph_mdsc_put_request(req);
req               185 fs/ceph/file.c 	struct ceph_mds_request *req;
req               192 fs/ceph/file.c 	req = ceph_mdsc_create_request(mdsc, op, want_auth);
req               193 fs/ceph/file.c 	if (IS_ERR(req))
req               195 fs/ceph/file.c 	req->r_fmode = ceph_flags_to_mode(flags);
req               196 fs/ceph/file.c 	req->r_args.open.flags = ceph_flags_sys2wire(flags);
req               197 fs/ceph/file.c 	req->r_args.open.mode = cpu_to_le32(create_mode);
req               199 fs/ceph/file.c 	return req;
req               292 fs/ceph/file.c 	struct ceph_mds_request *req;
req               320 fs/ceph/file.c 	req = prepare_open_request(inode->i_sb, flags, 0);
req               321 fs/ceph/file.c 	if (IS_ERR(req)) {
req               322 fs/ceph/file.c 		err = PTR_ERR(req);
req               326 fs/ceph/file.c 	req->r_inode = inode;
req               328 fs/ceph/file.c 	req->r_num_caps = 1;
req               329 fs/ceph/file.c 	req->r_fmode = -1;
req               331 fs/ceph/file.c 	err = ceph_mdsc_do_request(mdsc, NULL, req);
req               332 fs/ceph/file.c 	ceph_mdsc_put_request(req);
req               349 fs/ceph/file.c 	struct ceph_mds_request *req;
req               415 fs/ceph/file.c 	req = prepare_open_request(inode->i_sb, flags, 0);
req               416 fs/ceph/file.c 	if (IS_ERR(req)) {
req               417 fs/ceph/file.c 		err = PTR_ERR(req);
req               420 fs/ceph/file.c 	req->r_inode = inode;
req               423 fs/ceph/file.c 	req->r_num_caps = 1;
req               424 fs/ceph/file.c 	err = ceph_mdsc_do_request(mdsc, NULL, req);
req               426 fs/ceph/file.c 		err = ceph_init_file(inode, file, req->r_fmode);
req               427 fs/ceph/file.c 	ceph_mdsc_put_request(req);
req               443 fs/ceph/file.c 	struct ceph_mds_request *req;
req               471 fs/ceph/file.c 	req = prepare_open_request(dir->i_sb, flags, mode);
req               472 fs/ceph/file.c 	if (IS_ERR(req)) {
req               473 fs/ceph/file.c 		err = PTR_ERR(req);
req               476 fs/ceph/file.c 	req->r_dentry = dget(dentry);
req               477 fs/ceph/file.c 	req->r_num_caps = 2;
req               479 fs/ceph/file.c 		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
req               480 fs/ceph/file.c 		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
req               482 fs/ceph/file.c 			req->r_pagelist = as_ctx.pagelist;
req               490 fs/ceph/file.c        req->r_args.open.mask = cpu_to_le32(mask);
req               492 fs/ceph/file.c 	req->r_parent = dir;
req               493 fs/ceph/file.c 	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req               496 fs/ceph/file.c 				   req);
req               497 fs/ceph/file.c 	err = ceph_handle_snapdir(req, dentry, err);
req               501 fs/ceph/file.c 	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
req               505 fs/ceph/file.c 		dn = ceph_finish_lookup(req, dentry, err);
req               520 fs/ceph/file.c 		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
req               527 fs/ceph/file.c 	if (!req->r_err && req->r_target_inode)
req               528 fs/ceph/file.c 		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
req               529 fs/ceph/file.c 	ceph_mdsc_put_request(req);
req               612 fs/ceph/file.c 		struct ceph_osd_request *req;
req               619 fs/ceph/file.c 		req = ceph_osdc_new_request(osdc, &ci->i_layout,
req               624 fs/ceph/file.c 		if (IS_ERR(req)) {
req               625 fs/ceph/file.c 			ret = PTR_ERR(req);
req               635 fs/ceph/file.c 				ceph_osdc_put_request(req);
req               642 fs/ceph/file.c 				osd_req_op_extent_update(req, 0, len);
req               650 fs/ceph/file.c 				ceph_osdc_put_request(req);
req               656 fs/ceph/file.c 		osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
req               658 fs/ceph/file.c 		ret = ceph_osdc_start_request(osdc, req, false);
req               660 fs/ceph/file.c 			ret = ceph_osdc_wait_request(osdc, req);
req               661 fs/ceph/file.c 		ceph_osdc_put_request(req);
req               742 fs/ceph/file.c 	struct ceph_osd_request *req;
req               793 fs/ceph/file.c static void ceph_aio_complete_req(struct ceph_osd_request *req)
req               795 fs/ceph/file.c 	int rc = req->r_result;
req               796 fs/ceph/file.c 	struct inode *inode = req->r_inode;
req               797 fs/ceph/file.c 	struct ceph_aio_request *aio_req = req->r_priv;
req               798 fs/ceph/file.c 	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
req               813 fs/ceph/file.c 			aio_work->req = req;
req               850 fs/ceph/file.c 	ceph_osdc_put_request(req);
req               863 fs/ceph/file.c 	struct ceph_osd_request *orig_req = aio_work->req;
req               868 fs/ceph/file.c 	struct ceph_osd_request *req;
req               884 fs/ceph/file.c 	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
req               886 fs/ceph/file.c 	if (!req) {
req               888 fs/ceph/file.c 		req = orig_req;
req               892 fs/ceph/file.c 	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
req               893 fs/ceph/file.c 	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
req               894 fs/ceph/file.c 	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);
req               896 fs/ceph/file.c 	req->r_ops[0] = orig_req->r_ops[0];
req               898 fs/ceph/file.c 	req->r_mtime = aio_req->mtime;
req               899 fs/ceph/file.c 	req->r_data_offset = req->r_ops[0].extent.offset;
req               901 fs/ceph/file.c 	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
req               903 fs/ceph/file.c 		ceph_osdc_put_request(req);
req               904 fs/ceph/file.c 		req = orig_req;
req               910 fs/ceph/file.c 	req->r_callback = ceph_aio_complete_req;
req               911 fs/ceph/file.c 	req->r_inode = inode;
req               912 fs/ceph/file.c 	req->r_priv = aio_req;
req               914 fs/ceph/file.c 	ret = ceph_osdc_start_request(req->r_osdc, req, false);
req               917 fs/ceph/file.c 		req->r_result = ret;
req               918 fs/ceph/file.c 		ceph_aio_complete_req(req);
req               935 fs/ceph/file.c 	struct ceph_osd_request *req;
req               976 fs/ceph/file.c 		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
req               985 fs/ceph/file.c 		if (IS_ERR(req)) {
req               986 fs/ceph/file.c 			ret = PTR_ERR(req);
req               992 fs/ceph/file.c 			ceph_osdc_put_request(req);
req               997 fs/ceph/file.c 			osd_req_op_extent_update(req, 0, len);
req              1027 fs/ceph/file.c 			req->r_mtime = mtime;
req              1030 fs/ceph/file.c 		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
req              1037 fs/ceph/file.c 			req->r_callback = ceph_aio_complete_req;
req              1038 fs/ceph/file.c 			req->r_inode = inode;
req              1039 fs/ceph/file.c 			req->r_priv = aio_req;
req              1040 fs/ceph/file.c 			list_add_tail(&req->r_private_item, &aio_req->osd_reqs);
req              1046 fs/ceph/file.c 		ret = ceph_osdc_start_request(req->r_osdc, req, false);
req              1048 fs/ceph/file.c 			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
req              1069 fs/ceph/file.c 		ceph_osdc_put_request(req);
req              1099 fs/ceph/file.c 			req = list_first_entry(&osd_reqs,
req              1102 fs/ceph/file.c 			list_del_init(&req->r_private_item);
req              1104 fs/ceph/file.c 				ret = ceph_osdc_start_request(req->r_osdc,
req              1105 fs/ceph/file.c 							      req, false);
req              1107 fs/ceph/file.c 				req->r_result = ret;
req              1108 fs/ceph/file.c 				ceph_aio_complete_req(req);
req              1137 fs/ceph/file.c 	struct ceph_osd_request *req;
req              1172 fs/ceph/file.c 		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
req              1178 fs/ceph/file.c 		if (IS_ERR(req)) {
req              1179 fs/ceph/file.c 			ret = PTR_ERR(req);
req              1211 fs/ceph/file.c 		req->r_inode = inode;
req              1213 fs/ceph/file.c 		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
req              1216 fs/ceph/file.c 		req->r_mtime = mtime;
req              1217 fs/ceph/file.c 		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
req              1219 fs/ceph/file.c 			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
req              1222 fs/ceph/file.c 		ceph_osdc_put_request(req);
req              1703 fs/ceph/file.c 	struct ceph_osd_request *req;
req              1715 fs/ceph/file.c 	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
req              1721 fs/ceph/file.c 	if (IS_ERR(req)) {
req              1722 fs/ceph/file.c 		ret = PTR_ERR(req);
req              1726 fs/ceph/file.c 	req->r_mtime = inode->i_mtime;
req              1727 fs/ceph/file.c 	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
req              1729 fs/ceph/file.c 		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
req              1733 fs/ceph/file.c 	ceph_osdc_put_request(req);
req              1215 fs/ceph/inode.c int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
req              1217 fs/ceph/inode.c 	struct ceph_mds_session *session = req->r_session;
req              1218 fs/ceph/inode.c 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
req              1224 fs/ceph/inode.c 	dout("fill_trace %p is_dentry %d is_target %d\n", req,
req              1229 fs/ceph/inode.c 		if (rinfo->head->result == 0 && req->r_parent)
req              1230 fs/ceph/inode.c 			ceph_invalidate_dir_request(req);
req              1235 fs/ceph/inode.c 		struct inode *dir = req->r_parent;
req              1240 fs/ceph/inode.c 					 session, req->r_request_started, -1,
req              1241 fs/ceph/inode.c 					 &req->r_caps_reservation);
req              1248 fs/ceph/inode.c 		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
req              1249 fs/ceph/inode.c 		    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
req              1250 fs/ceph/inode.c 		    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
req              1255 fs/ceph/inode.c 			BUG_ON(req->r_dentry);
req              1291 fs/ceph/inode.c 			req->r_dentry = dn;
req              1305 fs/ceph/inode.c 		req->r_target_inode = in;
req              1307 fs/ceph/inode.c 		err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL,
req              1308 fs/ceph/inode.c 				session, req->r_request_started,
req              1309 fs/ceph/inode.c 				(!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
req              1310 fs/ceph/inode.c 				rinfo->head->result == 0) ?  req->r_fmode : -1,
req              1311 fs/ceph/inode.c 				&req->r_caps_reservation);
req              1324 fs/ceph/inode.c             !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
req              1325 fs/ceph/inode.c 	    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
req              1326 fs/ceph/inode.c 	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
req              1328 fs/ceph/inode.c 					       req->r_dentry->d_name.len))) {
req              1334 fs/ceph/inode.c 		struct inode *dir = req->r_parent;
req              1335 fs/ceph/inode.c 		struct dentry *dn = req->r_dentry;
req              1360 fs/ceph/inode.c 		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
req              1361 fs/ceph/inode.c 			struct inode *olddir = req->r_old_dentry_dir;
req              1365 fs/ceph/inode.c 			     req->r_old_dentry,
req              1366 fs/ceph/inode.c 			     req->r_old_dentry,
req              1369 fs/ceph/inode.c 			     req->r_old_dentry, dn);
req              1375 fs/ceph/inode.c 			d_move(req->r_old_dentry, dn);
req              1377 fs/ceph/inode.c 			     req->r_old_dentry,
req              1378 fs/ceph/inode.c 			     req->r_old_dentry,
req              1385 fs/ceph/inode.c 			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
req              1386 fs/ceph/inode.c 			     ceph_dentry(req->r_old_dentry)->offset);
req              1391 fs/ceph/inode.c 			req->r_dentry = req->r_old_dentry;
req              1392 fs/ceph/inode.c 			req->r_old_dentry = dn;
req              1393 fs/ceph/inode.c 			dn = req->r_dentry;
req              1408 fs/ceph/inode.c 						    req->r_request_started);
req              1417 fs/ceph/inode.c 			err = splice_dentry(&req->r_dentry, in);
req              1420 fs/ceph/inode.c 			dn = req->r_dentry;  /* may have spliced */
req              1432 fs/ceph/inode.c 					    req->r_request_started);
req              1435 fs/ceph/inode.c 	} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
req              1436 fs/ceph/inode.c 		    req->r_op == CEPH_MDS_OP_MKSNAP) &&
req              1437 fs/ceph/inode.c 	           test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
req              1438 fs/ceph/inode.c 		   !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
req              1439 fs/ceph/inode.c 		struct inode *dir = req->r_parent;
req              1444 fs/ceph/inode.c 		BUG_ON(!req->r_dentry);
req              1445 fs/ceph/inode.c 		dout(" linking snapped dir %p to dn %p\n", in, req->r_dentry);
req              1448 fs/ceph/inode.c 		err = splice_dentry(&req->r_dentry, in);
req              1451 fs/ceph/inode.c 	} else if (rinfo->head->is_dentry && req->r_dentry) {
req              1461 fs/ceph/inode.c 		update_dentry_lease_careful(req->r_dentry, rinfo->dlease,
req              1462 fs/ceph/inode.c 					    session, req->r_request_started,
req              1474 fs/ceph/inode.c static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
req              1477 fs/ceph/inode.c 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
req              1489 fs/ceph/inode.c 		in = ceph_get_inode(req->r_dentry->d_sb, vino);
req              1496 fs/ceph/inode.c 				req->r_request_started, -1,
req              1497 fs/ceph/inode.c 				&req->r_caps_reservation);
req              1520 fs/ceph/inode.c 			      struct ceph_mds_request *req)
req              1545 fs/ceph/inode.c 	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
req              1546 fs/ceph/inode.c 	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
req              1557 fs/ceph/inode.c int ceph_readdir_prepopulate(struct ceph_mds_request *req,
req              1560 fs/ceph/inode.c 	struct dentry *parent = req->r_dentry;
req              1562 fs/ceph/inode.c 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
req              1567 fs/ceph/inode.c 	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
req              1573 fs/ceph/inode.c 	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
req              1574 fs/ceph/inode.c 		return readdir_prepopulate_inodes_only(req, session);
req              1577 fs/ceph/inode.c 		if (req->r_path2) {
req              1579 fs/ceph/inode.c 						  req->r_path2,
req              1580 fs/ceph/inode.c 						  strlen(req->r_path2));
req              1584 fs/ceph/inode.c 			WARN_ON_ONCE(req->r_readdir_offset != 2);
req              1595 fs/ceph/inode.c 			req->r_readdir_offset = 2;
req              1608 fs/ceph/inode.c 		    req->r_readdir_offset == 2 &&
req              1612 fs/ceph/inode.c 			req->r_dir_release_cnt =
req              1614 fs/ceph/inode.c 			req->r_dir_ordered_cnt =
req              1616 fs/ceph/inode.c 			req->r_readdir_cache_idx = 0;
req              1620 fs/ceph/inode.c 	cache_ctl.index = req->r_readdir_cache_idx;
req              1621 fs/ceph/inode.c 	fpos_offset = req->r_readdir_offset;
req              1697 fs/ceph/inode.c 				 req->r_request_started, -1,
req              1698 fs/ceph/inode.c 				 &req->r_caps_reservation);
req              1728 fs/ceph/inode.c 				    rde->lease, req->r_session,
req              1729 fs/ceph/inode.c 				    req->r_request_started);
req              1733 fs/ceph/inode.c 						 &cache_ctl, req);
req              1742 fs/ceph/inode.c 		set_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags);
req              1743 fs/ceph/inode.c 		req->r_readdir_cache_idx = cache_ctl.index;
req              1994 fs/ceph/inode.c 	struct ceph_mds_request *req;
req              2008 fs/ceph/inode.c 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
req              2010 fs/ceph/inode.c 	if (IS_ERR(req)) {
req              2012 fs/ceph/inode.c 		return PTR_ERR(req);
req              2040 fs/ceph/inode.c 			req->r_args.setattr.uid = cpu_to_le32(
req              2055 fs/ceph/inode.c 			req->r_args.setattr.gid = cpu_to_le32(
req              2070 fs/ceph/inode.c 			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
req              2091 fs/ceph/inode.c 			ceph_encode_timespec64(&req->r_args.setattr.atime,
req              2110 fs/ceph/inode.c 			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
req              2111 fs/ceph/inode.c 			req->r_args.setattr.old_size =
req              2133 fs/ceph/inode.c 			ceph_encode_timespec64(&req->r_args.setattr.mtime,
req              2184 fs/ceph/inode.c 		req->r_inode = inode;
req              2186 fs/ceph/inode.c 		req->r_inode_drop = release;
req              2187 fs/ceph/inode.c 		req->r_args.setattr.mask = cpu_to_le32(mask);
req              2188 fs/ceph/inode.c 		req->r_num_caps = 1;
req              2189 fs/ceph/inode.c 		req->r_stamp = attr->ia_ctime;
req              2190 fs/ceph/inode.c 		err = ceph_mdsc_do_request(mdsc, NULL, req);
req              2195 fs/ceph/inode.c 	ceph_mdsc_put_request(req);
req              2245 fs/ceph/inode.c 	struct ceph_mds_request *req;
req              2260 fs/ceph/inode.c 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
req              2261 fs/ceph/inode.c 	if (IS_ERR(req))
req              2262 fs/ceph/inode.c 		return PTR_ERR(req);
req              2263 fs/ceph/inode.c 	req->r_inode = inode;
req              2265 fs/ceph/inode.c 	req->r_num_caps = 1;
req              2266 fs/ceph/inode.c 	req->r_args.getattr.mask = cpu_to_le32(mask);
req              2267 fs/ceph/inode.c 	req->r_locked_page = locked_page;
req              2268 fs/ceph/inode.c 	err = ceph_mdsc_do_request(mdsc, NULL, req);
req              2270 fs/ceph/inode.c 		u64 inline_version = req->r_reply_info.targeti.inline_version;
req              2277 fs/ceph/inode.c 			err = req->r_reply_info.targeti.inline_len;
req              2280 fs/ceph/inode.c 	ceph_mdsc_put_request(req);
req                68 fs/ceph/ioctl.c 	struct ceph_mds_request *req;
req               107 fs/ceph/ioctl.c 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETLAYOUT,
req               109 fs/ceph/ioctl.c 	if (IS_ERR(req))
req               110 fs/ceph/ioctl.c 		return PTR_ERR(req);
req               111 fs/ceph/ioctl.c 	req->r_inode = inode;
req               113 fs/ceph/ioctl.c 	req->r_num_caps = 1;
req               115 fs/ceph/ioctl.c 	req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL;
req               117 fs/ceph/ioctl.c 	req->r_args.setlayout.layout.fl_stripe_unit =
req               119 fs/ceph/ioctl.c 	req->r_args.setlayout.layout.fl_stripe_count =
req               121 fs/ceph/ioctl.c 	req->r_args.setlayout.layout.fl_object_size =
req               123 fs/ceph/ioctl.c 	req->r_args.setlayout.layout.fl_pg_pool = cpu_to_le32(l.data_pool);
req               125 fs/ceph/ioctl.c 	err = ceph_mdsc_do_request(mdsc, NULL, req);
req               126 fs/ceph/ioctl.c 	ceph_mdsc_put_request(req);
req               139 fs/ceph/ioctl.c 	struct ceph_mds_request *req;
req               152 fs/ceph/ioctl.c 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETDIRLAYOUT,
req               155 fs/ceph/ioctl.c 	if (IS_ERR(req))
req               156 fs/ceph/ioctl.c 		return PTR_ERR(req);
req               157 fs/ceph/ioctl.c 	req->r_inode = inode;
req               159 fs/ceph/ioctl.c 	req->r_num_caps = 1;
req               161 fs/ceph/ioctl.c 	req->r_args.setlayout.layout.fl_stripe_unit =
req               163 fs/ceph/ioctl.c 	req->r_args.setlayout.layout.fl_stripe_count =
req               165 fs/ceph/ioctl.c 	req->r_args.setlayout.layout.fl_object_size =
req               167 fs/ceph/ioctl.c 	req->r_args.setlayout.layout.fl_pg_pool =
req               170 fs/ceph/ioctl.c 	err = ceph_mdsc_do_request(mdsc, inode, req);
req               171 fs/ceph/ioctl.c 	ceph_mdsc_put_request(req);
req                14 fs/ceph/locks.c                                          struct ceph_mds_request *req);
req                67 fs/ceph/locks.c 	struct ceph_mds_request *req;
req                86 fs/ceph/locks.c 	req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS);
req                87 fs/ceph/locks.c 	if (IS_ERR(req))
req                88 fs/ceph/locks.c 		return PTR_ERR(req);
req                89 fs/ceph/locks.c 	req->r_inode = inode;
req                91 fs/ceph/locks.c 	req->r_num_caps = 1;
req               106 fs/ceph/locks.c 	req->r_args.filelock_change.rule = lock_type;
req               107 fs/ceph/locks.c 	req->r_args.filelock_change.type = cmd;
req               108 fs/ceph/locks.c 	req->r_args.filelock_change.owner = cpu_to_le64(owner);
req               109 fs/ceph/locks.c 	req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid);
req               110 fs/ceph/locks.c 	req->r_args.filelock_change.start = cpu_to_le64(fl->fl_start);
req               111 fs/ceph/locks.c 	req->r_args.filelock_change.length = cpu_to_le64(length);
req               112 fs/ceph/locks.c 	req->r_args.filelock_change.wait = wait;
req               115 fs/ceph/locks.c 		req->r_wait_for_completion = ceph_lock_wait_for_completion;
req               117 fs/ceph/locks.c 	err = ceph_mdsc_do_request(mdsc, inode, req);
req               119 fs/ceph/locks.c 		fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
req               120 fs/ceph/locks.c 		if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
req               122 fs/ceph/locks.c 		else if (CEPH_LOCK_EXCL == req->r_reply_info.filelock_reply->type)
req               127 fs/ceph/locks.c 		fl->fl_start = le64_to_cpu(req->r_reply_info.filelock_reply->start);
req               128 fs/ceph/locks.c 		length = le64_to_cpu(req->r_reply_info.filelock_reply->start) +
req               129 fs/ceph/locks.c 						 le64_to_cpu(req->r_reply_info.filelock_reply->length);
req               136 fs/ceph/locks.c 	ceph_mdsc_put_request(req);
req               145 fs/ceph/locks.c                                          struct ceph_mds_request *req)
req               148 fs/ceph/locks.c 	struct inode *inode = req->r_inode;
req               151 fs/ceph/locks.c 	BUG_ON(req->r_op != CEPH_MDS_OP_SETFILELOCK);
req               152 fs/ceph/locks.c 	if (req->r_args.filelock_change.rule == CEPH_LOCK_FCNTL)
req               154 fs/ceph/locks.c 	else if (req->r_args.filelock_change.rule == CEPH_LOCK_FLOCK)
req               158 fs/ceph/locks.c 	BUG_ON(req->r_args.filelock_change.type == CEPH_LOCK_UNLOCK);
req               160 fs/ceph/locks.c 	err = wait_for_completion_interruptible(&req->r_completion);
req               165 fs/ceph/locks.c 	     req->r_tid);
req               168 fs/ceph/locks.c 	if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
req               176 fs/ceph/locks.c 		mutex_lock(&req->r_fill_mutex);
req               177 fs/ceph/locks.c 		req->r_err = err;
req               178 fs/ceph/locks.c 		set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
req               179 fs/ceph/locks.c 		mutex_unlock(&req->r_fill_mutex);
req               181 fs/ceph/locks.c 		if (!req->r_session) {
req               199 fs/ceph/locks.c 	intr_req->r_args.filelock_change = req->r_args.filelock_change;
req               209 fs/ceph/locks.c 	wait_for_completion_killable(&req->r_safe_completion);
req               688 fs/ceph/mds_client.c static void put_request_session(struct ceph_mds_request *req)
req               690 fs/ceph/mds_client.c 	if (req->r_session) {
req               691 fs/ceph/mds_client.c 		ceph_put_mds_session(req->r_session);
req               692 fs/ceph/mds_client.c 		req->r_session = NULL;
req               698 fs/ceph/mds_client.c 	struct ceph_mds_request *req = container_of(kref,
req               701 fs/ceph/mds_client.c 	destroy_reply_info(&req->r_reply_info);
req               702 fs/ceph/mds_client.c 	if (req->r_request)
req               703 fs/ceph/mds_client.c 		ceph_msg_put(req->r_request);
req               704 fs/ceph/mds_client.c 	if (req->r_reply)
req               705 fs/ceph/mds_client.c 		ceph_msg_put(req->r_reply);
req               706 fs/ceph/mds_client.c 	if (req->r_inode) {
req               707 fs/ceph/mds_client.c 		ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
req               709 fs/ceph/mds_client.c 		ceph_async_iput(req->r_inode);
req               711 fs/ceph/mds_client.c 	if (req->r_parent) {
req               712 fs/ceph/mds_client.c 		ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
req               713 fs/ceph/mds_client.c 		ceph_async_iput(req->r_parent);
req               715 fs/ceph/mds_client.c 	ceph_async_iput(req->r_target_inode);
req               716 fs/ceph/mds_client.c 	if (req->r_dentry)
req               717 fs/ceph/mds_client.c 		dput(req->r_dentry);
req               718 fs/ceph/mds_client.c 	if (req->r_old_dentry)
req               719 fs/ceph/mds_client.c 		dput(req->r_old_dentry);
req               720 fs/ceph/mds_client.c 	if (req->r_old_dentry_dir) {
req               727 fs/ceph/mds_client.c 		ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
req               729 fs/ceph/mds_client.c 		ceph_async_iput(req->r_old_dentry_dir);
req               731 fs/ceph/mds_client.c 	kfree(req->r_path1);
req               732 fs/ceph/mds_client.c 	kfree(req->r_path2);
req               733 fs/ceph/mds_client.c 	if (req->r_pagelist)
req               734 fs/ceph/mds_client.c 		ceph_pagelist_release(req->r_pagelist);
req               735 fs/ceph/mds_client.c 	put_request_session(req);
req               736 fs/ceph/mds_client.c 	ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
req               737 fs/ceph/mds_client.c 	WARN_ON_ONCE(!list_empty(&req->r_wait));
req               738 fs/ceph/mds_client.c 	kfree(req);
req               751 fs/ceph/mds_client.c 	struct ceph_mds_request *req;
req               753 fs/ceph/mds_client.c 	req = lookup_request(&mdsc->request_tree, tid);
req               754 fs/ceph/mds_client.c 	if (req)
req               755 fs/ceph/mds_client.c 		ceph_mdsc_get_request(req);
req               757 fs/ceph/mds_client.c 	return req;
req               767 fs/ceph/mds_client.c 			       struct ceph_mds_request *req,
req               772 fs/ceph/mds_client.c 	req->r_tid = ++mdsc->last_tid;
req               773 fs/ceph/mds_client.c 	if (req->r_num_caps) {
req               774 fs/ceph/mds_client.c 		ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
req               775 fs/ceph/mds_client.c 					req->r_num_caps);
req               778 fs/ceph/mds_client.c 			       "failed to reserve caps: %d\n", req, ret);
req               780 fs/ceph/mds_client.c 			req->r_err = ret;
req               784 fs/ceph/mds_client.c 	dout("__register_request %p tid %lld\n", req, req->r_tid);
req               785 fs/ceph/mds_client.c 	ceph_mdsc_get_request(req);
req               786 fs/ceph/mds_client.c 	insert_request(&mdsc->request_tree, req);
req               788 fs/ceph/mds_client.c 	req->r_uid = current_fsuid();
req               789 fs/ceph/mds_client.c 	req->r_gid = current_fsgid();
req               791 fs/ceph/mds_client.c 	if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
req               792 fs/ceph/mds_client.c 		mdsc->oldest_tid = req->r_tid;
req               796 fs/ceph/mds_client.c 		req->r_unsafe_dir = dir;
req               801 fs/ceph/mds_client.c 				 struct ceph_mds_request *req)
req               803 fs/ceph/mds_client.c 	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
req               806 fs/ceph/mds_client.c 	list_del_init(&req->r_unsafe_item);
req               808 fs/ceph/mds_client.c 	if (req->r_tid == mdsc->oldest_tid) {
req               809 fs/ceph/mds_client.c 		struct rb_node *p = rb_next(&req->r_node);
req               822 fs/ceph/mds_client.c 	erase_request(&mdsc->request_tree, req);
req               824 fs/ceph/mds_client.c 	if (req->r_unsafe_dir  &&
req               825 fs/ceph/mds_client.c 	    test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
req               826 fs/ceph/mds_client.c 		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
req               828 fs/ceph/mds_client.c 		list_del_init(&req->r_unsafe_dir_item);
req               831 fs/ceph/mds_client.c 	if (req->r_target_inode &&
req               832 fs/ceph/mds_client.c 	    test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
req               833 fs/ceph/mds_client.c 		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
req               835 fs/ceph/mds_client.c 		list_del_init(&req->r_unsafe_target_item);
req               839 fs/ceph/mds_client.c 	if (req->r_unsafe_dir) {
req               841 fs/ceph/mds_client.c 		ceph_async_iput(req->r_unsafe_dir);
req               842 fs/ceph/mds_client.c 		req->r_unsafe_dir = NULL;
req               845 fs/ceph/mds_client.c 	complete_all(&req->r_safe_completion);
req               847 fs/ceph/mds_client.c 	ceph_mdsc_put_request(req);
req               881 fs/ceph/mds_client.c 			struct ceph_mds_request *req)
req               886 fs/ceph/mds_client.c 	int mode = req->r_direct_mode;
req               888 fs/ceph/mds_client.c 	u32 hash = req->r_direct_hash;
req               889 fs/ceph/mds_client.c 	bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
req               895 fs/ceph/mds_client.c 	if (req->r_resend_mds >= 0 &&
req               896 fs/ceph/mds_client.c 	    (__have_session(mdsc, req->r_resend_mds) ||
req               897 fs/ceph/mds_client.c 	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
req               899 fs/ceph/mds_client.c 		     req->r_resend_mds);
req               900 fs/ceph/mds_client.c 		return req->r_resend_mds;
req               907 fs/ceph/mds_client.c 	if (req->r_inode) {
req               908 fs/ceph/mds_client.c 		if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) {
req               909 fs/ceph/mds_client.c 			inode = req->r_inode;
req               914 fs/ceph/mds_client.c 			inode = get_nonsnap_parent(req->r_dentry);
req               918 fs/ceph/mds_client.c 	} else if (req->r_dentry) {
req               924 fs/ceph/mds_client.c 		parent = READ_ONCE(req->r_dentry->d_parent);
req               925 fs/ceph/mds_client.c 		dir = req->r_parent ? : d_inode_rcu(parent);
req               929 fs/ceph/mds_client.c 			inode = d_inode(req->r_dentry);
req               939 fs/ceph/mds_client.c 			inode = d_inode(req->r_dentry);
req               943 fs/ceph/mds_client.c 				hash = ceph_dentry_hash(dir, req->r_dentry);
req              1273 fs/ceph/mds_client.c 	struct ceph_mds_request *req;
req              1280 fs/ceph/mds_client.c 		req = list_first_entry(&session->s_unsafe,
req              1283 fs/ceph/mds_client.c 				    req->r_tid);
req              1284 fs/ceph/mds_client.c 		if (req->r_target_inode) {
req              1286 fs/ceph/mds_client.c 			ci = ceph_inode(req->r_target_inode);
req              1289 fs/ceph/mds_client.c 		if (req->r_unsafe_dir) {
req              1291 fs/ceph/mds_client.c 			ci = ceph_inode(req->r_unsafe_dir);
req              1294 fs/ceph/mds_client.c 		__unregister_request(mdsc, req);
req              1299 fs/ceph/mds_client.c 		req = rb_entry(p, struct ceph_mds_request, r_node);
req              1301 fs/ceph/mds_client.c 		if (req->r_session &&
req              1302 fs/ceph/mds_client.c 		    req->r_session->s_mds == session->s_mds)
req              1303 fs/ceph/mds_client.c 			req->r_attempts = 0;
req              2030 fs/ceph/mds_client.c int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
req              2034 fs/ceph/mds_client.c 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
req              2035 fs/ceph/mds_client.c 	struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
req              2061 fs/ceph/mds_client.c 	req->r_num_caps = num_entries + 1;
req              2062 fs/ceph/mds_client.c 	req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
req              2063 fs/ceph/mds_client.c 	req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
req              2073 fs/ceph/mds_client.c 	struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
req              2076 fs/ceph/mds_client.c 	if (!req)
req              2079 fs/ceph/mds_client.c 	mutex_init(&req->r_fill_mutex);
req              2080 fs/ceph/mds_client.c 	req->r_mdsc = mdsc;
req              2081 fs/ceph/mds_client.c 	req->r_started = jiffies;
req              2082 fs/ceph/mds_client.c 	req->r_resend_mds = -1;
req              2083 fs/ceph/mds_client.c 	INIT_LIST_HEAD(&req->r_unsafe_dir_item);
req              2084 fs/ceph/mds_client.c 	INIT_LIST_HEAD(&req->r_unsafe_target_item);
req              2085 fs/ceph/mds_client.c 	req->r_fmode = -1;
req              2086 fs/ceph/mds_client.c 	kref_init(&req->r_kref);
req              2087 fs/ceph/mds_client.c 	RB_CLEAR_NODE(&req->r_node);
req              2088 fs/ceph/mds_client.c 	INIT_LIST_HEAD(&req->r_wait);
req              2089 fs/ceph/mds_client.c 	init_completion(&req->r_completion);
req              2090 fs/ceph/mds_client.c 	init_completion(&req->r_safe_completion);
req              2091 fs/ceph/mds_client.c 	INIT_LIST_HEAD(&req->r_unsafe_item);
req              2094 fs/ceph/mds_client.c 	req->r_stamp = timespec64_trunc(ts, mdsc->fsc->sb->s_time_gran);
req              2096 fs/ceph/mds_client.c 	req->r_op = op;
req              2097 fs/ceph/mds_client.c 	req->r_direct_mode = mode;
req              2098 fs/ceph/mds_client.c 	return req;
req              2285 fs/ceph/mds_client.c 					       struct ceph_mds_request *req,
req              2300 fs/ceph/mds_client.c 	ret = set_request_path_attr(req->r_inode, req->r_dentry,
req              2301 fs/ceph/mds_client.c 			      req->r_parent, req->r_path1, req->r_ino1.ino,
req              2304 fs/ceph/mds_client.c 					&req->r_req_flags));
req              2311 fs/ceph/mds_client.c 	ret = set_request_path_attr(NULL, req->r_old_dentry,
req              2312 fs/ceph/mds_client.c 			      req->r_old_dentry_dir,
req              2313 fs/ceph/mds_client.c 			      req->r_path2, req->r_ino2.ino,
req              2326 fs/ceph/mds_client.c 		(!!req->r_inode_drop + !!req->r_dentry_drop +
req              2327 fs/ceph/mds_client.c 		 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
req              2328 fs/ceph/mds_client.c 	if (req->r_dentry_drop)
req              2330 fs/ceph/mds_client.c 	if (req->r_old_dentry_drop)
req              2340 fs/ceph/mds_client.c 	msg->hdr.tid = cpu_to_le64(req->r_tid);
req              2347 fs/ceph/mds_client.c 	head->op = cpu_to_le32(req->r_op);
req              2348 fs/ceph/mds_client.c 	head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
req              2349 fs/ceph/mds_client.c 	head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
req              2350 fs/ceph/mds_client.c 	head->args = req->r_args;
req              2356 fs/ceph/mds_client.c 	req->r_request_release_offset = p - msg->front.iov_base;
req              2360 fs/ceph/mds_client.c 	if (req->r_inode_drop)
req              2362 fs/ceph/mds_client.c 		      req->r_inode ? req->r_inode : d_inode(req->r_dentry),
req              2363 fs/ceph/mds_client.c 		      mds, req->r_inode_drop, req->r_inode_unless, 0);
req              2364 fs/ceph/mds_client.c 	if (req->r_dentry_drop)
req              2365 fs/ceph/mds_client.c 		releases += ceph_encode_dentry_release(&p, req->r_dentry,
req              2366 fs/ceph/mds_client.c 				req->r_parent, mds, req->r_dentry_drop,
req              2367 fs/ceph/mds_client.c 				req->r_dentry_unless);
req              2368 fs/ceph/mds_client.c 	if (req->r_old_dentry_drop)
req              2369 fs/ceph/mds_client.c 		releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
req              2370 fs/ceph/mds_client.c 				req->r_old_dentry_dir, mds,
req              2371 fs/ceph/mds_client.c 				req->r_old_dentry_drop,
req              2372 fs/ceph/mds_client.c 				req->r_old_dentry_unless);
req              2373 fs/ceph/mds_client.c 	if (req->r_old_inode_drop)
req              2375 fs/ceph/mds_client.c 		      d_inode(req->r_old_dentry),
req              2376 fs/ceph/mds_client.c 		      mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
req              2380 fs/ceph/mds_client.c 		p = msg->front.iov_base + req->r_request_release_offset;
req              2388 fs/ceph/mds_client.c 		ceph_encode_timespec64(&ts, &req->r_stamp);
req              2396 fs/ceph/mds_client.c 	if (req->r_pagelist) {
req              2397 fs/ceph/mds_client.c 		struct ceph_pagelist *pagelist = req->r_pagelist;
req              2421 fs/ceph/mds_client.c 			     struct ceph_mds_request *req)
req              2423 fs/ceph/mds_client.c 	if (req->r_callback)
req              2424 fs/ceph/mds_client.c 		req->r_callback(mdsc, req);
req              2425 fs/ceph/mds_client.c 	complete_all(&req->r_completion);
req              2432 fs/ceph/mds_client.c 				  struct ceph_mds_request *req,
req              2439 fs/ceph/mds_client.c 	req->r_attempts++;
req              2440 fs/ceph/mds_client.c 	if (req->r_inode) {
req              2442 fs/ceph/mds_client.c 			ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
req              2445 fs/ceph/mds_client.c 			req->r_sent_on_mseq = cap->mseq;
req              2447 fs/ceph/mds_client.c 			req->r_sent_on_mseq = -1;
req              2449 fs/ceph/mds_client.c 	dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
req              2450 fs/ceph/mds_client.c 	     req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
req              2452 fs/ceph/mds_client.c 	if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
req              2460 fs/ceph/mds_client.c 		msg = req->r_request;
req              2467 fs/ceph/mds_client.c 		if (req->r_target_inode)
req              2468 fs/ceph/mds_client.c 			rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
req              2470 fs/ceph/mds_client.c 		rhead->num_retry = req->r_attempts - 1;
req              2476 fs/ceph/mds_client.c 		p = msg->front.iov_base + req->r_request_release_offset;
req              2479 fs/ceph/mds_client.c 			ceph_encode_timespec64(&ts, &req->r_stamp);
req              2488 fs/ceph/mds_client.c 	if (req->r_request) {
req              2489 fs/ceph/mds_client.c 		ceph_msg_put(req->r_request);
req              2490 fs/ceph/mds_client.c 		req->r_request = NULL;
req              2492 fs/ceph/mds_client.c 	msg = create_request_message(mdsc, req, mds, drop_cap_releases);
req              2494 fs/ceph/mds_client.c 		req->r_err = PTR_ERR(msg);
req              2497 fs/ceph/mds_client.c 	req->r_request = msg;
req              2501 fs/ceph/mds_client.c 	if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
req              2503 fs/ceph/mds_client.c 	if (req->r_parent)
req              2506 fs/ceph/mds_client.c 	rhead->num_fwd = req->r_num_fwd;
req              2507 fs/ceph/mds_client.c 	rhead->num_retry = req->r_attempts - 1;
req              2510 fs/ceph/mds_client.c 	dout(" r_parent = %p\n", req->r_parent);
req              2518 fs/ceph/mds_client.c 			struct ceph_mds_request *req)
req              2524 fs/ceph/mds_client.c 	if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
req              2525 fs/ceph/mds_client.c 		if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
req              2526 fs/ceph/mds_client.c 			__unregister_request(mdsc, req);
req              2530 fs/ceph/mds_client.c 	if (req->r_timeout &&
req              2531 fs/ceph/mds_client.c 	    time_after_eq(jiffies, req->r_started + req->r_timeout)) {
req              2549 fs/ceph/mds_client.c 			list_add(&req->r_wait, &mdsc->waiting_for_map);
req              2560 fs/ceph/mds_client.c 	put_request_session(req);
req              2562 fs/ceph/mds_client.c 	mds = __choose_mds(mdsc, req);
req              2566 fs/ceph/mds_client.c 		list_add(&req->r_wait, &mdsc->waiting_for_map);
req              2579 fs/ceph/mds_client.c 	req->r_session = get_session(session);
req              2592 fs/ceph/mds_client.c 		list_add(&req->r_wait, &session->s_waiting);
req              2597 fs/ceph/mds_client.c 	req->r_resend_mds = -1;   /* forget any previous mds hint */
req              2599 fs/ceph/mds_client.c 	if (req->r_request_started == 0)   /* note request start time */
req              2600 fs/ceph/mds_client.c 		req->r_request_started = jiffies;
req              2602 fs/ceph/mds_client.c 	err = __prepare_send_request(mdsc, req, mds, false);
req              2604 fs/ceph/mds_client.c 		ceph_msg_get(req->r_request);
req              2605 fs/ceph/mds_client.c 		ceph_con_send(&session->s_con, req->r_request);
req              2613 fs/ceph/mds_client.c 		req->r_err = err;
req              2614 fs/ceph/mds_client.c 		complete_request(mdsc, req);
req              2615 fs/ceph/mds_client.c 		__unregister_request(mdsc, req);
req              2626 fs/ceph/mds_client.c 	struct ceph_mds_request *req;
req              2632 fs/ceph/mds_client.c 		req = list_entry(tmp_list.next,
req              2634 fs/ceph/mds_client.c 		list_del_init(&req->r_wait);
req              2635 fs/ceph/mds_client.c 		dout(" wake request %p tid %llu\n", req, req->r_tid);
req              2636 fs/ceph/mds_client.c 		__do_request(mdsc, req);
req              2646 fs/ceph/mds_client.c 	struct ceph_mds_request *req;
req              2651 fs/ceph/mds_client.c 		req = rb_entry(p, struct ceph_mds_request, r_node);
req              2653 fs/ceph/mds_client.c 		if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
req              2655 fs/ceph/mds_client.c 		if (req->r_attempts > 0)
req              2657 fs/ceph/mds_client.c 		if (req->r_session &&
req              2658 fs/ceph/mds_client.c 		    req->r_session->s_mds == mds) {
req              2659 fs/ceph/mds_client.c 			dout(" kicking tid %llu\n", req->r_tid);
req              2660 fs/ceph/mds_client.c 			list_del_init(&req->r_wait);
req              2661 fs/ceph/mds_client.c 			__do_request(mdsc, req);
req              2667 fs/ceph/mds_client.c 			      struct ceph_mds_request *req)
req              2672 fs/ceph/mds_client.c 	if (req->r_inode)
req              2673 fs/ceph/mds_client.c 		ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
req              2674 fs/ceph/mds_client.c 	if (req->r_parent) {
req              2675 fs/ceph/mds_client.c 		ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
req              2676 fs/ceph/mds_client.c 		ihold(req->r_parent);
req              2678 fs/ceph/mds_client.c 	if (req->r_old_dentry_dir)
req              2679 fs/ceph/mds_client.c 		ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
req              2682 fs/ceph/mds_client.c 	dout("submit_request on %p for inode %p\n", req, dir);
req              2684 fs/ceph/mds_client.c 	__register_request(mdsc, req, dir);
req              2685 fs/ceph/mds_client.c 	__do_request(mdsc, req);
req              2686 fs/ceph/mds_client.c 	err = req->r_err;
req              2692 fs/ceph/mds_client.c 				  struct ceph_mds_request *req)
req              2698 fs/ceph/mds_client.c 	if (!req->r_timeout && req->r_wait_for_completion) {
req              2699 fs/ceph/mds_client.c 		err = req->r_wait_for_completion(mdsc, req);
req              2702 fs/ceph/mds_client.c 					&req->r_completion,
req              2703 fs/ceph/mds_client.c 					ceph_timeout_jiffies(req->r_timeout));
req              2715 fs/ceph/mds_client.c 	if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
req              2716 fs/ceph/mds_client.c 		err = le32_to_cpu(req->r_reply_info.head->result);
req              2718 fs/ceph/mds_client.c 		dout("aborted request %lld with %d\n", req->r_tid, err);
req              2725 fs/ceph/mds_client.c 		mutex_lock(&req->r_fill_mutex);
req              2726 fs/ceph/mds_client.c 		req->r_err = err;
req              2727 fs/ceph/mds_client.c 		set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
req              2728 fs/ceph/mds_client.c 		mutex_unlock(&req->r_fill_mutex);
req              2730 fs/ceph/mds_client.c 		if (req->r_parent &&
req              2731 fs/ceph/mds_client.c 		    (req->r_op & CEPH_MDS_OP_WRITE))
req              2732 fs/ceph/mds_client.c 			ceph_invalidate_dir_request(req);
req              2734 fs/ceph/mds_client.c 		err = req->r_err;
req              2747 fs/ceph/mds_client.c 			 struct ceph_mds_request *req)
req              2751 fs/ceph/mds_client.c 	dout("do_request on %p\n", req);
req              2754 fs/ceph/mds_client.c 	err = ceph_mdsc_submit_request(mdsc, dir, req);
req              2756 fs/ceph/mds_client.c 		err = ceph_mdsc_wait_request(mdsc, req);
req              2757 fs/ceph/mds_client.c 	dout("do_request %p done, result %d\n", req, err);
req              2765 fs/ceph/mds_client.c void ceph_invalidate_dir_request(struct ceph_mds_request *req)
req              2767 fs/ceph/mds_client.c 	struct inode *dir = req->r_parent;
req              2768 fs/ceph/mds_client.c 	struct inode *old_dir = req->r_old_dentry_dir;
req              2775 fs/ceph/mds_client.c 	if (req->r_dentry)
req              2776 fs/ceph/mds_client.c 		ceph_invalidate_dentry_lease(req->r_dentry);
req              2777 fs/ceph/mds_client.c 	if (req->r_old_dentry)
req              2778 fs/ceph/mds_client.c 		ceph_invalidate_dentry_lease(req->r_old_dentry);
req              2791 fs/ceph/mds_client.c 	struct ceph_mds_request *req;
req              2808 fs/ceph/mds_client.c 	req = lookup_get_request(mdsc, tid);
req              2809 fs/ceph/mds_client.c 	if (!req) {
req              2814 fs/ceph/mds_client.c 	dout("handle_reply %p\n", req);
req              2817 fs/ceph/mds_client.c 	if (req->r_session != session) {
req              2820 fs/ceph/mds_client.c 		       req->r_session ? req->r_session->s_mds : -1);
req              2826 fs/ceph/mds_client.c 	if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
req              2827 fs/ceph/mds_client.c 	    (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
req              2833 fs/ceph/mds_client.c 	if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
req              2850 fs/ceph/mds_client.c 		dout("got ESTALE on request %llu\n", req->r_tid);
req              2851 fs/ceph/mds_client.c 		req->r_resend_mds = -1;
req              2852 fs/ceph/mds_client.c 		if (req->r_direct_mode != USE_AUTH_MDS) {
req              2854 fs/ceph/mds_client.c 			req->r_direct_mode = USE_AUTH_MDS;
req              2855 fs/ceph/mds_client.c 			__do_request(mdsc, req);
req              2859 fs/ceph/mds_client.c 			int mds = __choose_mds(mdsc, req);
req              2860 fs/ceph/mds_client.c 			if (mds >= 0 && mds != req->r_session->s_mds) {
req              2862 fs/ceph/mds_client.c 				__do_request(mdsc, req);
req              2867 fs/ceph/mds_client.c 		dout("have to return ESTALE on request %llu\n", req->r_tid);
req              2872 fs/ceph/mds_client.c 		set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
req              2873 fs/ceph/mds_client.c 		__unregister_request(mdsc, req);
req              2875 fs/ceph/mds_client.c 		if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
req              2892 fs/ceph/mds_client.c 		set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags);
req              2893 fs/ceph/mds_client.c 		list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
req              2894 fs/ceph/mds_client.c 		if (req->r_unsafe_dir) {
req              2896 fs/ceph/mds_client.c 					ceph_inode(req->r_unsafe_dir);
req              2898 fs/ceph/mds_client.c 			list_add_tail(&req->r_unsafe_dir_item,
req              2905 fs/ceph/mds_client.c 	rinfo = &req->r_reply_info;
req              2933 fs/ceph/mds_client.c 	mutex_lock(&req->r_fill_mutex);
req              2934 fs/ceph/mds_client.c 	current->journal_info = req;
req              2935 fs/ceph/mds_client.c 	err = ceph_fill_trace(mdsc->fsc->sb, req);
req              2937 fs/ceph/mds_client.c 		if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
req              2938 fs/ceph/mds_client.c 				    req->r_op == CEPH_MDS_OP_LSSNAP))
req              2939 fs/ceph/mds_client.c 			ceph_readdir_prepopulate(req, req->r_session);
req              2942 fs/ceph/mds_client.c 	mutex_unlock(&req->r_fill_mutex);
req              2949 fs/ceph/mds_client.c 		if (req->r_target_inode &&
req              2950 fs/ceph/mds_client.c 		    test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
req              2952 fs/ceph/mds_client.c 				ceph_inode(req->r_target_inode);
req              2954 fs/ceph/mds_client.c 			list_add_tail(&req->r_unsafe_target_item,
req              2959 fs/ceph/mds_client.c 		ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
req              2963 fs/ceph/mds_client.c 	if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
req              2965 fs/ceph/mds_client.c 			req->r_err = err;
req              2967 fs/ceph/mds_client.c 			req->r_reply =  ceph_msg_get(msg);
req              2968 fs/ceph/mds_client.c 			set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
req              2978 fs/ceph/mds_client.c 	complete_request(mdsc, req);
req              2980 fs/ceph/mds_client.c 	ceph_mdsc_put_request(req);
req              2993 fs/ceph/mds_client.c 	struct ceph_mds_request *req;
req              3006 fs/ceph/mds_client.c 	req = lookup_get_request(mdsc, tid);
req              3007 fs/ceph/mds_client.c 	if (!req) {
req              3012 fs/ceph/mds_client.c 	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
req              3014 fs/ceph/mds_client.c 		__unregister_request(mdsc, req);
req              3015 fs/ceph/mds_client.c 	} else if (fwd_seq <= req->r_num_fwd) {
req              3017 fs/ceph/mds_client.c 		     tid, next_mds, req->r_num_fwd, fwd_seq);
req              3021 fs/ceph/mds_client.c 		BUG_ON(req->r_err);
req              3022 fs/ceph/mds_client.c 		BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
req              3023 fs/ceph/mds_client.c 		req->r_attempts = 0;
req              3024 fs/ceph/mds_client.c 		req->r_num_fwd = fwd_seq;
req              3025 fs/ceph/mds_client.c 		req->r_resend_mds = next_mds;
req              3026 fs/ceph/mds_client.c 		put_request_session(req);
req              3027 fs/ceph/mds_client.c 		__do_request(mdsc, req);
req              3029 fs/ceph/mds_client.c 	ceph_mdsc_put_request(req);
req              3212 fs/ceph/mds_client.c 	struct ceph_mds_request *req, *nreq;
req              3219 fs/ceph/mds_client.c 	list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
req              3220 fs/ceph/mds_client.c 		err = __prepare_send_request(mdsc, req, session->s_mds, true);
req              3222 fs/ceph/mds_client.c 			ceph_msg_get(req->r_request);
req              3223 fs/ceph/mds_client.c 			ceph_con_send(&session->s_con, req->r_request);
req              3233 fs/ceph/mds_client.c 		req = rb_entry(p, struct ceph_mds_request, r_node);
req              3235 fs/ceph/mds_client.c 		if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
req              3237 fs/ceph/mds_client.c 		if (req->r_attempts == 0)
req              3239 fs/ceph/mds_client.c 		if (req->r_session &&
req              3240 fs/ceph/mds_client.c 		    req->r_session->s_mds == session->s_mds) {
req              3241 fs/ceph/mds_client.c 			err = __prepare_send_request(mdsc, req,
req              3244 fs/ceph/mds_client.c 				ceph_msg_get(req->r_request);
req              3245 fs/ceph/mds_client.c 				ceph_con_send(&session->s_con, req->r_request);
req              4206 fs/ceph/mds_client.c 	struct ceph_mds_request *req;
req              4218 fs/ceph/mds_client.c 		while ((req = __get_oldest_req(mdsc))) {
req              4220 fs/ceph/mds_client.c 			     req->r_tid);
req              4221 fs/ceph/mds_client.c 			list_del_init(&req->r_wait);
req              4222 fs/ceph/mds_client.c 			__unregister_request(mdsc, req);
req              4256 fs/ceph/mds_client.c 	struct ceph_mds_request *req = NULL, *nextreq;
req              4262 fs/ceph/mds_client.c 	req = __get_oldest_req(mdsc);
req              4263 fs/ceph/mds_client.c 	while (req && req->r_tid <= want_tid) {
req              4265 fs/ceph/mds_client.c 		n = rb_next(&req->r_node);
req              4270 fs/ceph/mds_client.c 		if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
req              4271 fs/ceph/mds_client.c 		    (req->r_op & CEPH_MDS_OP_WRITE)) {
req              4273 fs/ceph/mds_client.c 			ceph_mdsc_get_request(req);
req              4278 fs/ceph/mds_client.c 			     req->r_tid, want_tid);
req              4279 fs/ceph/mds_client.c 			wait_for_completion(&req->r_safe_completion);
req              4281 fs/ceph/mds_client.c 			ceph_mdsc_put_request(req);
req              4291 fs/ceph/mds_client.c 		req = nextreq;
req               212 fs/ceph/mds_client.h 					     struct ceph_mds_request *req);
req               217 fs/ceph/mds_client.h 						 struct ceph_mds_request *req);
req               466 fs/ceph/mds_client.h extern void ceph_invalidate_dir_request(struct ceph_mds_request *req);
req               467 fs/ceph/mds_client.h extern int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
req               473 fs/ceph/mds_client.h 				    struct ceph_mds_request *req);
req               476 fs/ceph/mds_client.h 				struct ceph_mds_request *req);
req               477 fs/ceph/mds_client.h static inline void ceph_mdsc_get_request(struct ceph_mds_request *req)
req               479 fs/ceph/mds_client.h 	kref_get(&req->r_kref);
req               482 fs/ceph/mds_client.h static inline void ceph_mdsc_put_request(struct ceph_mds_request *req)
req               484 fs/ceph/mds_client.h 	kref_put(&req->r_kref, ceph_mdsc_release_request);
req               917 fs/ceph/super.c 	struct ceph_mds_request *req = NULL;
req               923 fs/ceph/super.c 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
req               924 fs/ceph/super.c 	if (IS_ERR(req))
req               925 fs/ceph/super.c 		return ERR_CAST(req);
req               926 fs/ceph/super.c 	req->r_path1 = kstrdup(path, GFP_NOFS);
req               927 fs/ceph/super.c 	if (!req->r_path1) {
req               932 fs/ceph/super.c 	req->r_ino1.ino = CEPH_INO_ROOT;
req               933 fs/ceph/super.c 	req->r_ino1.snap = CEPH_NOSNAP;
req               934 fs/ceph/super.c 	req->r_started = started;
req               935 fs/ceph/super.c 	req->r_timeout = fsc->client->options->mount_timeout;
req               936 fs/ceph/super.c 	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
req               937 fs/ceph/super.c 	req->r_num_caps = 2;
req               938 fs/ceph/super.c 	err = ceph_mdsc_do_request(mdsc, NULL, req);
req               940 fs/ceph/super.c 		struct inode *inode = req->r_target_inode;
req               941 fs/ceph/super.c 		req->r_target_inode = NULL;
req               953 fs/ceph/super.c 	ceph_mdsc_put_request(req);
req               913 fs/ceph/super.h 			   struct ceph_mds_request *req);
req               914 fs/ceph/super.h extern int ceph_readdir_prepopulate(struct ceph_mds_request *req,
req              1112 fs/ceph/super.h extern int ceph_handle_snapdir(struct ceph_mds_request *req,
req              1114 fs/ceph/super.h extern struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
req               808 fs/ceph/xattr.c 	struct ceph_mds_request *req = current->journal_info;
req               810 fs/ceph/xattr.c 	if (req && req->r_target_inode == in) {
req               811 fs/ceph/xattr.c 		if (req->r_op == CEPH_MDS_OP_LOOKUP ||
req               812 fs/ceph/xattr.c 		    req->r_op == CEPH_MDS_OP_LOOKUPINO ||
req               813 fs/ceph/xattr.c 		    req->r_op == CEPH_MDS_OP_LOOKUPPARENT ||
req               814 fs/ceph/xattr.c 		    req->r_op == CEPH_MDS_OP_GETATTR) {
req               815 fs/ceph/xattr.c 			mask = le32_to_cpu(req->r_args.getattr.mask);
req               816 fs/ceph/xattr.c 		} else if (req->r_op == CEPH_MDS_OP_OPEN ||
req               817 fs/ceph/xattr.c 			   req->r_op == CEPH_MDS_OP_CREATE) {
req               818 fs/ceph/xattr.c 			mask = le32_to_cpu(req->r_args.open.mask);
req               950 fs/ceph/xattr.c 	struct ceph_mds_request *req;
req               975 fs/ceph/xattr.c 	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
req               976 fs/ceph/xattr.c 	if (IS_ERR(req)) {
req               977 fs/ceph/xattr.c 		err = PTR_ERR(req);
req               981 fs/ceph/xattr.c 	req->r_path2 = kstrdup(name, GFP_NOFS);
req               982 fs/ceph/xattr.c 	if (!req->r_path2) {
req               983 fs/ceph/xattr.c 		ceph_mdsc_put_request(req);
req               989 fs/ceph/xattr.c 		req->r_args.setxattr.flags = cpu_to_le32(flags);
req               990 fs/ceph/xattr.c 		req->r_pagelist = pagelist;
req               994 fs/ceph/xattr.c 	req->r_inode = inode;
req               996 fs/ceph/xattr.c 	req->r_num_caps = 1;
req               997 fs/ceph/xattr.c 	req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
req              1000 fs/ceph/xattr.c 	err = ceph_mdsc_do_request(mdsc, NULL, req);
req              1001 fs/ceph/xattr.c 	ceph_mdsc_put_request(req);
req               612 fs/cifs/cifspdu.h 	} __attribute__((packed)) req;	/* NTLM request format (with
req               834 fs/cifs/cifspdu.h 	} __attribute__((packed)) req;
req              1398 fs/cifs/cifssmb.c 	OPEN_REQ *req = NULL;
req              1413 fs/cifs/cifssmb.c 	rc = smb_init(SMB_COM_NT_CREATE_ANDX, 24, tcon, (void **)&req,
req              1419 fs/cifs/cifssmb.c 	req->AndXCommand = 0xFF;
req              1421 fs/cifs/cifssmb.c 	if (req->hdr.Flags2 & SMBFLG2_UNICODE) {
req              1424 fs/cifs/cifssmb.c 		name_len = cifsConvertToUTF16((__le16 *)(req->fileName + 1),
req              1429 fs/cifs/cifssmb.c 		req->NameLength = cpu_to_le16(name_len);
req              1434 fs/cifs/cifssmb.c 		name_len = copy_path_name(req->fileName, path);
req              1435 fs/cifs/cifssmb.c 		req->NameLength = cpu_to_le16(name_len);
req              1439 fs/cifs/cifssmb.c 		req->OpenFlags = cpu_to_le32(REQ_OPLOCK);
req              1441 fs/cifs/cifssmb.c 		req->OpenFlags = cpu_to_le32(REQ_BATCHOPLOCK);
req              1443 fs/cifs/cifssmb.c 	req->DesiredAccess = cpu_to_le32(desired_access);
req              1444 fs/cifs/cifssmb.c 	req->AllocationSize = 0;
req              1451 fs/cifs/cifssmb.c 		req->FileAttributes = cpu_to_le32(ATTR_SYSTEM);
req              1453 fs/cifs/cifssmb.c 		req->FileAttributes = cpu_to_le32(ATTR_NORMAL);
req              1460 fs/cifs/cifssmb.c 		req->FileAttributes |= cpu_to_le32(ATTR_POSIX_SEMANTICS);
req              1463 fs/cifs/cifssmb.c 		req->FileAttributes |= cpu_to_le32(ATTR_READONLY);
req              1465 fs/cifs/cifssmb.c 	req->ShareAccess = cpu_to_le32(FILE_SHARE_ALL);
req              1466 fs/cifs/cifssmb.c 	req->CreateDisposition = cpu_to_le32(disposition);
req              1467 fs/cifs/cifssmb.c 	req->CreateOptions = cpu_to_le32(create_options & CREATE_OPTIONS_MASK);
req              1470 fs/cifs/cifssmb.c 	req->ImpersonationLevel = cpu_to_le32(SECURITY_IMPERSONATION);
req              1471 fs/cifs/cifssmb.c 	req->SecurityFlags = SECURITY_CONTEXT_TRACKING|SECURITY_EFFECTIVE_ONLY;
req              1474 fs/cifs/cifssmb.c 	inc_rfc1001_len(req, count);
req              1476 fs/cifs/cifssmb.c 	req->ByteCount = cpu_to_le16(count);
req              1477 fs/cifs/cifssmb.c 	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *)req,
req              1482 fs/cifs/cifssmb.c 		cifs_buf_release(req);
req              1509 fs/cifs/cifssmb.c 	cifs_buf_release(req);
req                44 fs/cifs/sess.c 	pSMB->req.AndXCommand = 0xFF;
req                45 fs/cifs/sess.c 	pSMB->req.MaxBufferSize = cpu_to_le16(min_t(u32,
req                48 fs/cifs/sess.c 	pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
req                49 fs/cifs/sess.c 	pSMB->req.VcNumber = cpu_to_le16(1);
req                60 fs/cifs/sess.c 		pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
req                63 fs/cifs/sess.c 		pSMB->req.hdr.Flags2 |= SMBFLG2_UNICODE;
req                67 fs/cifs/sess.c 		pSMB->req.hdr.Flags2 |= SMBFLG2_ERR_STATUS;
req                71 fs/cifs/sess.c 		pSMB->req.hdr.Flags2 |= SMBFLG2_DFS;
req               713 fs/cifs/sess.c 	pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
req              1071 fs/cifs/sess.c 	pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
req              1073 fs/cifs/sess.c 	pSMB->req.Capabilities = cpu_to_le32(capabilities);
req              1076 fs/cifs/sess.c 	pSMB->req.SecurityBlobLength = cpu_to_le16(sess_data->iov[1].iov_len);
req              1172 fs/cifs/sess.c 	if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) {
req              1177 fs/cifs/sess.c 	pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
req              1179 fs/cifs/sess.c 	pSMB->req.Capabilities |= cpu_to_le32(capabilities);
req              1230 fs/cifs/sess.c 	build_ntlmssp_negotiate_blob(pSMB->req.SecurityBlob, ses);
req              1232 fs/cifs/sess.c 	sess_data->iov[1].iov_base = pSMB->req.SecurityBlob;
req              1233 fs/cifs/sess.c 	pSMB->req.SecurityBlobLength = cpu_to_le16(sizeof(NEGOTIATE_MESSAGE));
req              1322 fs/cifs/sess.c 	pSMB->req.SecurityBlobLength = cpu_to_le16(blob_len);
req              3670 fs/cifs/smb2ops.c 	struct aead_request *req;
req              3704 fs/cifs/smb2ops.c 	req = aead_request_alloc(tfm, GFP_KERNEL);
req              3705 fs/cifs/smb2ops.c 	if (!req) {
req              3737 fs/cifs/smb2ops.c 	aead_request_set_crypt(req, sg, sg, crypt_len, iv);
req              3738 fs/cifs/smb2ops.c 	aead_request_set_ad(req, assoc_data_len);
req              3740 fs/cifs/smb2ops.c 	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req              3743 fs/cifs/smb2ops.c 	rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
req              3744 fs/cifs/smb2ops.c 				: crypto_aead_decrypt(req), &wait);
req              3753 fs/cifs/smb2ops.c 	kfree(req);
req               560 fs/cifs/smb2pdu.c assemble_neg_contexts(struct smb2_negotiate_req *req,
req               563 fs/cifs/smb2pdu.c 	char *pneg_ctxt = (char *)req;
req               578 fs/cifs/smb2pdu.c 	pneg_ctxt = (*total_len) + (char *)req;
req               579 fs/cifs/smb2pdu.c 	req->NegotiateContextOffset = cpu_to_le32(*total_len);
req               599 fs/cifs/smb2pdu.c 		req->NegotiateContextCount = cpu_to_le16(5);
req               601 fs/cifs/smb2pdu.c 		req->NegotiateContextCount = cpu_to_le16(4);
req               773 fs/cifs/smb2pdu.c 	struct smb2_create_req *req = iov[0].iov_base;
req               782 fs/cifs/smb2pdu.c 	if (!req->CreateContextsOffset)
req               783 fs/cifs/smb2pdu.c 		req->CreateContextsOffset = cpu_to_le32(
req               786 fs/cifs/smb2pdu.c 	le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_posix));
req               811 fs/cifs/smb2pdu.c 	struct smb2_negotiate_req *req;
req               830 fs/cifs/smb2pdu.c 	rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, (void **) &req, &total_len);
req               834 fs/cifs/smb2pdu.c 	req->sync_hdr.SessionId = 0;
req               841 fs/cifs/smb2pdu.c 		req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
req               842 fs/cifs/smb2pdu.c 		req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
req               843 fs/cifs/smb2pdu.c 		req->DialectCount = cpu_to_le16(2);
req               847 fs/cifs/smb2pdu.c 		req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
req               848 fs/cifs/smb2pdu.c 		req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
req               849 fs/cifs/smb2pdu.c 		req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
req               850 fs/cifs/smb2pdu.c 		req->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
req               851 fs/cifs/smb2pdu.c 		req->DialectCount = cpu_to_le16(4);
req               855 fs/cifs/smb2pdu.c 		req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);
req               856 fs/cifs/smb2pdu.c 		req->DialectCount = cpu_to_le16(1);
req               862 fs/cifs/smb2pdu.c 		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
req               864 fs/cifs/smb2pdu.c 		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
req               866 fs/cifs/smb2pdu.c 		req->SecurityMode = 0;
req               868 fs/cifs/smb2pdu.c 	req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
req               872 fs/cifs/smb2pdu.c 		memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
req               874 fs/cifs/smb2pdu.c 		memcpy(req->ClientGUID, server->client_guid,
req               879 fs/cifs/smb2pdu.c 			assemble_neg_contexts(req, server, &total_len);
req               881 fs/cifs/smb2pdu.c 	iov[0].iov_base = (char *)req;
req               889 fs/cifs/smb2pdu.c 	cifs_small_buf_release(req);
req              1196 fs/cifs/smb2pdu.c 	struct smb2_sess_setup_req *req;
req              1200 fs/cifs/smb2pdu.c 	rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, (void **) &req,
req              1206 fs/cifs/smb2pdu.c 	req->sync_hdr.SessionId = 0;
req              1209 fs/cifs/smb2pdu.c 	req->PreviousSessionId = sess_data->previous_session;
req              1211 fs/cifs/smb2pdu.c 	req->Flags = 0; /* MBZ */
req              1214 fs/cifs/smb2pdu.c 	req->sync_hdr.CreditRequest = cpu_to_le16(130);
req              1218 fs/cifs/smb2pdu.c 		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
req              1220 fs/cifs/smb2pdu.c 		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
req              1222 fs/cifs/smb2pdu.c 		req->SecurityMode = 0;
req              1225 fs/cifs/smb2pdu.c 	req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
req              1227 fs/cifs/smb2pdu.c 	req->Capabilities = 0;
req              1230 fs/cifs/smb2pdu.c 	req->Channel = 0; /* MBZ */
req              1232 fs/cifs/smb2pdu.c 	sess_data->iov[0].iov_base = (char *)req;
req              1256 fs/cifs/smb2pdu.c 	struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
req              1260 fs/cifs/smb2pdu.c 	req->SecurityBufferOffset =
req              1262 fs/cifs/smb2pdu.c 	req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
req              1482 fs/cifs/smb2pdu.c 	struct smb2_sess_setup_req *req;
req              1492 fs/cifs/smb2pdu.c 	req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
req              1493 fs/cifs/smb2pdu.c 	req->sync_hdr.SessionId = ses->Suid;
req              1607 fs/cifs/smb2pdu.c 	struct smb2_logoff_req *req; /* response is also trivial struct */
req              1627 fs/cifs/smb2pdu.c 	rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, (void **) &req, &total_len);
req              1632 fs/cifs/smb2pdu.c 	req->sync_hdr.SessionId = ses->Suid;
req              1637 fs/cifs/smb2pdu.c 		req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
req              1641 fs/cifs/smb2pdu.c 	iov[0].iov_base = (char *)req;
req              1649 fs/cifs/smb2pdu.c 	cifs_small_buf_release(req);
req              1679 fs/cifs/smb2pdu.c 	struct smb2_tree_connect_req *req;
req              1710 fs/cifs/smb2pdu.c 	rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, (void **) &req,
req              1720 fs/cifs/smb2pdu.c 	iov[0].iov_base = (char *)req;
req              1725 fs/cifs/smb2pdu.c 	req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
req              1727 fs/cifs/smb2pdu.c 	req->PathLength = cpu_to_le16(unc_path_len - 2);
req              1741 fs/cifs/smb2pdu.c 		req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
req              1748 fs/cifs/smb2pdu.c 	req->sync_hdr.CreditRequest = cpu_to_le16(64);
req              1751 fs/cifs/smb2pdu.c 	cifs_small_buf_release(req);
req              1816 fs/cifs/smb2pdu.c 	struct smb2_tree_disconnect_req *req; /* response is trivial */
req              1833 fs/cifs/smb2pdu.c 	rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req,
req              1843 fs/cifs/smb2pdu.c 	iov[0].iov_base = (char *)req;
req              1851 fs/cifs/smb2pdu.c 	cifs_small_buf_release(req);
req              1965 fs/cifs/smb2pdu.c 	struct smb2_create_req *req = iov[0].iov_base;
req              1972 fs/cifs/smb2pdu.c 	req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
req              1973 fs/cifs/smb2pdu.c 	if (!req->CreateContextsOffset)
req              1974 fs/cifs/smb2pdu.c 		req->CreateContextsOffset = cpu_to_le32(
req              1977 fs/cifs/smb2pdu.c 	le32_add_cpu(&req->CreateContextsLength,
req              2057 fs/cifs/smb2pdu.c 	struct smb2_create_req *req = iov[0].iov_base;
req              2064 fs/cifs/smb2pdu.c 	if (!req->CreateContextsOffset)
req              2065 fs/cifs/smb2pdu.c 		req->CreateContextsOffset =
req              2068 fs/cifs/smb2pdu.c 	le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable_v2));
req              2077 fs/cifs/smb2pdu.c 	struct smb2_create_req *req = iov[0].iov_base;
req              2087 fs/cifs/smb2pdu.c 	if (!req->CreateContextsOffset)
req              2088 fs/cifs/smb2pdu.c 		req->CreateContextsOffset =
req              2091 fs/cifs/smb2pdu.c 	le32_add_cpu(&req->CreateContextsLength,
req              2101 fs/cifs/smb2pdu.c 	struct smb2_create_req *req = iov[0].iov_base;
req              2121 fs/cifs/smb2pdu.c 	if (!req->CreateContextsOffset)
req              2122 fs/cifs/smb2pdu.c 		req->CreateContextsOffset =
req              2125 fs/cifs/smb2pdu.c 	le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable));
req              2159 fs/cifs/smb2pdu.c 	struct smb2_create_req *req = iov[0].iov_base;
req              2166 fs/cifs/smb2pdu.c 	if (!req->CreateContextsOffset)
req              2167 fs/cifs/smb2pdu.c 		req->CreateContextsOffset = cpu_to_le32(
req              2170 fs/cifs/smb2pdu.c 	le32_add_cpu(&req->CreateContextsLength, sizeof(struct crt_twarp_ctxt));
req              2201 fs/cifs/smb2pdu.c 	struct smb2_create_req *req = iov[0].iov_base;
req              2208 fs/cifs/smb2pdu.c 	if (!req->CreateContextsOffset)
req              2209 fs/cifs/smb2pdu.c 		req->CreateContextsOffset = cpu_to_le32(
req              2212 fs/cifs/smb2pdu.c 	le32_add_cpu(&req->CreateContextsLength, sizeof(struct crt_query_id_ctxt));
req              2268 fs/cifs/smb2pdu.c 	struct smb2_create_req *req;
req              2298 fs/cifs/smb2pdu.c 	rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
req              2306 fs/cifs/smb2pdu.c 	req->ImpersonationLevel = IL_IMPERSONATION;
req              2307 fs/cifs/smb2pdu.c 	req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
req              2309 fs/cifs/smb2pdu.c 	req->FileAttributes = cpu_to_le32(file_attributes);
req              2310 fs/cifs/smb2pdu.c 	req->ShareAccess = FILE_SHARE_ALL_LE;
req              2311 fs/cifs/smb2pdu.c 	req->CreateDisposition = cpu_to_le32(FILE_CREATE);
req              2312 fs/cifs/smb2pdu.c 	req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE);
req              2314 fs/cifs/smb2pdu.c 	iov[0].iov_base = (char *)req;
req              2318 fs/cifs/smb2pdu.c 	req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
req              2331 fs/cifs/smb2pdu.c 		req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
req              2338 fs/cifs/smb2pdu.c 		req->NameLength = cpu_to_le16(name_len * 2);
req              2346 fs/cifs/smb2pdu.c 		req->NameLength = cpu_to_le16(uni_path_len - 2);
req              2365 fs/cifs/smb2pdu.c 	req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
req              2406 fs/cifs/smb2pdu.c 	cifs_small_buf_release(req);
req              2417 fs/cifs/smb2pdu.c 	struct smb2_create_req *req;
req              2427 fs/cifs/smb2pdu.c 	rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
req              2431 fs/cifs/smb2pdu.c 	iov[0].iov_base = (char *)req;
req              2440 fs/cifs/smb2pdu.c 	req->ImpersonationLevel = IL_IMPERSONATION;
req              2441 fs/cifs/smb2pdu.c 	req->DesiredAccess = cpu_to_le32(oparms->desired_access);
req              2443 fs/cifs/smb2pdu.c 	req->FileAttributes = cpu_to_le32(file_attributes);
req              2444 fs/cifs/smb2pdu.c 	req->ShareAccess = FILE_SHARE_ALL_LE;
req              2446 fs/cifs/smb2pdu.c 	req->CreateDisposition = cpu_to_le32(oparms->disposition);
req              2447 fs/cifs/smb2pdu.c 	req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
req              2448 fs/cifs/smb2pdu.c 	req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
req              2461 fs/cifs/smb2pdu.c 		req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
req              2467 fs/cifs/smb2pdu.c 		req->NameLength = cpu_to_le16(name_len * 2);
req              2473 fs/cifs/smb2pdu.c 		req->NameLength = cpu_to_le16(uni_path_len - 2);
req              2494 fs/cifs/smb2pdu.c 		req->RequestedOplockLevel = *oplock;
req              2497 fs/cifs/smb2pdu.c 		req->RequestedOplockLevel = *oplock; /* no srv lease support */
req              2682 fs/cifs/smb2pdu.c 	struct smb2_ioctl_req *req;
req              2688 fs/cifs/smb2pdu.c 	rc = smb2_ioctl_req_init(opcode, tcon, (void **) &req, &total_len);
req              2699 fs/cifs/smb2pdu.c 			cifs_small_buf_release(req);
req              2704 fs/cifs/smb2pdu.c 	req->CtlCode = cpu_to_le32(opcode);
req              2705 fs/cifs/smb2pdu.c 	req->PersistentFileId = persistent_fid;
req              2706 fs/cifs/smb2pdu.c 	req->VolatileFileId = volatile_fid;
req              2708 fs/cifs/smb2pdu.c 	iov[0].iov_base = (char *)req;
req              2718 fs/cifs/smb2pdu.c 		req->InputCount = cpu_to_le32(indatalen);
req              2720 fs/cifs/smb2pdu.c 		req->InputOffset =
req              2731 fs/cifs/smb2pdu.c 	req->OutputOffset = 0;
req              2732 fs/cifs/smb2pdu.c 	req->OutputCount = 0; /* MBZ */
req              2749 fs/cifs/smb2pdu.c 	req->MaxOutputResponse = cpu_to_le32(max_response_size);
req              2750 fs/cifs/smb2pdu.c 	req->sync_hdr.CreditCharge =
req              2754 fs/cifs/smb2pdu.c 		req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
req              2756 fs/cifs/smb2pdu.c 		req->Flags = 0;
req              2760 fs/cifs/smb2pdu.c 		req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
req              2921 fs/cifs/smb2pdu.c 	struct smb2_close_req *req;
req              2926 fs/cifs/smb2pdu.c 	rc = smb2_plain_req_init(SMB2_CLOSE, tcon, (void **) &req, &total_len);
req              2930 fs/cifs/smb2pdu.c 	req->PersistentFileId = persistent_fid;
req              2931 fs/cifs/smb2pdu.c 	req->VolatileFileId = volatile_fid;
req              2932 fs/cifs/smb2pdu.c 	iov[0].iov_base = (char *)req;
req              3079 fs/cifs/smb2pdu.c 	struct smb2_query_info_req *req;
req              3084 fs/cifs/smb2pdu.c 	rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, (void **) &req,
req              3089 fs/cifs/smb2pdu.c 	req->InfoType = info_type;
req              3090 fs/cifs/smb2pdu.c 	req->FileInfoClass = info_class;
req              3091 fs/cifs/smb2pdu.c 	req->PersistentFileId = persistent_fid;
req              3092 fs/cifs/smb2pdu.c 	req->VolatileFileId = volatile_fid;
req              3093 fs/cifs/smb2pdu.c 	req->AdditionalInformation = cpu_to_le32(additional_info);
req              3095 fs/cifs/smb2pdu.c 	req->OutputBufferLength = cpu_to_le32(output_len);
req              3097 fs/cifs/smb2pdu.c 		req->InputBufferLength = cpu_to_le32(input_len);
req              3099 fs/cifs/smb2pdu.c 		req->InputBufferOffset = cpu_to_le16(total_len - 1);
req              3100 fs/cifs/smb2pdu.c 		memcpy(req->Buffer, input, input_len);
req              3103 fs/cifs/smb2pdu.c 	iov[0].iov_base = (char *)req;
req              3246 fs/cifs/smb2pdu.c 	struct smb2_change_notify_req *req;
req              3251 fs/cifs/smb2pdu.c 	rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, (void **) &req, &total_len);
req              3255 fs/cifs/smb2pdu.c 	req->PersistentFileId = persistent_fid;
req              3256 fs/cifs/smb2pdu.c 	req->VolatileFileId = volatile_fid;
req              3257 fs/cifs/smb2pdu.c 	req->OutputBufferLength =
req              3259 fs/cifs/smb2pdu.c 	req->CompletionFilter = cpu_to_le32(completion_filter);
req              3261 fs/cifs/smb2pdu.c 		req->Flags = cpu_to_le16(SMB2_WATCH_TREE);
req              3263 fs/cifs/smb2pdu.c 		req->Flags = 0;
req              3265 fs/cifs/smb2pdu.c 	iov[0].iov_base = (char *)req;
req              3418 fs/cifs/smb2pdu.c 	struct smb2_echo_req *req;
req              3433 fs/cifs/smb2pdu.c 	rc = smb2_plain_req_init(SMB2_ECHO, NULL, (void **)&req, &total_len);
req              3437 fs/cifs/smb2pdu.c 	req->sync_hdr.CreditRequest = cpu_to_le16(1);
req              3440 fs/cifs/smb2pdu.c 	iov[0].iov_base = (char *)req;
req              3447 fs/cifs/smb2pdu.c 	cifs_small_buf_release(req);
req              3462 fs/cifs/smb2pdu.c 	struct smb2_flush_req *req;
req              3467 fs/cifs/smb2pdu.c 	rc = smb2_plain_req_init(SMB2_FLUSH, tcon, (void **) &req, &total_len);
req              3471 fs/cifs/smb2pdu.c 	req->PersistentFileId = persistent_fid;
req              3472 fs/cifs/smb2pdu.c 	req->VolatileFileId = volatile_fid;
req              3474 fs/cifs/smb2pdu.c 	iov[0].iov_base = (char *)req;
req              3535 fs/cifs/smb2pdu.c 	struct smb2_read_plain_req *req = NULL;
req              3539 fs/cifs/smb2pdu.c 	rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, (void **) &req,
req              3548 fs/cifs/smb2pdu.c 	shdr = &req->sync_hdr;
req              3551 fs/cifs/smb2pdu.c 	req->PersistentFileId = io_parms->persistent_fid;
req              3552 fs/cifs/smb2pdu.c 	req->VolatileFileId = io_parms->volatile_fid;
req              3553 fs/cifs/smb2pdu.c 	req->ReadChannelInfoOffset = 0; /* reserved */
req              3554 fs/cifs/smb2pdu.c 	req->ReadChannelInfoLength = 0; /* reserved */
req              3555 fs/cifs/smb2pdu.c 	req->Channel = 0; /* reserved */
req              3556 fs/cifs/smb2pdu.c 	req->MinimumCount = 0;
req              3557 fs/cifs/smb2pdu.c 	req->Length = cpu_to_le32(io_parms->length);
req              3558 fs/cifs/smb2pdu.c 	req->Offset = cpu_to_le64(io_parms->offset);
req              3583 fs/cifs/smb2pdu.c 		req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
req              3585 fs/cifs/smb2pdu.c 			req->Channel = SMB2_CHANNEL_RDMA_V1;
req              3586 fs/cifs/smb2pdu.c 		req->ReadChannelInfoOffset =
req              3588 fs/cifs/smb2pdu.c 		req->ReadChannelInfoLength =
req              3590 fs/cifs/smb2pdu.c 		v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
req              3613 fs/cifs/smb2pdu.c 			req->PersistentFileId = 0xFFFFFFFF;
req              3614 fs/cifs/smb2pdu.c 			req->VolatileFileId = 0xFFFFFFFF;
req              3618 fs/cifs/smb2pdu.c 		req->RemainingBytes = cpu_to_le32(remaining_bytes);
req              3620 fs/cifs/smb2pdu.c 		req->RemainingBytes = 0;
req              3622 fs/cifs/smb2pdu.c 	*buf = req;
req              3785 fs/cifs/smb2pdu.c 	struct smb2_read_plain_req *req = NULL;
req              3794 fs/cifs/smb2pdu.c 	rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0);
req              3801 fs/cifs/smb2pdu.c 	iov[0].iov_base = (char *)req;
req              3815 fs/cifs/smb2pdu.c 			trace_smb3_read_err(xid, req->PersistentFileId,
req              3820 fs/cifs/smb2pdu.c 			trace_smb3_read_done(xid, req->PersistentFileId,
req              3824 fs/cifs/smb2pdu.c 		cifs_small_buf_release(req);
req              3827 fs/cifs/smb2pdu.c 		trace_smb3_read_done(xid, req->PersistentFileId,
req              3831 fs/cifs/smb2pdu.c 	cifs_small_buf_release(req);
req              3943 fs/cifs/smb2pdu.c 	struct smb2_write_req *req = NULL;
req              3951 fs/cifs/smb2pdu.c 	rc = smb2_plain_req_init(SMB2_WRITE, tcon, (void **) &req, &total_len);
req              3958 fs/cifs/smb2pdu.c 	shdr = (struct smb2_sync_hdr *)req;
req              3961 fs/cifs/smb2pdu.c 	req->PersistentFileId = wdata->cfile->fid.persistent_fid;
req              3962 fs/cifs/smb2pdu.c 	req->VolatileFileId = wdata->cfile->fid.volatile_fid;
req              3963 fs/cifs/smb2pdu.c 	req->WriteChannelInfoOffset = 0;
req              3964 fs/cifs/smb2pdu.c 	req->WriteChannelInfoLength = 0;
req              3965 fs/cifs/smb2pdu.c 	req->Channel = 0;
req              3966 fs/cifs/smb2pdu.c 	req->Offset = cpu_to_le64(wdata->offset);
req              3967 fs/cifs/smb2pdu.c 	req->DataOffset = cpu_to_le16(
req              3969 fs/cifs/smb2pdu.c 	req->RemainingBytes = 0;
req              3992 fs/cifs/smb2pdu.c 		req->Length = 0;
req              3993 fs/cifs/smb2pdu.c 		req->DataOffset = 0;
req              3995 fs/cifs/smb2pdu.c 			req->RemainingBytes =
req              4001 fs/cifs/smb2pdu.c 			req->RemainingBytes = cpu_to_le32(wdata->tailsz);
req              4002 fs/cifs/smb2pdu.c 		req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
req              4004 fs/cifs/smb2pdu.c 			req->Channel = SMB2_CHANNEL_RDMA_V1;
req              4005 fs/cifs/smb2pdu.c 		req->WriteChannelInfoOffset =
req              4007 fs/cifs/smb2pdu.c 		req->WriteChannelInfoLength =
req              4009 fs/cifs/smb2pdu.c 		v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
req              4016 fs/cifs/smb2pdu.c 	iov[0].iov_base = (char *)req;
req              4037 fs/cifs/smb2pdu.c 		req->Length = cpu_to_le32(wdata->bytes);
req              4039 fs/cifs/smb2pdu.c 	req->Length = cpu_to_le32(wdata->bytes);
req              4060 fs/cifs/smb2pdu.c 		trace_smb3_write_err(0 /* no xid */, req->PersistentFileId,
req              4068 fs/cifs/smb2pdu.c 	cifs_small_buf_release(req);
req              4084 fs/cifs/smb2pdu.c 	struct smb2_write_req *req = NULL;
req              4096 fs/cifs/smb2pdu.c 	rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, (void **) &req,
req              4107 fs/cifs/smb2pdu.c 	req->sync_hdr.ProcessId = cpu_to_le32(io_parms->pid);
req              4109 fs/cifs/smb2pdu.c 	req->PersistentFileId = io_parms->persistent_fid;
req              4110 fs/cifs/smb2pdu.c 	req->VolatileFileId = io_parms->volatile_fid;
req              4111 fs/cifs/smb2pdu.c 	req->WriteChannelInfoOffset = 0;
req              4112 fs/cifs/smb2pdu.c 	req->WriteChannelInfoLength = 0;
req              4113 fs/cifs/smb2pdu.c 	req->Channel = 0;
req              4114 fs/cifs/smb2pdu.c 	req->Length = cpu_to_le32(io_parms->length);
req              4115 fs/cifs/smb2pdu.c 	req->Offset = cpu_to_le64(io_parms->offset);
req              4116 fs/cifs/smb2pdu.c 	req->DataOffset = cpu_to_le16(
req              4118 fs/cifs/smb2pdu.c 	req->RemainingBytes = 0;
req              4124 fs/cifs/smb2pdu.c 	iov[0].iov_base = (char *)req;
req              4137 fs/cifs/smb2pdu.c 		trace_smb3_write_err(xid, req->PersistentFileId,
req              4145 fs/cifs/smb2pdu.c 		trace_smb3_write_done(xid, req->PersistentFileId,
req              4151 fs/cifs/smb2pdu.c 	cifs_small_buf_release(req);
req              4210 fs/cifs/smb2pdu.c 	struct smb2_query_directory_req *req;
req              4232 fs/cifs/smb2pdu.c 	rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req,
req              4242 fs/cifs/smb2pdu.c 		req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
req              4246 fs/cifs/smb2pdu.c 		req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
req              4256 fs/cifs/smb2pdu.c 	req->FileIndex = cpu_to_le32(index);
req              4257 fs/cifs/smb2pdu.c 	req->PersistentFileId = persistent_fid;
req              4258 fs/cifs/smb2pdu.c 	req->VolatileFileId = volatile_fid;
req              4261 fs/cifs/smb2pdu.c 	bufptr = req->Buffer;
req              4264 fs/cifs/smb2pdu.c 	req->FileNameOffset =
req              4266 fs/cifs/smb2pdu.c 	req->FileNameLength = cpu_to_le16(len);
req              4273 fs/cifs/smb2pdu.c 	req->OutputBufferLength = cpu_to_le32(output_size);
req              4275 fs/cifs/smb2pdu.c 	iov[0].iov_base = (char *)req;
req              4279 fs/cifs/smb2pdu.c 	iov[1].iov_base = (char *)(req->Buffer);
req              4290 fs/cifs/smb2pdu.c 	cifs_small_buf_release(req);
req              4358 fs/cifs/smb2pdu.c 	struct smb2_set_info_req *req;
req              4363 fs/cifs/smb2pdu.c 	rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, (void **) &req, &total_len);
req              4367 fs/cifs/smb2pdu.c 	req->sync_hdr.ProcessId = cpu_to_le32(pid);
req              4368 fs/cifs/smb2pdu.c 	req->InfoType = info_type;
req              4369 fs/cifs/smb2pdu.c 	req->FileInfoClass = info_class;
req              4370 fs/cifs/smb2pdu.c 	req->PersistentFileId = persistent_fid;
req              4371 fs/cifs/smb2pdu.c 	req->VolatileFileId = volatile_fid;
req              4372 fs/cifs/smb2pdu.c 	req->AdditionalInformation = cpu_to_le32(additional_info);
req              4374 fs/cifs/smb2pdu.c 	req->BufferOffset =
req              4376 fs/cifs/smb2pdu.c 	req->BufferLength = cpu_to_le32(*size);
req              4378 fs/cifs/smb2pdu.c 	memcpy(req->Buffer, *data, *size);
req              4381 fs/cifs/smb2pdu.c 	iov[0].iov_base = (char *)req;
req              4386 fs/cifs/smb2pdu.c 		le32_add_cpu(&req->BufferLength, size[i]);
req              4503 fs/cifs/smb2pdu.c 	struct smb2_oplock_break *req = NULL;
req              4512 fs/cifs/smb2pdu.c 	rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req,
req              4520 fs/cifs/smb2pdu.c 	req->VolatileFid = volatile_fid;
req              4521 fs/cifs/smb2pdu.c 	req->PersistentFid = persistent_fid;
req              4522 fs/cifs/smb2pdu.c 	req->OplockLevel = oplock_level;
req              4523 fs/cifs/smb2pdu.c 	req->sync_hdr.CreditRequest = cpu_to_le16(1);
req              4527 fs/cifs/smb2pdu.c 	iov[0].iov_base = (char *)req;
req              4535 fs/cifs/smb2pdu.c 	cifs_small_buf_release(req);
req              4581 fs/cifs/smb2pdu.c 	struct smb2_query_info_req *req;
req              4589 fs/cifs/smb2pdu.c 	rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, (void **) &req,
req              4594 fs/cifs/smb2pdu.c 	req->InfoType = SMB2_O_INFO_FILESYSTEM;
req              4595 fs/cifs/smb2pdu.c 	req->FileInfoClass = level;
req              4596 fs/cifs/smb2pdu.c 	req->PersistentFileId = persistent_fid;
req              4597 fs/cifs/smb2pdu.c 	req->VolatileFileId = volatile_fid;
req              4599 fs/cifs/smb2pdu.c 	req->InputBufferOffset =
req              4601 fs/cifs/smb2pdu.c 	req->OutputBufferLength = cpu_to_le32(
req              4604 fs/cifs/smb2pdu.c 	iov->iov_base = (char *)req;
req              4794 fs/cifs/smb2pdu.c 	struct smb2_lock_req *req = NULL;
req              4804 fs/cifs/smb2pdu.c 	rc = smb2_plain_req_init(SMB2_LOCK, tcon, (void **) &req, &total_len);
req              4811 fs/cifs/smb2pdu.c 	req->sync_hdr.ProcessId = cpu_to_le32(pid);
req              4812 fs/cifs/smb2pdu.c 	req->LockCount = cpu_to_le16(num_lock);
req              4814 fs/cifs/smb2pdu.c 	req->PersistentFileId = persist_fid;
req              4815 fs/cifs/smb2pdu.c 	req->VolatileFileId = volatile_fid;
req              4819 fs/cifs/smb2pdu.c 	iov[0].iov_base = (char *)req;
req              4832 fs/cifs/smb2pdu.c 	cifs_small_buf_release(req);
req              4866 fs/cifs/smb2pdu.c 	struct smb2_lease_ack *req = NULL;
req              4877 fs/cifs/smb2pdu.c 	rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req,
req              4885 fs/cifs/smb2pdu.c 	req->sync_hdr.CreditRequest = cpu_to_le16(1);
req              4886 fs/cifs/smb2pdu.c 	req->StructureSize = cpu_to_le16(36);
req              4889 fs/cifs/smb2pdu.c 	memcpy(req->LeaseKey, lease_key, 16);
req              4890 fs/cifs/smb2pdu.c 	req->LeaseState = lease_state;
req              4894 fs/cifs/smb2pdu.c 	iov[0].iov_base = (char *)req;
req              4902 fs/cifs/smb2pdu.c 	cifs_small_buf_release(req);
req                95 fs/coda/psdev.c         struct upc_req *req = NULL;
req               155 fs/coda/psdev.c 			req = tmp;
req               156 fs/coda/psdev.c 			list_del(&req->uc_chain);
req               162 fs/coda/psdev.c 	if (!req) {
req               170 fs/coda/psdev.c 	if (req->uc_outSize < nbytes) {
req               172 fs/coda/psdev.c 			__func__, req->uc_outSize, (long)nbytes,
req               174 fs/coda/psdev.c 		nbytes = req->uc_outSize; /* don't have more space! */
req               176 fs/coda/psdev.c         if (copy_from_user(req->uc_data, buf, nbytes)) {
req               177 fs/coda/psdev.c 		req->uc_flags |= CODA_REQ_ABORT;
req               178 fs/coda/psdev.c 		wake_up(&req->uc_sleep);
req               184 fs/coda/psdev.c 	req->uc_outSize = nbytes;
req               185 fs/coda/psdev.c 	req->uc_flags |= CODA_REQ_WRITE;
req               189 fs/coda/psdev.c 	if (req->uc_opcode == CODA_OPEN_BY_FD) {
req               191 fs/coda/psdev.c 			(struct coda_open_by_fd_out *)req->uc_data;
req               199 fs/coda/psdev.c         wake_up(&req->uc_sleep);
req               213 fs/coda/psdev.c         struct upc_req *req;
req               244 fs/coda/psdev.c 	req = list_entry(vcp->vc_pending.next, struct upc_req,uc_chain);
req               245 fs/coda/psdev.c 	list_del(&req->uc_chain);
req               248 fs/coda/psdev.c 	count = req->uc_inSize;
req               249 fs/coda/psdev.c 	if (nbytes < req->uc_inSize) {
req               251 fs/coda/psdev.c 			__func__, (long)nbytes, req->uc_inSize);
req               255 fs/coda/psdev.c 	if (copy_to_user(buf, req->uc_data, count))
req               259 fs/coda/psdev.c 	if (!(req->uc_flags & CODA_REQ_ASYNC)) {
req               260 fs/coda/psdev.c 		req->uc_flags |= CODA_REQ_READ;
req               261 fs/coda/psdev.c 		list_add_tail(&(req->uc_chain), &vcp->vc_processing);
req               265 fs/coda/psdev.c 	kvfree(req->uc_data);
req               266 fs/coda/psdev.c 	kfree(req);
req               312 fs/coda/psdev.c 	struct upc_req *req, *tmp;
req               322 fs/coda/psdev.c 	list_for_each_entry_safe(req, tmp, &vcp->vc_pending, uc_chain) {
req               323 fs/coda/psdev.c 		list_del(&req->uc_chain);
req               326 fs/coda/psdev.c 		if (req->uc_flags & CODA_REQ_ASYNC) {
req               327 fs/coda/psdev.c 			kvfree(req->uc_data);
req               328 fs/coda/psdev.c 			kfree(req);
req               331 fs/coda/psdev.c 		req->uc_flags |= CODA_REQ_ABORT;
req               332 fs/coda/psdev.c 		wake_up(&req->uc_sleep);
req               335 fs/coda/psdev.c 	list_for_each_entry_safe(req, tmp, &vcp->vc_processing, uc_chain) {
req               336 fs/coda/psdev.c 		list_del(&req->uc_chain);
req               338 fs/coda/psdev.c 		req->uc_flags |= CODA_REQ_ABORT;
req               339 fs/coda/psdev.c 		wake_up(&req->uc_sleep);
req               652 fs/coda/upcall.c 				       struct upc_req *req)
req               662 fs/coda/upcall.c 	add_wait_queue(&req->uc_sleep, &wait);
req               664 fs/coda/upcall.c 		if (CODA_INTERRUPTIBLE(req))
req               670 fs/coda/upcall.c 		if (req->uc_flags & (CODA_REQ_WRITE | CODA_REQ_ABORT))
req               674 fs/coda/upcall.c 		    CODA_INTERRUPTIBLE(req))
req               681 fs/coda/upcall.c 			list_del(&req->uc_chain);
req               695 fs/coda/upcall.c 	remove_wait_queue(&req->uc_sleep, &wait);
req               715 fs/coda/upcall.c 	struct upc_req *req = NULL, *sig_req;
req               727 fs/coda/upcall.c 	req = kmalloc(sizeof(struct upc_req), GFP_KERNEL);
req               728 fs/coda/upcall.c 	if (!req) {
req               735 fs/coda/upcall.c 	req->uc_data = (void *)buffer;
req               736 fs/coda/upcall.c 	req->uc_flags = outSize ? 0 : CODA_REQ_ASYNC;
req               737 fs/coda/upcall.c 	req->uc_inSize = inSize;
req               738 fs/coda/upcall.c 	req->uc_outSize = (outSize && *outSize) ? *outSize : inSize;
req               739 fs/coda/upcall.c 	req->uc_opcode = buffer->ih.opcode;
req               740 fs/coda/upcall.c 	req->uc_unique = buffer->ih.unique;
req               741 fs/coda/upcall.c 	init_waitqueue_head(&req->uc_sleep);
req               744 fs/coda/upcall.c 	list_add_tail(&req->uc_chain, &vcp->vc_pending);
req               747 fs/coda/upcall.c 	if (req->uc_flags & CODA_REQ_ASYNC) {
req               762 fs/coda/upcall.c 	coda_waitfor_upcall(vcp, req);
req               765 fs/coda/upcall.c 	if (req->uc_flags & CODA_REQ_WRITE) {
req               766 fs/coda/upcall.c 		out = (union outputArgs *)req->uc_data;
req               769 fs/coda/upcall.c 		*outSize = req->uc_outSize;
req               774 fs/coda/upcall.c 	if ((req->uc_flags & CODA_REQ_ABORT) || !signal_pending(current)) {
req               780 fs/coda/upcall.c 	if (!(req->uc_flags & CODA_REQ_READ))
req               801 fs/coda/upcall.c 	sig_inputArgs->ih.unique = req->uc_unique;
req               815 fs/coda/upcall.c 	kfree(req);
req               158 fs/crypto/crypto.c 	struct skcipher_request *req = NULL;
req               172 fs/crypto/crypto.c 	req = skcipher_request_alloc(tfm, gfp_flags);
req               173 fs/crypto/crypto.c 	if (!req)
req               177 fs/crypto/crypto.c 		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
req               184 fs/crypto/crypto.c 	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
req               186 fs/crypto/crypto.c 		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
req               188 fs/crypto/crypto.c 		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
req               189 fs/crypto/crypto.c 	skcipher_request_free(req);
req                40 fs/crypto/fname.c 	struct skcipher_request *req = NULL;
req                61 fs/crypto/fname.c 	req = skcipher_request_alloc(tfm, GFP_NOFS);
req                62 fs/crypto/fname.c 	if (!req)
req                64 fs/crypto/fname.c 	skcipher_request_set_callback(req,
req                68 fs/crypto/fname.c 	skcipher_request_set_crypt(req, &sg, &sg, olen, &iv);
req                71 fs/crypto/fname.c 	res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
req                72 fs/crypto/fname.c 	skcipher_request_free(req);
req                92 fs/crypto/fname.c 	struct skcipher_request *req = NULL;
req               101 fs/crypto/fname.c 	req = skcipher_request_alloc(tfm, GFP_NOFS);
req               102 fs/crypto/fname.c 	if (!req)
req               104 fs/crypto/fname.c 	skcipher_request_set_callback(req,
req               114 fs/crypto/fname.c 	skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, &iv);
req               115 fs/crypto/fname.c 	res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
req               116 fs/crypto/fname.c 	skcipher_request_free(req);
req                52 fs/crypto/keysetup_v1.c 	struct skcipher_request *req = NULL;
req                63 fs/crypto/keysetup_v1.c 	req = skcipher_request_alloc(tfm, GFP_NOFS);
req                64 fs/crypto/keysetup_v1.c 	if (!req) {
req                68 fs/crypto/keysetup_v1.c 	skcipher_request_set_callback(req,
req                77 fs/crypto/keysetup_v1.c 	skcipher_request_set_crypt(req, &src_sg, &dst_sg, derived_keysize,
req                79 fs/crypto/keysetup_v1.c 	res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
req                81 fs/crypto/keysetup_v1.c 	skcipher_request_free(req);
req               459 fs/dlm/user.c  static int check_version(struct dlm_write_request *req)
req               461 fs/dlm/user.c  	if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
req               462 fs/dlm/user.c  	    (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
req               463 fs/dlm/user.c  	     req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
req               469 fs/dlm/user.c  		       req->version[0],
req               470 fs/dlm/user.c  		       req->version[1],
req               471 fs/dlm/user.c  		       req->version[2],
req               283 fs/ecryptfs/crypto.c static void extent_crypt_complete(struct crypto_async_request *req, int rc)
req               285 fs/ecryptfs/crypto.c 	struct extent_crypt_result *ecr = req->data;
req               310 fs/ecryptfs/crypto.c 	struct skcipher_request *req = NULL;
req               328 fs/ecryptfs/crypto.c 	req = skcipher_request_alloc(crypt_stat->tfm, GFP_NOFS);
req               329 fs/ecryptfs/crypto.c 	if (!req) {
req               335 fs/ecryptfs/crypto.c 	skcipher_request_set_callback(req,
req               353 fs/ecryptfs/crypto.c 	skcipher_request_set_crypt(req, src_sg, dst_sg, size, iv);
req               354 fs/ecryptfs/crypto.c 	rc = op == ENCRYPT ? crypto_skcipher_encrypt(req) :
req               355 fs/ecryptfs/crypto.c 			     crypto_skcipher_decrypt(req);
req               357 fs/ecryptfs/crypto.c 		struct extent_crypt_result *ecr = req->base.data;
req               364 fs/ecryptfs/crypto.c 	skcipher_request_free(req);
req              1652 fs/ecryptfs/keystore.c 	struct skcipher_request *req = NULL;
req              1694 fs/ecryptfs/keystore.c 	req = skcipher_request_alloc(tfm, GFP_KERNEL);
req              1695 fs/ecryptfs/keystore.c 	if (!req) {
req              1704 fs/ecryptfs/keystore.c 	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
req              1715 fs/ecryptfs/keystore.c 	skcipher_request_set_crypt(req, src_sg, dst_sg,
req              1718 fs/ecryptfs/keystore.c 	rc = crypto_skcipher_decrypt(req);
req              1735 fs/ecryptfs/keystore.c 	skcipher_request_free(req);
req              2190 fs/ecryptfs/keystore.c 	struct skcipher_request *req;
req              2288 fs/ecryptfs/keystore.c 	req = skcipher_request_alloc(tfm, GFP_KERNEL);
req              2289 fs/ecryptfs/keystore.c 	if (!req) {
req              2298 fs/ecryptfs/keystore.c 	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
req              2304 fs/ecryptfs/keystore.c 	skcipher_request_set_crypt(req, src_sg, dst_sg,
req              2306 fs/ecryptfs/keystore.c 	rc = crypto_skcipher_encrypt(req);
req              2308 fs/ecryptfs/keystore.c 	skcipher_request_free(req);
req                46 fs/ecryptfs/kthread.c 		struct ecryptfs_open_req *req;
req                58 fs/ecryptfs/kthread.c 			req = list_first_entry(&ecryptfs_kthread_ctl.req_list,
req                61 fs/ecryptfs/kthread.c 			list_del(&req->kthread_ctl_list);
req                62 fs/ecryptfs/kthread.c 			*req->lower_file = dentry_open(&req->path,
req                64 fs/ecryptfs/kthread.c 			complete(&req->done);
req                91 fs/ecryptfs/kthread.c 	struct ecryptfs_open_req *req, *tmp;
req                95 fs/ecryptfs/kthread.c 	list_for_each_entry_safe(req, tmp, &ecryptfs_kthread_ctl.req_list,
req                97 fs/ecryptfs/kthread.c 		list_del(&req->kthread_ctl_list);
req                98 fs/ecryptfs/kthread.c 		*req->lower_file = ERR_PTR(-EIO);
req                99 fs/ecryptfs/kthread.c 		complete(&req->done);
req               121 fs/ecryptfs/kthread.c 	struct ecryptfs_open_req req;
req               125 fs/ecryptfs/kthread.c 	init_completion(&req.done);
req               126 fs/ecryptfs/kthread.c 	req.lower_file = lower_file;
req               127 fs/ecryptfs/kthread.c 	req.path.dentry = lower_dentry;
req               128 fs/ecryptfs/kthread.c 	req.path.mnt = lower_mnt;
req               134 fs/ecryptfs/kthread.c 	(*lower_file) = dentry_open(&req.path, flags, cred);
req               150 fs/ecryptfs/kthread.c 	list_add_tail(&req.kthread_ctl_list, &ecryptfs_kthread_ctl.req_list);
req               153 fs/ecryptfs/kthread.c 	wait_for_completion(&req.done);
req              3126 fs/ext4/mballoc.c #define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
req              3127 fs/ext4/mballoc.c 		(req <= (size) || max <= (chunk_size))
req                43 fs/fuse/dev.c  static void fuse_request_init(struct fuse_req *req)
req                45 fs/fuse/dev.c  	INIT_LIST_HEAD(&req->list);
req                46 fs/fuse/dev.c  	INIT_LIST_HEAD(&req->intr_entry);
req                47 fs/fuse/dev.c  	init_waitqueue_head(&req->waitq);
req                48 fs/fuse/dev.c  	refcount_set(&req->count, 1);
req                49 fs/fuse/dev.c  	__set_bit(FR_PENDING, &req->flags);
req                54 fs/fuse/dev.c  	struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
req                55 fs/fuse/dev.c  	if (req)
req                56 fs/fuse/dev.c  		fuse_request_init(req);
req                58 fs/fuse/dev.c  	return req;
req                61 fs/fuse/dev.c  static void fuse_request_free(struct fuse_req *req)
req                63 fs/fuse/dev.c  	kmem_cache_free(fuse_req_cachep, req);
req                66 fs/fuse/dev.c  static void __fuse_get_request(struct fuse_req *req)
req                68 fs/fuse/dev.c  	refcount_inc(&req->count);
req                72 fs/fuse/dev.c  static void __fuse_put_request(struct fuse_req *req)
req                74 fs/fuse/dev.c  	refcount_dec(&req->count);
req               103 fs/fuse/dev.c  static void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req);
req               107 fs/fuse/dev.c  	struct fuse_req *req;
req               128 fs/fuse/dev.c  	req = fuse_request_alloc(GFP_KERNEL);
req               130 fs/fuse/dev.c  	if (!req) {
req               136 fs/fuse/dev.c  	req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
req               137 fs/fuse/dev.c  	req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
req               138 fs/fuse/dev.c  	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
req               140 fs/fuse/dev.c  	__set_bit(FR_WAITING, &req->flags);
req               142 fs/fuse/dev.c  		__set_bit(FR_BACKGROUND, &req->flags);
req               144 fs/fuse/dev.c  	if (unlikely(req->in.h.uid == ((uid_t)-1) ||
req               145 fs/fuse/dev.c  		     req->in.h.gid == ((gid_t)-1))) {
req               146 fs/fuse/dev.c  		fuse_put_request(fc, req);
req               149 fs/fuse/dev.c  	return req;
req               156 fs/fuse/dev.c  static void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
req               158 fs/fuse/dev.c  	if (refcount_dec_and_test(&req->count)) {
req               159 fs/fuse/dev.c  		if (test_bit(FR_BACKGROUND, &req->flags)) {
req               170 fs/fuse/dev.c  		if (test_bit(FR_WAITING, &req->flags)) {
req               171 fs/fuse/dev.c  			__clear_bit(FR_WAITING, &req->flags);
req               175 fs/fuse/dev.c  		fuse_request_free(req);
req               222 fs/fuse/dev.c  				     struct fuse_req *req)
req               225 fs/fuse/dev.c  	req->in.h.len = sizeof(struct fuse_in_header) +
req               226 fs/fuse/dev.c  		fuse_len_args(req->args->in_numargs,
req               227 fs/fuse/dev.c  			      (struct fuse_arg *) req->args->in_args);
req               228 fs/fuse/dev.c  	list_add_tail(&req->list, &fiq->pending);
req               257 fs/fuse/dev.c  		struct fuse_req *req;
req               259 fs/fuse/dev.c  		req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
req               260 fs/fuse/dev.c  		list_del(&req->list);
req               263 fs/fuse/dev.c  		req->in.h.unique = fuse_get_unique(fiq);
req               264 fs/fuse/dev.c  		queue_request_and_unlock(fiq, req);
req               276 fs/fuse/dev.c  void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req)
req               280 fs/fuse/dev.c  	if (test_and_set_bit(FR_FINISHED, &req->flags))
req               288 fs/fuse/dev.c  	if (!list_empty(&req->intr_entry)) {
req               290 fs/fuse/dev.c  		list_del_init(&req->intr_entry);
req               293 fs/fuse/dev.c  	WARN_ON(test_bit(FR_PENDING, &req->flags));
req               294 fs/fuse/dev.c  	WARN_ON(test_bit(FR_SENT, &req->flags));
req               295 fs/fuse/dev.c  	if (test_bit(FR_BACKGROUND, &req->flags)) {
req               297 fs/fuse/dev.c  		clear_bit(FR_BACKGROUND, &req->flags);
req               322 fs/fuse/dev.c  		wake_up(&req->waitq);
req               325 fs/fuse/dev.c  	if (test_bit(FR_ASYNC, &req->flags))
req               326 fs/fuse/dev.c  		req->args->end(fc, req->args, req->out.h.error);
req               328 fs/fuse/dev.c  	fuse_put_request(fc, req);
req               332 fs/fuse/dev.c  static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
req               336 fs/fuse/dev.c  	if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
req               341 fs/fuse/dev.c  	if (list_empty(&req->intr_entry)) {
req               342 fs/fuse/dev.c  		list_add_tail(&req->intr_entry, &fiq->interrupts);
req               348 fs/fuse/dev.c  		if (test_bit(FR_FINISHED, &req->flags)) {
req               349 fs/fuse/dev.c  			list_del_init(&req->intr_entry);
req               360 fs/fuse/dev.c  static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
req               367 fs/fuse/dev.c  		err = wait_event_interruptible(req->waitq,
req               368 fs/fuse/dev.c  					test_bit(FR_FINISHED, &req->flags));
req               372 fs/fuse/dev.c  		set_bit(FR_INTERRUPTED, &req->flags);
req               375 fs/fuse/dev.c  		if (test_bit(FR_SENT, &req->flags))
req               376 fs/fuse/dev.c  			queue_interrupt(fiq, req);
req               379 fs/fuse/dev.c  	if (!test_bit(FR_FORCE, &req->flags)) {
req               381 fs/fuse/dev.c  		err = wait_event_killable(req->waitq,
req               382 fs/fuse/dev.c  					test_bit(FR_FINISHED, &req->flags));
req               388 fs/fuse/dev.c  		if (test_bit(FR_PENDING, &req->flags)) {
req               389 fs/fuse/dev.c  			list_del(&req->list);
req               391 fs/fuse/dev.c  			__fuse_put_request(req);
req               392 fs/fuse/dev.c  			req->out.h.error = -EINTR;
req               402 fs/fuse/dev.c  	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
req               405 fs/fuse/dev.c  static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
req               409 fs/fuse/dev.c  	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
req               413 fs/fuse/dev.c  		req->out.h.error = -ENOTCONN;
req               415 fs/fuse/dev.c  		req->in.h.unique = fuse_get_unique(fiq);
req               418 fs/fuse/dev.c  		__fuse_get_request(req);
req               419 fs/fuse/dev.c  		queue_request_and_unlock(fiq, req);
req               421 fs/fuse/dev.c  		request_wait_answer(fc, req);
req               460 fs/fuse/dev.c  static void fuse_force_creds(struct fuse_conn *fc, struct fuse_req *req)
req               462 fs/fuse/dev.c  	req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
req               463 fs/fuse/dev.c  	req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
req               464 fs/fuse/dev.c  	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
req               467 fs/fuse/dev.c  static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
req               469 fs/fuse/dev.c  	req->in.h.opcode = args->opcode;
req               470 fs/fuse/dev.c  	req->in.h.nodeid = args->nodeid;
req               471 fs/fuse/dev.c  	req->args = args;
req               473 fs/fuse/dev.c  		__set_bit(FR_ASYNC, &req->flags);
req               478 fs/fuse/dev.c  	struct fuse_req *req;
req               483 fs/fuse/dev.c  		req = fuse_request_alloc(GFP_KERNEL | __GFP_NOFAIL);
req               486 fs/fuse/dev.c  			fuse_force_creds(fc, req);
req               488 fs/fuse/dev.c  		__set_bit(FR_WAITING, &req->flags);
req               489 fs/fuse/dev.c  		__set_bit(FR_FORCE, &req->flags);
req               492 fs/fuse/dev.c  		req = fuse_get_req(fc, false);
req               493 fs/fuse/dev.c  		if (IS_ERR(req))
req               494 fs/fuse/dev.c  			return PTR_ERR(req);
req               499 fs/fuse/dev.c  	fuse_args_to_req(req, args);
req               502 fs/fuse/dev.c  		__set_bit(FR_ISREPLY, &req->flags);
req               503 fs/fuse/dev.c  	__fuse_request_send(fc, req);
req               504 fs/fuse/dev.c  	ret = req->out.h.error;
req               509 fs/fuse/dev.c  	fuse_put_request(fc, req);
req               515 fs/fuse/dev.c  					  struct fuse_req *req)
req               519 fs/fuse/dev.c  	WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
req               520 fs/fuse/dev.c  	if (!test_bit(FR_WAITING, &req->flags)) {
req               521 fs/fuse/dev.c  		__set_bit(FR_WAITING, &req->flags);
req               524 fs/fuse/dev.c  	__set_bit(FR_ISREPLY, &req->flags);
req               534 fs/fuse/dev.c  		list_add_tail(&req->list, &fc->bg_queue);
req               546 fs/fuse/dev.c  	struct fuse_req *req;
req               550 fs/fuse/dev.c  		req = fuse_request_alloc(gfp_flags);
req               551 fs/fuse/dev.c  		if (!req)
req               553 fs/fuse/dev.c  		__set_bit(FR_BACKGROUND, &req->flags);
req               556 fs/fuse/dev.c  		req = fuse_get_req(fc, true);
req               557 fs/fuse/dev.c  		if (IS_ERR(req))
req               558 fs/fuse/dev.c  			return PTR_ERR(req);
req               561 fs/fuse/dev.c  	fuse_args_to_req(req, args);
req               563 fs/fuse/dev.c  	if (!fuse_request_queue_background(fc, req)) {
req               564 fs/fuse/dev.c  		fuse_put_request(fc, req);
req               575 fs/fuse/dev.c  	struct fuse_req *req;
req               579 fs/fuse/dev.c  	req = fuse_get_req(fc, false);
req               580 fs/fuse/dev.c  	if (IS_ERR(req))
req               581 fs/fuse/dev.c  		return PTR_ERR(req);
req               583 fs/fuse/dev.c  	__clear_bit(FR_ISREPLY, &req->flags);
req               584 fs/fuse/dev.c  	req->in.h.unique = unique;
req               586 fs/fuse/dev.c  	fuse_args_to_req(req, args);
req               590 fs/fuse/dev.c  		queue_request_and_unlock(fiq, req);
req               594 fs/fuse/dev.c  		fuse_put_request(fc, req);
req               605 fs/fuse/dev.c  static int lock_request(struct fuse_req *req)
req               608 fs/fuse/dev.c  	if (req) {
req               609 fs/fuse/dev.c  		spin_lock(&req->waitq.lock);
req               610 fs/fuse/dev.c  		if (test_bit(FR_ABORTED, &req->flags))
req               613 fs/fuse/dev.c  			set_bit(FR_LOCKED, &req->flags);
req               614 fs/fuse/dev.c  		spin_unlock(&req->waitq.lock);
req               623 fs/fuse/dev.c  static int unlock_request(struct fuse_req *req)
req               626 fs/fuse/dev.c  	if (req) {
req               627 fs/fuse/dev.c  		spin_lock(&req->waitq.lock);
req               628 fs/fuse/dev.c  		if (test_bit(FR_ABORTED, &req->flags))
req               631 fs/fuse/dev.c  			clear_bit(FR_LOCKED, &req->flags);
req               632 fs/fuse/dev.c  		spin_unlock(&req->waitq.lock);
req               639 fs/fuse/dev.c  	struct fuse_req *req;
req               687 fs/fuse/dev.c  	err = unlock_request(cs->req);
req               738 fs/fuse/dev.c  	return lock_request(cs->req);
req               789 fs/fuse/dev.c  	err = unlock_request(cs->req);
req               846 fs/fuse/dev.c  	spin_lock(&cs->req->waitq.lock);
req               847 fs/fuse/dev.c  	if (test_bit(FR_ABORTED, &cs->req->flags))
req               851 fs/fuse/dev.c  	spin_unlock(&cs->req->waitq.lock);
req               871 fs/fuse/dev.c  	err = lock_request(cs->req);
req               887 fs/fuse/dev.c  	err = unlock_request(cs->req);
req               952 fs/fuse/dev.c  	struct fuse_req *req = cs->req;
req               953 fs/fuse/dev.c  	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
req              1023 fs/fuse/dev.c  			       size_t nbytes, struct fuse_req *req)
req              1031 fs/fuse/dev.c  	list_del_init(&req->intr_entry);
req              1036 fs/fuse/dev.c  	ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
req              1037 fs/fuse/dev.c  	arg.unique = req->in.h.unique;
req              1183 fs/fuse/dev.c  	struct fuse_req *req;
req              1227 fs/fuse/dev.c  		req = list_entry(fiq->interrupts.next, struct fuse_req,
req              1229 fs/fuse/dev.c  		return fuse_read_interrupt(fiq, cs, nbytes, req);
req              1240 fs/fuse/dev.c  	req = list_entry(fiq->pending.next, struct fuse_req, list);
req              1241 fs/fuse/dev.c  	clear_bit(FR_PENDING, &req->flags);
req              1242 fs/fuse/dev.c  	list_del_init(&req->list);
req              1245 fs/fuse/dev.c  	args = req->args;
req              1246 fs/fuse/dev.c  	reqsize = req->in.h.len;
req              1250 fs/fuse/dev.c  		req->out.h.error = -EIO;
req              1253 fs/fuse/dev.c  			req->out.h.error = -E2BIG;
req              1254 fs/fuse/dev.c  		fuse_request_end(fc, req);
req              1258 fs/fuse/dev.c  	list_add(&req->list, &fpq->io);
req              1260 fs/fuse/dev.c  	cs->req = req;
req              1261 fs/fuse/dev.c  	err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h));
req              1267 fs/fuse/dev.c  	clear_bit(FR_LOCKED, &req->flags);
req              1273 fs/fuse/dev.c  		req->out.h.error = -EIO;
req              1276 fs/fuse/dev.c  	if (!test_bit(FR_ISREPLY, &req->flags)) {
req              1280 fs/fuse/dev.c  	hash = fuse_req_hash(req->in.h.unique);
req              1281 fs/fuse/dev.c  	list_move_tail(&req->list, &fpq->processing[hash]);
req              1282 fs/fuse/dev.c  	__fuse_get_request(req);
req              1283 fs/fuse/dev.c  	set_bit(FR_SENT, &req->flags);
req              1287 fs/fuse/dev.c  	if (test_bit(FR_INTERRUPTED, &req->flags))
req              1288 fs/fuse/dev.c  		queue_interrupt(fiq, req);
req              1289 fs/fuse/dev.c  	fuse_put_request(fc, req);
req              1294 fs/fuse/dev.c  	if (!test_bit(FR_PRIVATE, &req->flags))
req              1295 fs/fuse/dev.c  		list_del_init(&req->list);
req              1297 fs/fuse/dev.c  	fuse_request_end(fc, req);
req              1786 fs/fuse/dev.c  	struct fuse_req *req;
req              1788 fs/fuse/dev.c  	list_for_each_entry(req, &fpq->processing[hash], list) {
req              1789 fs/fuse/dev.c  		if (req->in.h.unique == unique)
req              1790 fs/fuse/dev.c  			return req;
req              1829 fs/fuse/dev.c  	struct fuse_req *req;
req              1858 fs/fuse/dev.c  	req = NULL;
req              1860 fs/fuse/dev.c  		req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
req              1863 fs/fuse/dev.c  	if (!req) {
req              1870 fs/fuse/dev.c  		__fuse_get_request(req);
req              1879 fs/fuse/dev.c  			err = queue_interrupt(&fc->iq, req);
req              1881 fs/fuse/dev.c  		fuse_put_request(fc, req);
req              1886 fs/fuse/dev.c  	clear_bit(FR_SENT, &req->flags);
req              1887 fs/fuse/dev.c  	list_move(&req->list, &fpq->io);
req              1888 fs/fuse/dev.c  	req->out.h = oh;
req              1889 fs/fuse/dev.c  	set_bit(FR_LOCKED, &req->flags);
req              1891 fs/fuse/dev.c  	cs->req = req;
req              1892 fs/fuse/dev.c  	if (!req->args->page_replace)
req              1898 fs/fuse/dev.c  		err = copy_out_args(cs, req->args, nbytes);
req              1902 fs/fuse/dev.c  	clear_bit(FR_LOCKED, &req->flags);
req              1906 fs/fuse/dev.c  		req->out.h.error = -EIO;
req              1907 fs/fuse/dev.c  	if (!test_bit(FR_PRIVATE, &req->flags))
req              1908 fs/fuse/dev.c  		list_del_init(&req->list);
req              1911 fs/fuse/dev.c  	fuse_request_end(fc, req);
req              2046 fs/fuse/dev.c  		struct fuse_req *req;
req              2047 fs/fuse/dev.c  		req = list_entry(head->next, struct fuse_req, list);
req              2048 fs/fuse/dev.c  		req->out.h.error = -ECONNABORTED;
req              2049 fs/fuse/dev.c  		clear_bit(FR_SENT, &req->flags);
req              2050 fs/fuse/dev.c  		list_del_init(&req->list);
req              2051 fs/fuse/dev.c  		fuse_request_end(fc, req);
req              2095 fs/fuse/dev.c  		struct fuse_req *req, *next;
req              2110 fs/fuse/dev.c  			list_for_each_entry_safe(req, next, &fpq->io, list) {
req              2111 fs/fuse/dev.c  				req->out.h.error = -ECONNABORTED;
req              2112 fs/fuse/dev.c  				spin_lock(&req->waitq.lock);
req              2113 fs/fuse/dev.c  				set_bit(FR_ABORTED, &req->flags);
req              2114 fs/fuse/dev.c  				if (!test_bit(FR_LOCKED, &req->flags)) {
req              2115 fs/fuse/dev.c  					set_bit(FR_PRIVATE, &req->flags);
req              2116 fs/fuse/dev.c  					__fuse_get_request(req);
req              2117 fs/fuse/dev.c  					list_move(&req->list, &to_end);
req              2119 fs/fuse/dev.c  				spin_unlock(&req->waitq.lock);
req              2134 fs/fuse/dev.c  		list_for_each_entry(req, &fiq->pending, list)
req              2135 fs/fuse/dev.c  			clear_bit(FR_PENDING, &req->flags);
req               925 fs/fuse/fuse_i.h void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req);
req                59 fs/fuse/virtio_fs.c 				 struct fuse_req *req, bool in_flight);
req               248 fs/fuse/virtio_fs.c 		void *req;
req               252 fs/fuse/virtio_fs.c 		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
req               253 fs/fuse/virtio_fs.c 			kfree(req);
req               262 fs/fuse/virtio_fs.c 	struct fuse_req *req;
req               271 fs/fuse/virtio_fs.c 		req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req,
req               273 fs/fuse/virtio_fs.c 		if (!req) {
req               278 fs/fuse/virtio_fs.c 		list_del_init(&req->list);
req               280 fs/fuse/virtio_fs.c 		fuse_request_end(fc, req);
req               286 fs/fuse/virtio_fs.c 		req = list_first_entry_or_null(&fsvq->queued_reqs,
req               288 fs/fuse/virtio_fs.c 		if (!req) {
req               292 fs/fuse/virtio_fs.c 		list_del_init(&req->list);
req               295 fs/fuse/virtio_fs.c 		ret = virtio_fs_enqueue_req(fsvq, req, true);
req               299 fs/fuse/virtio_fs.c 				list_add_tail(&req->list, &fsvq->queued_reqs);
req               305 fs/fuse/virtio_fs.c 			req->out.h.error = ret;
req               311 fs/fuse/virtio_fs.c 			fuse_request_end(fc, req);
req               379 fs/fuse/virtio_fs.c static int copy_args_to_argbuf(struct fuse_req *req)
req               381 fs/fuse/virtio_fs.c 	struct fuse_args *args = req->args;
req               393 fs/fuse/virtio_fs.c 	req->argbuf = kmalloc(len, GFP_ATOMIC);
req               394 fs/fuse/virtio_fs.c 	if (!req->argbuf)
req               398 fs/fuse/virtio_fs.c 		memcpy(req->argbuf + offset,
req               408 fs/fuse/virtio_fs.c static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req)
req               416 fs/fuse/virtio_fs.c 	remaining = req->out.h.len - sizeof(req->out.h);
req               430 fs/fuse/virtio_fs.c 		memcpy(args->out_args[i].value, req->argbuf + offset, argsize);
req               441 fs/fuse/virtio_fs.c 	kfree(req->argbuf);
req               442 fs/fuse/virtio_fs.c 	req->argbuf = NULL;
req               453 fs/fuse/virtio_fs.c 	struct fuse_req *req;
req               466 fs/fuse/virtio_fs.c 		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
req               468 fs/fuse/virtio_fs.c 			list_move_tail(&req->list, &reqs);
req               475 fs/fuse/virtio_fs.c 	list_for_each_entry_safe(req, next, &reqs, list) {
req               480 fs/fuse/virtio_fs.c 		args = req->args;
req               481 fs/fuse/virtio_fs.c 		copy_args_from_argbuf(args, req);
req               500 fs/fuse/virtio_fs.c 		clear_bit(FR_SENT, &req->flags);
req               501 fs/fuse/virtio_fs.c 		list_del_init(&req->list);
req               504 fs/fuse/virtio_fs.c 		fuse_request_end(fc, req);
req               799 fs/fuse/virtio_fs.c static unsigned int sg_count_fuse_req(struct fuse_req *req)
req               801 fs/fuse/virtio_fs.c 	struct fuse_args *args = req->args;
req               811 fs/fuse/virtio_fs.c 	if (!test_bit(FR_ISREPLY, &req->flags))
req               847 fs/fuse/virtio_fs.c 				      struct fuse_req *req,
req               854 fs/fuse/virtio_fs.c 	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
req               876 fs/fuse/virtio_fs.c 				 struct fuse_req *req, bool in_flight)
req               884 fs/fuse/virtio_fs.c 	struct fuse_args *args = req->args;
req               895 fs/fuse/virtio_fs.c 	total_sgs = sg_count_fuse_req(req);
req               906 fs/fuse/virtio_fs.c 	ret = copy_args_to_argbuf(req);
req               911 fs/fuse/virtio_fs.c 	sg_init_one(&sg[out_sgs++], &req->in.h, sizeof(req->in.h));
req               912 fs/fuse/virtio_fs.c 	out_sgs += sg_init_fuse_args(&sg[out_sgs], req,
req               915 fs/fuse/virtio_fs.c 				     req->argbuf, &argbuf_used);
req               918 fs/fuse/virtio_fs.c 	if (test_bit(FR_ISREPLY, &req->flags)) {
req               920 fs/fuse/virtio_fs.c 			    &req->out.h, sizeof(req->out.h));
req               921 fs/fuse/virtio_fs.c 		in_sgs += sg_init_fuse_args(&sg[out_sgs + in_sgs], req,
req               924 fs/fuse/virtio_fs.c 					    req->argbuf + argbuf_used, NULL);
req               941 fs/fuse/virtio_fs.c 	ret = virtqueue_add_sgs(vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC);
req               950 fs/fuse/virtio_fs.c 	list_add_tail(&req->list, fpq->processing);
req               952 fs/fuse/virtio_fs.c 	set_bit(FR_SENT, &req->flags);
req               966 fs/fuse/virtio_fs.c 	if (ret < 0 && req->argbuf) {
req               967 fs/fuse/virtio_fs.c 		kfree(req->argbuf);
req               968 fs/fuse/virtio_fs.c 		req->argbuf = NULL;
req               983 fs/fuse/virtio_fs.c 	struct fuse_req *req;
req               988 fs/fuse/virtio_fs.c 	req = list_last_entry(&fiq->pending, struct fuse_req, list);
req               989 fs/fuse/virtio_fs.c 	clear_bit(FR_PENDING, &req->flags);
req               990 fs/fuse/virtio_fs.c 	list_del_init(&req->list);
req               997 fs/fuse/virtio_fs.c 		  __func__, req->in.h.opcode, req->in.h.unique,
req               998 fs/fuse/virtio_fs.c 		 req->in.h.nodeid, req->in.h.len,
req               999 fs/fuse/virtio_fs.c 		 fuse_len_args(req->args->out_numargs, req->args->out_args));
req              1002 fs/fuse/virtio_fs.c 	ret = virtio_fs_enqueue_req(fsvq, req, false);
req              1010 fs/fuse/virtio_fs.c 			list_add_tail(&req->list, &fsvq->queued_reqs);
req              1017 fs/fuse/virtio_fs.c 		req->out.h.error = ret;
req              1022 fs/fuse/virtio_fs.c 		list_add_tail(&req->list, &fsvq->end_reqs);
req               203 fs/gfs2/lock_dlm.c 		      const int req)
req               224 fs/gfs2/lock_dlm.c 		if (req == DLM_LOCK_PR)
req               226 fs/gfs2/lock_dlm.c 		else if (req == DLM_LOCK_CW)
req               254 fs/gfs2/lock_dlm.c 	int req;
req               258 fs/gfs2/lock_dlm.c 	req = make_mode(gl->gl_name.ln_sbd, req_state);
req               259 fs/gfs2/lock_dlm.c 	lkf = make_flags(gl, flags, req);
req               275 fs/gfs2/lock_dlm.c 	return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
req               370 fs/io_uring.c  static void __io_free_req(struct io_kiocb *req);
req               432 fs/io_uring.c  				       struct io_kiocb *req)
req               434 fs/io_uring.c  	return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
req               439 fs/io_uring.c  				     struct io_kiocb *req)
req               441 fs/io_uring.c  	if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
req               444 fs/io_uring.c  	return __io_sequence_defer(ctx, req);
req               449 fs/io_uring.c  	struct io_kiocb *req;
req               451 fs/io_uring.c  	req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
req               452 fs/io_uring.c  	if (req && !io_sequence_defer(ctx, req)) {
req               453 fs/io_uring.c  		list_del_init(&req->list);
req               454 fs/io_uring.c  		return req;
req               462 fs/io_uring.c  	struct io_kiocb *req;
req               464 fs/io_uring.c  	req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
req               465 fs/io_uring.c  	if (req) {
req               466 fs/io_uring.c  		if (req->flags & REQ_F_TIMEOUT_NOSEQ)
req               468 fs/io_uring.c  		if (!__io_sequence_defer(ctx, req)) {
req               469 fs/io_uring.c  			list_del_init(&req->list);
req               470 fs/io_uring.c  			return req;
req               493 fs/io_uring.c  				       struct io_kiocb *req)
req               497 fs/io_uring.c  	if (req->submit.sqe) {
req               498 fs/io_uring.c  		switch (req->submit.sqe->opcode) {
req               501 fs/io_uring.c  			rw = !(req->rw.ki_flags & IOCB_DIRECT);
req               506 fs/io_uring.c  	queue_work(ctx->sqo_wq[rw], &req->work);
req               509 fs/io_uring.c  static void io_kill_timeout(struct io_kiocb *req)
req               513 fs/io_uring.c  	ret = hrtimer_try_to_cancel(&req->timeout.timer);
req               515 fs/io_uring.c  		atomic_inc(&req->ctx->cq_timeouts);
req               516 fs/io_uring.c  		list_del(&req->list);
req               517 fs/io_uring.c  		io_cqring_fill_event(req->ctx, req->user_data, 0);
req               518 fs/io_uring.c  		__io_free_req(req);
req               524 fs/io_uring.c  	struct io_kiocb *req, *tmp;
req               527 fs/io_uring.c  	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
req               528 fs/io_uring.c  		io_kill_timeout(req);
req               534 fs/io_uring.c  	struct io_kiocb *req;
req               536 fs/io_uring.c  	while ((req = io_get_timeout_req(ctx)) != NULL)
req               537 fs/io_uring.c  		io_kill_timeout(req);
req               541 fs/io_uring.c  	while ((req = io_get_deferred_req(ctx)) != NULL) {
req               542 fs/io_uring.c  		if (req->flags & REQ_F_SHADOW_DRAIN) {
req               544 fs/io_uring.c  			__io_free_req(req);
req               547 fs/io_uring.c  		req->flags |= REQ_F_IO_DRAINED;
req               548 fs/io_uring.c  		io_queue_async_work(ctx, req);
req               618 fs/io_uring.c  	struct io_kiocb *req;
req               624 fs/io_uring.c  		req = kmem_cache_alloc(req_cachep, gfp);
req               625 fs/io_uring.c  		if (unlikely(!req))
req               646 fs/io_uring.c  		req = state->reqs[0];
req               648 fs/io_uring.c  		req = state->reqs[state->cur_req];
req               653 fs/io_uring.c  	req->file = NULL;
req               654 fs/io_uring.c  	req->ctx = ctx;
req               655 fs/io_uring.c  	req->flags = 0;
req               657 fs/io_uring.c  	refcount_set(&req->refs, 2);
req               658 fs/io_uring.c  	req->result = 0;
req               659 fs/io_uring.c  	req->fs = NULL;
req               660 fs/io_uring.c  	return req;
req               675 fs/io_uring.c  static void __io_free_req(struct io_kiocb *req)
req               677 fs/io_uring.c  	if (req->file && !(req->flags & REQ_F_FIXED_FILE))
req               678 fs/io_uring.c  		fput(req->file);
req               679 fs/io_uring.c  	percpu_ref_put(&req->ctx->refs);
req               680 fs/io_uring.c  	kmem_cache_free(req_cachep, req);
req               683 fs/io_uring.c  static void io_req_link_next(struct io_kiocb *req)
req               692 fs/io_uring.c  	nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
req               695 fs/io_uring.c  		if (!list_empty(&req->link_list)) {
req               697 fs/io_uring.c  			list_splice(&req->link_list, &nxt->link_list);
req               703 fs/io_uring.c  		io_queue_async_work(req->ctx, nxt);
req               710 fs/io_uring.c  static void io_fail_links(struct io_kiocb *req)
req               714 fs/io_uring.c  	while (!list_empty(&req->link_list)) {
req               715 fs/io_uring.c  		link = list_first_entry(&req->link_list, struct io_kiocb, list);
req               718 fs/io_uring.c  		io_cqring_add_event(req->ctx, link->user_data, -ECANCELED);
req               723 fs/io_uring.c  static void io_free_req(struct io_kiocb *req)
req               731 fs/io_uring.c  	if (req->flags & REQ_F_LINK) {
req               732 fs/io_uring.c  		if (req->flags & REQ_F_FAIL_LINK)
req               733 fs/io_uring.c  			io_fail_links(req);
req               735 fs/io_uring.c  			io_req_link_next(req);
req               738 fs/io_uring.c  	__io_free_req(req);
req               741 fs/io_uring.c  static void io_put_req(struct io_kiocb *req)
req               743 fs/io_uring.c  	if (refcount_dec_and_test(&req->refs))
req               744 fs/io_uring.c  		io_free_req(req);
req               769 fs/io_uring.c  	struct io_kiocb *req;
req               774 fs/io_uring.c  		req = list_first_entry(done, struct io_kiocb, list);
req               775 fs/io_uring.c  		list_del(&req->list);
req               777 fs/io_uring.c  		io_cqring_fill_event(ctx, req->user_data, req->result);
req               780 fs/io_uring.c  		if (refcount_dec_and_test(&req->refs)) {
req               786 fs/io_uring.c  			if ((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
req               788 fs/io_uring.c  				reqs[to_free++] = req;
req               792 fs/io_uring.c  				io_free_req(req);
req               804 fs/io_uring.c  	struct io_kiocb *req, *tmp;
req               816 fs/io_uring.c  	list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
req               817 fs/io_uring.c  		struct kiocb *kiocb = &req->rw;
req               824 fs/io_uring.c  		if (req->flags & REQ_F_IOPOLL_COMPLETED) {
req               825 fs/io_uring.c  			list_move_tail(&req->list, &done);
req               941 fs/io_uring.c  static void kiocb_end_write(struct io_kiocb *req)
req               947 fs/io_uring.c  	if (req->flags & REQ_F_ISREG) {
req               948 fs/io_uring.c  		struct inode *inode = file_inode(req->file);
req               952 fs/io_uring.c  	file_end_write(req->file);
req               957 fs/io_uring.c  	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
req               960 fs/io_uring.c  		kiocb_end_write(req);
req               962 fs/io_uring.c  	if ((req->flags & REQ_F_LINK) && res != req->result)
req               963 fs/io_uring.c  		req->flags |= REQ_F_FAIL_LINK;
req               964 fs/io_uring.c  	io_cqring_add_event(req->ctx, req->user_data, res);
req               965 fs/io_uring.c  	io_put_req(req);
req               970 fs/io_uring.c  	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
req               973 fs/io_uring.c  		kiocb_end_write(req);
req               975 fs/io_uring.c  	if ((req->flags & REQ_F_LINK) && res != req->result)
req               976 fs/io_uring.c  		req->flags |= REQ_F_FAIL_LINK;
req               977 fs/io_uring.c  	req->result = res;
req               979 fs/io_uring.c  		req->flags |= REQ_F_IOPOLL_COMPLETED;
req               988 fs/io_uring.c  static void io_iopoll_req_issued(struct io_kiocb *req)
req               990 fs/io_uring.c  	struct io_ring_ctx *ctx = req->ctx;
req              1004 fs/io_uring.c  		if (list_req->rw.ki_filp != req->rw.ki_filp)
req              1012 fs/io_uring.c  	if (req->flags & REQ_F_IOPOLL_COMPLETED)
req              1013 fs/io_uring.c  		list_add(&req->list, &ctx->poll_list);
req              1015 fs/io_uring.c  		list_add_tail(&req->list, &ctx->poll_list);
req              1075 fs/io_uring.c  static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
req              1079 fs/io_uring.c  	struct io_ring_ctx *ctx = req->ctx;
req              1080 fs/io_uring.c  	struct kiocb *kiocb = &req->rw;
req              1084 fs/io_uring.c  	if (!req->file)
req              1087 fs/io_uring.c  	if (S_ISREG(file_inode(req->file)->i_mode))
req              1088 fs/io_uring.c  		req->flags |= REQ_F_ISREG;
req              1091 fs/io_uring.c  		req->fsize = rlimit(RLIMIT_FSIZE);
req              1097 fs/io_uring.c  	if (force_nonblock && !io_file_supports_async(req->file)) {
req              1098 fs/io_uring.c  		req->flags |= REQ_F_MUST_PUNT;
req              1122 fs/io_uring.c  	    (req->file->f_flags & O_NONBLOCK))
req              1123 fs/io_uring.c  		req->flags |= REQ_F_NOWAIT;
req              1135 fs/io_uring.c  		req->result = 0;
req              1304 fs/io_uring.c  static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
req              1306 fs/io_uring.c  	struct async_list *async_list = &req->ctx->pending_async[rw];
req              1307 fs/io_uring.c  	struct kiocb *kiocb = &req->rw;
req              1320 fs/io_uring.c  			req->flags |= REQ_F_SEQ_PREV;
req              1393 fs/io_uring.c  static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
req              1397 fs/io_uring.c  	struct kiocb *kiocb = &req->rw;
req              1403 fs/io_uring.c  	ret = io_prep_rw(req, s, force_nonblock);
req              1411 fs/io_uring.c  	ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
req              1416 fs/io_uring.c  	if (req->flags & REQ_F_LINK)
req              1417 fs/io_uring.c  		req->result = read_size;
req              1437 fs/io_uring.c  		if (force_nonblock && !(req->flags & REQ_F_NOWAIT) &&
req              1438 fs/io_uring.c  		    (req->flags & REQ_F_ISREG) &&
req              1450 fs/io_uring.c  				io_async_list_note(READ, req, iov_count);
req              1458 fs/io_uring.c  static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
req              1462 fs/io_uring.c  	struct kiocb *kiocb = &req->rw;
req              1468 fs/io_uring.c  	ret = io_prep_rw(req, s, force_nonblock);
req              1476 fs/io_uring.c  	ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
req              1480 fs/io_uring.c  	if (req->flags & REQ_F_LINK)
req              1481 fs/io_uring.c  		req->result = ret;
req              1489 fs/io_uring.c  			io_async_list_note(WRITE, req, iov_count);
req              1504 fs/io_uring.c  		if (req->flags & REQ_F_ISREG) {
req              1513 fs/io_uring.c  			current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
req              1531 fs/io_uring.c  				io_async_list_note(WRITE, req, iov_count);
req              1543 fs/io_uring.c  static int io_nop(struct io_kiocb *req, u64 user_data)
req              1545 fs/io_uring.c  	struct io_ring_ctx *ctx = req->ctx;
req              1552 fs/io_uring.c  	io_put_req(req);
req              1556 fs/io_uring.c  static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
req              1558 fs/io_uring.c  	struct io_ring_ctx *ctx = req->ctx;
req              1560 fs/io_uring.c  	if (!req->file)
req              1571 fs/io_uring.c  static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
req              1584 fs/io_uring.c  	ret = io_prep_fsync(req, sqe);
req              1592 fs/io_uring.c  	ret = vfs_fsync_range(req->rw.ki_filp, sqe_off,
req              1596 fs/io_uring.c  	if (ret < 0 && (req->flags & REQ_F_LINK))
req              1597 fs/io_uring.c  		req->flags |= REQ_F_FAIL_LINK;
req              1598 fs/io_uring.c  	io_cqring_add_event(req->ctx, sqe->user_data, ret);
req              1599 fs/io_uring.c  	io_put_req(req);
req              1603 fs/io_uring.c  static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
req              1605 fs/io_uring.c  	struct io_ring_ctx *ctx = req->ctx;
req              1608 fs/io_uring.c  	if (!req->file)
req              1619 fs/io_uring.c  static int io_sync_file_range(struct io_kiocb *req,
req              1628 fs/io_uring.c  	ret = io_prep_sfr(req, sqe);
req              1640 fs/io_uring.c  	ret = sync_file_range(req->rw.ki_filp, sqe_off, sqe_len, flags);
req              1642 fs/io_uring.c  	if (ret < 0 && (req->flags & REQ_F_LINK))
req              1643 fs/io_uring.c  		req->flags |= REQ_F_FAIL_LINK;
req              1644 fs/io_uring.c  	io_cqring_add_event(req->ctx, sqe->user_data, ret);
req              1645 fs/io_uring.c  	io_put_req(req);
req              1650 fs/io_uring.c  static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
req              1658 fs/io_uring.c  	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
req              1661 fs/io_uring.c  	sock = sock_from_file(req->file, &ret);
req              1668 fs/io_uring.c  			req->flags |= REQ_F_NOWAIT;
req              1673 fs/io_uring.c  		if (req->ctx->compat)
req              1687 fs/io_uring.c  	if (req->fs) {
req              1688 fs/io_uring.c  		struct fs_struct *fs = req->fs;
req              1690 fs/io_uring.c  		spin_lock(&req->fs->lock);
req              1693 fs/io_uring.c  		spin_unlock(&req->fs->lock);
req              1697 fs/io_uring.c  	io_cqring_add_event(req->ctx, sqe->user_data, ret);
req              1698 fs/io_uring.c  	io_put_req(req);
req              1703 fs/io_uring.c  static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
req              1707 fs/io_uring.c  	return io_send_recvmsg(req, sqe, force_nonblock, __sys_sendmsg_sock);
req              1713 fs/io_uring.c  static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
req              1717 fs/io_uring.c  	return io_send_recvmsg(req, sqe, force_nonblock, __sys_recvmsg_sock);
req              1723 fs/io_uring.c  static void io_poll_remove_one(struct io_kiocb *req)
req              1725 fs/io_uring.c  	struct io_poll_iocb *poll = &req->poll;
req              1731 fs/io_uring.c  		io_queue_async_work(req->ctx, req);
req              1735 fs/io_uring.c  	list_del_init(&req->list);
req              1740 fs/io_uring.c  	struct io_kiocb *req;
req              1744 fs/io_uring.c  		req = list_first_entry(&ctx->cancel_list, struct io_kiocb,list);
req              1745 fs/io_uring.c  		io_poll_remove_one(req);
req              1754 fs/io_uring.c  static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
req              1756 fs/io_uring.c  	struct io_ring_ctx *ctx = req->ctx;
req              1760 fs/io_uring.c  	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
req              1776 fs/io_uring.c  	io_cqring_add_event(req->ctx, sqe->user_data, ret);
req              1777 fs/io_uring.c  	io_put_req(req);
req              1781 fs/io_uring.c  static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
req              1784 fs/io_uring.c  	req->poll.done = true;
req              1785 fs/io_uring.c  	io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask));
req              1791 fs/io_uring.c  	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
req              1792 fs/io_uring.c  	struct io_poll_iocb *poll = &req->poll;
req              1794 fs/io_uring.c  	struct io_ring_ctx *ctx = req->ctx;
req              1816 fs/io_uring.c  	list_del_init(&req->list);
req              1817 fs/io_uring.c  	io_poll_complete(ctx, req, mask);
req              1821 fs/io_uring.c  	io_put_req(req);
req              1831 fs/io_uring.c  	struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
req              1832 fs/io_uring.c  	struct io_ring_ctx *ctx = req->ctx;
req              1843 fs/io_uring.c  		list_del(&req->list);
req              1844 fs/io_uring.c  		io_poll_complete(ctx, req, mask);
req              1848 fs/io_uring.c  		io_put_req(req);
req              1850 fs/io_uring.c  		io_queue_async_work(ctx, req);
req              1858 fs/io_uring.c  	struct io_kiocb *req;
req              1867 fs/io_uring.c  	if (unlikely(pt->req->poll.head)) {
req              1873 fs/io_uring.c  	pt->req->poll.head = head;
req              1874 fs/io_uring.c  	add_wait_queue(head, &pt->req->poll.wait);
req              1877 fs/io_uring.c  static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
req              1879 fs/io_uring.c  	struct io_poll_iocb *poll = &req->poll;
req              1880 fs/io_uring.c  	struct io_ring_ctx *ctx = req->ctx;
req              1886 fs/io_uring.c  	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
req              1893 fs/io_uring.c  	req->submit.sqe = NULL;
req              1894 fs/io_uring.c  	INIT_WORK(&req->work, io_poll_complete_work);
req              1904 fs/io_uring.c  	ipt.req = req;
req              1911 fs/io_uring.c  	INIT_LIST_HEAD(&req->list);
req              1929 fs/io_uring.c  			list_add_tail(&req->list, &ctx->cancel_list);
req              1934 fs/io_uring.c  		io_poll_complete(ctx, req, mask);
req              1940 fs/io_uring.c  		io_put_req(req);
req              1948 fs/io_uring.c  	struct io_kiocb *req, *prev;
req              1951 fs/io_uring.c  	req = container_of(timer, struct io_kiocb, timeout.timer);
req              1952 fs/io_uring.c  	ctx = req->ctx;
req              1962 fs/io_uring.c  	prev = req;
req              1965 fs/io_uring.c  	list_del(&req->list);
req              1967 fs/io_uring.c  	io_cqring_fill_event(ctx, req->user_data, -ETIME);
req              1973 fs/io_uring.c  	io_put_req(req);
req              1977 fs/io_uring.c  static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
req              1980 fs/io_uring.c  	struct io_ring_ctx *ctx = req->ctx;
req              1994 fs/io_uring.c  	req->flags |= REQ_F_TIMEOUT;
req              2003 fs/io_uring.c  		req->flags |= REQ_F_TIMEOUT_NOSEQ;
req              2009 fs/io_uring.c  	req->sequence = ctx->cached_sq_head + count - 1;
req              2011 fs/io_uring.c  	req->submit.sequence = count;
req              2051 fs/io_uring.c  	req->sequence -= span;
req              2053 fs/io_uring.c  	list_add(&req->list, entry);
req              2056 fs/io_uring.c  	hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
req              2057 fs/io_uring.c  	req->timeout.timer.function = io_timeout_fn;
req              2058 fs/io_uring.c  	hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts),
req              2063 fs/io_uring.c  static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
req              2068 fs/io_uring.c  	if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list))
req              2076 fs/io_uring.c  	if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) {
req              2082 fs/io_uring.c  	memcpy(&req->submit, s, sizeof(*s));
req              2084 fs/io_uring.c  	req->submit.sqe = sqe_copy;
req              2086 fs/io_uring.c  	INIT_WORK(&req->work, io_sq_wq_submit_work);
req              2087 fs/io_uring.c  	list_add_tail(&req->list, &ctx->defer_list);
req              2092 fs/io_uring.c  static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
req              2097 fs/io_uring.c  	req->user_data = READ_ONCE(s->sqe->user_data);
req              2105 fs/io_uring.c  		ret = io_nop(req, req->user_data);
req              2110 fs/io_uring.c  		ret = io_read(req, s, force_nonblock);
req              2115 fs/io_uring.c  		ret = io_write(req, s, force_nonblock);
req              2118 fs/io_uring.c  		ret = io_read(req, s, force_nonblock);
req              2121 fs/io_uring.c  		ret = io_write(req, s, force_nonblock);
req              2124 fs/io_uring.c  		ret = io_fsync(req, s->sqe, force_nonblock);
req              2127 fs/io_uring.c  		ret = io_poll_add(req, s->sqe);
req              2130 fs/io_uring.c  		ret = io_poll_remove(req, s->sqe);
req              2133 fs/io_uring.c  		ret = io_sync_file_range(req, s->sqe, force_nonblock);
req              2136 fs/io_uring.c  		ret = io_sendmsg(req, s->sqe, force_nonblock);
req              2139 fs/io_uring.c  		ret = io_recvmsg(req, s->sqe, force_nonblock);
req              2142 fs/io_uring.c  		ret = io_timeout(req, s->sqe);
req              2153 fs/io_uring.c  		if (req->result == -EAGAIN)
req              2159 fs/io_uring.c  		io_iopoll_req_issued(req);
req              2192 fs/io_uring.c  	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
req              2194 fs/io_uring.c  	struct io_ring_ctx *ctx = req->ctx;
req              2203 fs/io_uring.c  	async_list = io_async_list_from_sqe(ctx, req->submit.sqe);
req              2206 fs/io_uring.c  		struct sqe_submit *s = &req->submit;
req              2208 fs/io_uring.c  		unsigned int flags = req->flags;
req              2211 fs/io_uring.c  		req->rw.ki_flags &= ~IOCB_NOWAIT;
req              2213 fs/io_uring.c  		if (req->fs != current->fs && current->fs != old_fs_struct) {
req              2215 fs/io_uring.c  			if (req->fs)
req              2216 fs/io_uring.c  				current->fs = req->fs;
req              2238 fs/io_uring.c  				ret = __io_submit_sqe(ctx, req, s, false);
req              2252 fs/io_uring.c  		io_put_req(req);
req              2256 fs/io_uring.c  			io_put_req(req);
req              2269 fs/io_uring.c  			req = list_first_entry(&req_list, struct io_kiocb,
req              2271 fs/io_uring.c  			list_del(&req->list);
req              2277 fs/io_uring.c  		req = NULL;
req              2286 fs/io_uring.c  		req = list_first_entry(&req_list, struct io_kiocb, list);
req              2287 fs/io_uring.c  		list_del(&req->list);
req              2288 fs/io_uring.c  	} while (req);
req              2304 fs/io_uring.c  				req = list_first_entry(&req_list,
req              2306 fs/io_uring.c  				list_del(&req->list);
req              2332 fs/io_uring.c  static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
req              2338 fs/io_uring.c  	if (!(req->flags & REQ_F_SEQ_PREV))
req              2345 fs/io_uring.c  	list_add_tail(&req->list, &list->list);
req              2351 fs/io_uring.c  		list_del_init(&req->list);
req              2373 fs/io_uring.c  			   struct io_submit_state *state, struct io_kiocb *req)
req              2382 fs/io_uring.c  		req->flags |= REQ_F_IO_DRAIN;
req              2388 fs/io_uring.c  	req->sequence = s->sequence;
req              2397 fs/io_uring.c  		req->file = ctx->user_files[fd];
req              2398 fs/io_uring.c  		req->flags |= REQ_F_FIXED_FILE;
req              2402 fs/io_uring.c  		req->file = io_file_get(state, fd);
req              2403 fs/io_uring.c  		if (unlikely(!req->file))
req              2410 fs/io_uring.c  static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
req              2415 fs/io_uring.c  	ret = __io_submit_sqe(ctx, req, s, true);
req              2421 fs/io_uring.c  	if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
req              2422 fs/io_uring.c  	    (req->flags & REQ_F_MUST_PUNT))) {
req              2430 fs/io_uring.c  			memcpy(&req->submit, s, sizeof(*s));
req              2432 fs/io_uring.c  			if (!io_add_to_prev_work(list, req)) {
req              2435 fs/io_uring.c  				INIT_WORK(&req->work, io_sq_wq_submit_work);
req              2436 fs/io_uring.c  				io_queue_async_work(ctx, req);
req              2448 fs/io_uring.c  	io_put_req(req);
req              2452 fs/io_uring.c  		io_cqring_add_event(ctx, req->user_data, ret);
req              2453 fs/io_uring.c  		if (req->flags & REQ_F_LINK)
req              2454 fs/io_uring.c  			req->flags |= REQ_F_FAIL_LINK;
req              2455 fs/io_uring.c  		io_put_req(req);
req              2461 fs/io_uring.c  static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
req              2466 fs/io_uring.c  	ret = io_req_defer(ctx, req, s);
req              2469 fs/io_uring.c  			io_free_req(req);
req              2475 fs/io_uring.c  	return __io_queue_sqe(ctx, req, s);
req              2478 fs/io_uring.c  static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
req              2485 fs/io_uring.c  		return io_queue_sqe(ctx, req, s);
req              2492 fs/io_uring.c  	req->flags |= REQ_F_IO_DRAIN;
req              2493 fs/io_uring.c  	ret = io_req_defer(ctx, req, s);
req              2496 fs/io_uring.c  			io_free_req(req);
req              2515 fs/io_uring.c  		return __io_queue_sqe(ctx, req, s);
req              2526 fs/io_uring.c  	struct io_kiocb *req;
req              2535 fs/io_uring.c  	req = io_get_req(ctx, state);
req              2536 fs/io_uring.c  	if (unlikely(!req)) {
req              2541 fs/io_uring.c  	ret = io_req_set_file(ctx, s, state, req);
req              2544 fs/io_uring.c  		io_free_req(req);
req              2550 fs/io_uring.c  	req->user_data = s->sqe->user_data;
req              2558 fs/io_uring.c  			req->fs = current->fs;
req              2559 fs/io_uring.c  			req->fs->users++;
req              2562 fs/io_uring.c  		if (!req->fs) {
req              2586 fs/io_uring.c  		memcpy(&req->submit, s, sizeof(*s));
req              2587 fs/io_uring.c  		list_add_tail(&req->list, &prev->link_list);
req              2589 fs/io_uring.c  		req->flags |= REQ_F_LINK;
req              2591 fs/io_uring.c  		memcpy(&req->submit, s, sizeof(*s));
req              2592 fs/io_uring.c  		INIT_LIST_HEAD(&req->link_list);
req              2593 fs/io_uring.c  		*link = req;
req              2595 fs/io_uring.c  		io_queue_sqe(ctx, req, s);
req               359 fs/lockd/clnt4xdr.c static void nlm4_xdr_enc_testargs(struct rpc_rqst *req,
req               381 fs/lockd/clnt4xdr.c static void nlm4_xdr_enc_lockargs(struct rpc_rqst *req,
req               404 fs/lockd/clnt4xdr.c static void nlm4_xdr_enc_cancargs(struct rpc_rqst *req,
req               423 fs/lockd/clnt4xdr.c static void nlm4_xdr_enc_unlockargs(struct rpc_rqst *req,
req               440 fs/lockd/clnt4xdr.c static void nlm4_xdr_enc_res(struct rpc_rqst *req,
req               463 fs/lockd/clnt4xdr.c static void nlm4_xdr_enc_testres(struct rpc_rqst *req,
req               511 fs/lockd/clnt4xdr.c static int nlm4_xdr_dec_testres(struct rpc_rqst *req,
req               532 fs/lockd/clnt4xdr.c static int nlm4_xdr_dec_res(struct rpc_rqst *req,
req               131 fs/lockd/clntlock.c int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
req               157 fs/lockd/clntlock.c 	req->a_res.status = block->b_status;
req               233 fs/lockd/clntlock.c 	struct nlm_rqst   *req;
req               238 fs/lockd/clntlock.c 	req = kmalloc(sizeof(*req), GFP_KERNEL);
req               239 fs/lockd/clntlock.c 	if (!req)
req               271 fs/lockd/clntlock.c 		if (nlmclnt_reclaim(host, fl, req) != 0)
req               297 fs/lockd/clntlock.c 	kfree(req);
req               126 fs/lockd/clntproc.c static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
req               128 fs/lockd/clntproc.c 	struct nlm_args	*argp = &req->a_args;
req               130 fs/lockd/clntproc.c 	char *nodename = req->a_host->h_rpcclnt->cl_nodename;
req               135 fs/lockd/clntproc.c 	lock->oh.data = req->a_owner;
req               136 fs/lockd/clntproc.c 	lock->oh.len  = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
req               145 fs/lockd/clntproc.c static void nlmclnt_release_lockargs(struct nlm_rqst *req)
req               147 fs/lockd/clntproc.c 	WARN_ON_ONCE(req->a_args.lock.fl.fl_ops != NULL);
req               261 fs/lockd/clntproc.c nlmclnt_call(const struct cred *cred, struct nlm_rqst *req, u32 proc)
req               263 fs/lockd/clntproc.c 	struct nlm_host	*host = req->a_host;
req               265 fs/lockd/clntproc.c 	struct nlm_args	*argp = &req->a_args;
req               266 fs/lockd/clntproc.c 	struct nlm_res	*resp = &req->a_res;
req               339 fs/lockd/clntproc.c static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
req               341 fs/lockd/clntproc.c 	struct nlm_host	*host = req->a_host;
req               346 fs/lockd/clntproc.c 		.callback_data = req,
req               363 fs/lockd/clntproc.c 	tk_ops->rpc_release(req);
req               367 fs/lockd/clntproc.c static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
req               371 fs/lockd/clntproc.c 	task = __nlm_async_call(req, proc, msg, tk_ops);
req               381 fs/lockd/clntproc.c int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
req               384 fs/lockd/clntproc.c 		.rpc_argp	= &req->a_args,
req               385 fs/lockd/clntproc.c 		.rpc_resp	= &req->a_res,
req               387 fs/lockd/clntproc.c 	return nlm_do_async_call(req, proc, &msg, tk_ops);
req               390 fs/lockd/clntproc.c int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
req               393 fs/lockd/clntproc.c 		.rpc_argp	= &req->a_res,
req               395 fs/lockd/clntproc.c 	return nlm_do_async_call(req, proc, &msg, tk_ops);
req               406 fs/lockd/clntproc.c static int nlmclnt_async_call(const struct cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
req               409 fs/lockd/clntproc.c 		.rpc_argp	= &req->a_args,
req               410 fs/lockd/clntproc.c 		.rpc_resp	= &req->a_res,
req               416 fs/lockd/clntproc.c 	task = __nlm_async_call(req, proc, &msg, tk_ops);
req               428 fs/lockd/clntproc.c nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
req               432 fs/lockd/clntproc.c 	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST);
req               436 fs/lockd/clntproc.c 	switch (req->a_res.status) {
req               444 fs/lockd/clntproc.c 			fl->fl_start = req->a_res.lock.fl.fl_start;
req               445 fs/lockd/clntproc.c 			fl->fl_end = req->a_res.lock.fl.fl_end;
req               446 fs/lockd/clntproc.c 			fl->fl_type = req->a_res.lock.fl.fl_type;
req               447 fs/lockd/clntproc.c 			fl->fl_pid = -req->a_res.lock.fl.fl_pid;
req               450 fs/lockd/clntproc.c 			status = nlm_stat_to_errno(req->a_res.status);
req               453 fs/lockd/clntproc.c 	nlmclnt_release_call(req);
req               513 fs/lockd/clntproc.c nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
req               516 fs/lockd/clntproc.c 	struct nlm_host	*host = req->a_host;
req               517 fs/lockd/clntproc.c 	struct nlm_res	*resp = &req->a_res;
req               525 fs/lockd/clntproc.c 	req->a_args.state = nsm_local_state;
req               543 fs/lockd/clntproc.c 		status = nlmclnt_call(cred, req, NLMPROC_LOCK);
req               552 fs/lockd/clntproc.c 		status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
req               563 fs/lockd/clntproc.c 		if (!req->a_args.block)
req               565 fs/lockd/clntproc.c 		if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
req               598 fs/lockd/clntproc.c 	nlmclnt_release_call(req);
req               612 fs/lockd/clntproc.c 	nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
req               621 fs/lockd/clntproc.c 		struct nlm_rqst *req)
req               625 fs/lockd/clntproc.c 	memset(req, 0, sizeof(*req));
req               626 fs/lockd/clntproc.c 	locks_init_lock(&req->a_args.lock.fl);
req               627 fs/lockd/clntproc.c 	locks_init_lock(&req->a_res.lock.fl);
req               628 fs/lockd/clntproc.c 	req->a_host  = host;
req               631 fs/lockd/clntproc.c 	nlmclnt_setlockargs(req, fl);
req               632 fs/lockd/clntproc.c 	req->a_args.reclaim = 1;
req               634 fs/lockd/clntproc.c 	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK);
req               635 fs/lockd/clntproc.c 	if (status >= 0 && req->a_res.status == nlm_granted)
req               640 fs/lockd/clntproc.c 				status, ntohl(req->a_res.status));
req               661 fs/lockd/clntproc.c nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
req               663 fs/lockd/clntproc.c 	struct nlm_host	*host = req->a_host;
req               664 fs/lockd/clntproc.c 	struct nlm_res	*resp = &req->a_res;
req               683 fs/lockd/clntproc.c 	refcount_inc(&req->a_count);
req               684 fs/lockd/clntproc.c 	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
req               698 fs/lockd/clntproc.c 	nlmclnt_release_call(req);
req               704 fs/lockd/clntproc.c 	struct nlm_rqst	*req = data;
req               705 fs/lockd/clntproc.c 	const struct nlmclnt_operations *nlmclnt_ops = req->a_host->h_nlmclnt_ops;
req               709 fs/lockd/clntproc.c 		defer_call = nlmclnt_ops->nlmclnt_unlock_prepare(task, req->a_callback_data);
req               717 fs/lockd/clntproc.c 	struct nlm_rqst	*req = data;
req               718 fs/lockd/clntproc.c 	u32 status = ntohl(req->a_res.status);
req               742 fs/lockd/clntproc.c 	nlm_rebind_host(req->a_host);
req               760 fs/lockd/clntproc.c 	struct nlm_rqst	*req;
req               766 fs/lockd/clntproc.c 	req = nlm_alloc_call(host);
req               767 fs/lockd/clntproc.c 	if (!req)
req               769 fs/lockd/clntproc.c 	req->a_flags = RPC_TASK_ASYNC;
req               771 fs/lockd/clntproc.c 	nlmclnt_setlockargs(req, fl);
req               772 fs/lockd/clntproc.c 	req->a_args.block = block;
req               774 fs/lockd/clntproc.c 	refcount_inc(&req->a_count);
req               775 fs/lockd/clntproc.c 	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
req               777 fs/lockd/clntproc.c 	if (status == 0 && req->a_res.status == nlm_lck_denied)
req               779 fs/lockd/clntproc.c 	nlmclnt_release_call(req);
req               785 fs/lockd/clntproc.c 	struct nlm_rqst	*req = data;
req               786 fs/lockd/clntproc.c 	u32 status = ntohl(req->a_res.status);
req               819 fs/lockd/clntproc.c 	if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
req               821 fs/lockd/clntproc.c 	nlm_rebind_host(req->a_host);
req               352 fs/lockd/clntxdr.c static void nlm_xdr_enc_testargs(struct rpc_rqst *req,
req               374 fs/lockd/clntxdr.c static void nlm_xdr_enc_lockargs(struct rpc_rqst *req,
req               397 fs/lockd/clntxdr.c static void nlm_xdr_enc_cancargs(struct rpc_rqst *req,
req               416 fs/lockd/clntxdr.c static void nlm_xdr_enc_unlockargs(struct rpc_rqst *req,
req               433 fs/lockd/clntxdr.c static void nlm_xdr_enc_res(struct rpc_rqst *req,
req               463 fs/lockd/clntxdr.c static void nlm_xdr_enc_testres(struct rpc_rqst *req,
req               509 fs/lockd/clntxdr.c static int nlm_xdr_dec_testres(struct rpc_rqst *req,
req               530 fs/lockd/clntxdr.c static int nlm_xdr_dec_res(struct rpc_rqst *req,
req               481 fs/lockd/mon.c static void nsm_xdr_enc_mon(struct rpc_rqst *req, struct xdr_stream *xdr,
req               488 fs/lockd/mon.c static void nsm_xdr_enc_unmon(struct rpc_rqst *req, struct xdr_stream *xdr,
req               825 fs/nfs/blocklayout/blocklayout.c 		struct nfs_page *req, unsigned int alignment, bool is_write)
req               834 fs/nfs/blocklayout/blocklayout.c 	if (!IS_ALIGNED(req->wb_offset, alignment))
req               837 fs/nfs/blocklayout/blocklayout.c 	if (IS_ALIGNED(req->wb_bytes, alignment))
req               841 fs/nfs/blocklayout/blocklayout.c 	    (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode))) {
req               856 fs/nfs/blocklayout/blocklayout.c bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
req               858 fs/nfs/blocklayout/blocklayout.c 	if (!is_aligned_req(pgio, req, SECTOR_SIZE, false)) {
req               863 fs/nfs/blocklayout/blocklayout.c 	pnfs_generic_pg_init_read(pgio, req);
req               879 fs/nfs/blocklayout/blocklayout.c 		struct nfs_page *req)
req               881 fs/nfs/blocklayout/blocklayout.c 	if (!is_aligned_req(pgio, req, SECTOR_SIZE, false))
req               883 fs/nfs/blocklayout/blocklayout.c 	return pnfs_generic_pg_test(pgio, prev, req);
req               910 fs/nfs/blocklayout/blocklayout.c bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
req               914 fs/nfs/blocklayout/blocklayout.c 	if (!is_aligned_req(pgio, req, PAGE_SIZE, true)) {
req               921 fs/nfs/blocklayout/blocklayout.c 					      req->wb_index);
req               925 fs/nfs/blocklayout/blocklayout.c 	pnfs_generic_pg_init_write(pgio, req, wb_size);
req               942 fs/nfs/blocklayout/blocklayout.c 		 struct nfs_page *req)
req               944 fs/nfs/blocklayout/blocklayout.c 	if (!is_aligned_req(pgio, req, PAGE_SIZE, true))
req               946 fs/nfs/blocklayout/blocklayout.c 	return pnfs_generic_pg_test(pgio, prev, req);
req                84 fs/nfs/cache_lib.c static struct cache_deferred_req *nfs_dns_cache_defer(struct cache_req *req)
req                88 fs/nfs/cache_lib.c 	dreq = container_of(req, struct nfs_cache_defer_req, req);
req               103 fs/nfs/cache_lib.c 		dreq->req.defer = nfs_dns_cache_defer;
req                16 fs/nfs/cache_lib.h 	struct cache_req req;
req               109 fs/nfs/callback.c 	struct rpc_rqst *req;
req               123 fs/nfs/callback.c 			req = list_first_entry(&serv->sv_cb_list,
req               125 fs/nfs/callback.c 			list_del(&req->rq_bc_list);
req               129 fs/nfs/callback.c 			error = bc_svc_process(serv, req, rqstp);
req               401 fs/nfs/direct.c 		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
req               402 fs/nfs/direct.c 		struct page *page = req->wb_page;
req               407 fs/nfs/direct.c 		bytes += req->wb_bytes;
req               408 fs/nfs/direct.c 		nfs_list_remove_request(req);
req               409 fs/nfs/direct.c 		nfs_release_request(req);
req               419 fs/nfs/direct.c 	struct nfs_page *req;
req               422 fs/nfs/direct.c 		req = nfs_list_entry(head->next);
req               423 fs/nfs/direct.c 		nfs_list_remove_request(req);
req               424 fs/nfs/direct.c 		nfs_release_request(req);
req               478 fs/nfs/direct.c 			struct nfs_page *req;
req               481 fs/nfs/direct.c 			req = nfs_create_request(dreq->ctx, pagevec[i],
req               483 fs/nfs/direct.c 			if (IS_ERR(req)) {
req               484 fs/nfs/direct.c 				result = PTR_ERR(req);
req               487 fs/nfs/direct.c 			req->wb_index = pos >> PAGE_SHIFT;
req               488 fs/nfs/direct.c 			req->wb_offset = pos & ~PAGE_MASK;
req               489 fs/nfs/direct.c 			if (!nfs_pageio_add_request(&desc, req)) {
req               491 fs/nfs/direct.c 				nfs_release_request(req);
req               625 fs/nfs/direct.c 	struct nfs_page *req, *tmp;
req               635 fs/nfs/direct.c 	list_for_each_entry(req, &reqs, wb_list)
req               636 fs/nfs/direct.c 		dreq->max_count += req->wb_bytes;
req               645 fs/nfs/direct.c 	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
req               647 fs/nfs/direct.c 		req->wb_nio++;
req               648 fs/nfs/direct.c 		if (!nfs_pageio_add_request(&desc, req)) {
req               649 fs/nfs/direct.c 			nfs_list_move_request(req, &failed);
req               658 fs/nfs/direct.c 		nfs_release_request(req);
req               663 fs/nfs/direct.c 		req = nfs_list_entry(failed.next);
req               664 fs/nfs/direct.c 		nfs_list_remove_request(req);
req               665 fs/nfs/direct.c 		nfs_unlock_and_release_request(req);
req               676 fs/nfs/direct.c 	struct nfs_page *req;
req               684 fs/nfs/direct.c 		req = nfs_list_entry(data->pages.next);
req               685 fs/nfs/direct.c 		nfs_list_remove_request(req);
req               691 fs/nfs/direct.c 			req->wb_nio = 0;
req               693 fs/nfs/direct.c 			nfs_mark_request_commit(req, NULL, &cinfo, 0);
req               695 fs/nfs/direct.c 			nfs_release_request(req);
req               696 fs/nfs/direct.c 		nfs_unlock_and_release_request(req);
req               704 fs/nfs/direct.c 		struct nfs_page *req)
req               711 fs/nfs/direct.c 	nfs_mark_request_commit(req, NULL, cinfo, 0);
req               761 fs/nfs/direct.c 	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
req               792 fs/nfs/direct.c 		req = nfs_list_entry(hdr->pages.next);
req               793 fs/nfs/direct.c 		nfs_list_remove_request(req);
req               795 fs/nfs/direct.c 			kref_get(&req->wb_kref);
req               796 fs/nfs/direct.c 			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
req               799 fs/nfs/direct.c 		nfs_unlock_and_release_request(req);
req               810 fs/nfs/direct.c 	struct nfs_page *req;
req               813 fs/nfs/direct.c 		req = nfs_list_entry(head->next);
req               814 fs/nfs/direct.c 		nfs_list_remove_request(req);
req               815 fs/nfs/direct.c 		nfs_unlock_and_release_request(req);
req               884 fs/nfs/direct.c 			struct nfs_page *req;
req               887 fs/nfs/direct.c 			req = nfs_create_request(dreq->ctx, pagevec[i],
req               889 fs/nfs/direct.c 			if (IS_ERR(req)) {
req               890 fs/nfs/direct.c 				result = PTR_ERR(req);
req               895 fs/nfs/direct.c 				nfs_free_request(req);
req               900 fs/nfs/direct.c 			nfs_lock_request(req);
req               901 fs/nfs/direct.c 			req->wb_index = pos >> PAGE_SHIFT;
req               902 fs/nfs/direct.c 			req->wb_offset = pos & ~PAGE_MASK;
req               903 fs/nfs/direct.c 			if (!nfs_pageio_add_request(&desc, req)) {
req               905 fs/nfs/direct.c 				nfs_unlock_and_release_request(req);
req               290 fs/nfs/dns_resolve.c 		ret = cache_check(cd, &(*item)->h, &dreq->req);
req               848 fs/nfs/filelayout/filelayout.c 		   struct nfs_page *req)
req               857 fs/nfs/filelayout/filelayout.c 	size = pnfs_generic_pg_test(pgio, prev, req);
req               864 fs/nfs/filelayout/filelayout.c 		r_stripe = (u64)req_offset(req) - segment_offset;
req               873 fs/nfs/filelayout/filelayout.c 	div_u64_rem((u64)req_offset(req) - segment_offset,
req               915 fs/nfs/filelayout/filelayout.c 			struct nfs_page *req)
req               920 fs/nfs/filelayout/filelayout.c 						      nfs_req_openctx(req),
req               939 fs/nfs/filelayout/filelayout.c 			 struct nfs_page *req)
req               947 fs/nfs/filelayout/filelayout.c 						      nfs_req_openctx(req),
req               998 fs/nfs/filelayout/filelayout.c filelayout_mark_request_commit(struct nfs_page *req,
req              1008 fs/nfs/filelayout/filelayout.c 		nfs_request_add_commit_list(req, cinfo);
req              1016 fs/nfs/filelayout/filelayout.c 		j = nfs4_fl_calc_j_index(lseg, req_offset(req));
req              1018 fs/nfs/filelayout/filelayout.c 		pnfs_layout_mark_request_commit(req, lseg, cinfo, i);
req               873 fs/nfs/flexfilelayout/flexfilelayout.c 		      struct nfs_page *req,
req               878 fs/nfs/flexfilelayout/flexfilelayout.c 					   nfs_req_openctx(req),
req               892 fs/nfs/flexfilelayout/flexfilelayout.c 			struct nfs_page *req)
req               903 fs/nfs/flexfilelayout/flexfilelayout.c 		ff_layout_pg_get_read(pgio, req, false);
req               908 fs/nfs/flexfilelayout/flexfilelayout.c 		ff_layout_pg_get_read(pgio, req, true);
req               952 fs/nfs/flexfilelayout/flexfilelayout.c 			struct nfs_page *req)
req               965 fs/nfs/flexfilelayout/flexfilelayout.c 						   nfs_req_openctx(req),
req              1025 fs/nfs/flexfilelayout/flexfilelayout.c 				    struct nfs_page *req)
req              1029 fs/nfs/flexfilelayout/flexfilelayout.c 						   nfs_req_openctx(req),
req              1673 fs/nfs/flexfilelayout/flexfilelayout.c 	struct nfs_page *req;
req              1680 fs/nfs/flexfilelayout/flexfilelayout.c 		list_for_each_entry(req, &cdata->pages, wb_list)
req              1681 fs/nfs/flexfilelayout/flexfilelayout.c 			count += req->wb_bytes;
req               254 fs/nfs/internal.h void nfs_free_request(struct nfs_page *req);
req               487 fs/nfs/internal.h void nfs_mark_request_commit(struct nfs_page *req,
req               500 fs/nfs/internal.h void nfs_request_add_commit_list(struct nfs_page *req,
req               502 fs/nfs/internal.h void nfs_request_add_commit_list_locked(struct nfs_page *req,
req               505 fs/nfs/internal.h void nfs_request_remove_commit_list(struct nfs_page *req,
req               310 fs/nfs/mount_clnt.c static void mnt_xdr_enc_dirpath(struct rpc_rqst *req, struct xdr_stream *xdr,
req               362 fs/nfs/mount_clnt.c static int mnt_xdr_dec_mountres(struct rpc_rqst *req,
req               455 fs/nfs/mount_clnt.c static int mnt_xdr_dec_mountres3(struct rpc_rqst *req,
req               557 fs/nfs/nfs2xdr.c static void nfs2_xdr_enc_fhandle(struct rpc_rqst *req,
req               574 fs/nfs/nfs2xdr.c static void nfs2_xdr_enc_sattrargs(struct rpc_rqst *req,
req               581 fs/nfs/nfs2xdr.c 	encode_sattr(xdr, args->sattr, rpc_rqst_userns(req));
req               584 fs/nfs/nfs2xdr.c static void nfs2_xdr_enc_diropargs(struct rpc_rqst *req,
req               593 fs/nfs/nfs2xdr.c static void nfs2_xdr_enc_readlinkargs(struct rpc_rqst *req,
req               600 fs/nfs/nfs2xdr.c 	rpc_prepare_reply_pages(req, args->pages, args->pgbase,
req               629 fs/nfs/nfs2xdr.c static void nfs2_xdr_enc_readargs(struct rpc_rqst *req,
req               636 fs/nfs/nfs2xdr.c 	rpc_prepare_reply_pages(req, args->pages, args->pgbase,
req               638 fs/nfs/nfs2xdr.c 	req->rq_rcv_buf.flags |= XDRBUF_READ;
req               671 fs/nfs/nfs2xdr.c static void nfs2_xdr_enc_writeargs(struct rpc_rqst *req,
req               689 fs/nfs/nfs2xdr.c static void nfs2_xdr_enc_createargs(struct rpc_rqst *req,
req               696 fs/nfs/nfs2xdr.c 	encode_sattr(xdr, args->sattr, rpc_rqst_userns(req));
req               699 fs/nfs/nfs2xdr.c static void nfs2_xdr_enc_removeargs(struct rpc_rqst *req,
req               716 fs/nfs/nfs2xdr.c static void nfs2_xdr_enc_renameargs(struct rpc_rqst *req,
req               736 fs/nfs/nfs2xdr.c static void nfs2_xdr_enc_linkargs(struct rpc_rqst *req,
req               755 fs/nfs/nfs2xdr.c static void nfs2_xdr_enc_symlinkargs(struct rpc_rqst *req,
req               763 fs/nfs/nfs2xdr.c 	encode_sattr(xdr, args->sattr, rpc_rqst_userns(req));
req               787 fs/nfs/nfs2xdr.c static void nfs2_xdr_enc_readdirargs(struct rpc_rqst *req,
req               794 fs/nfs/nfs2xdr.c 	rpc_prepare_reply_pages(req, args->pages, 0,
req               805 fs/nfs/nfs2xdr.c static int nfs2_xdr_dec_stat(struct rpc_rqst *req, struct xdr_stream *xdr,
req               822 fs/nfs/nfs2xdr.c static int nfs2_xdr_dec_attrstat(struct rpc_rqst *req, struct xdr_stream *xdr,
req               825 fs/nfs/nfs2xdr.c 	return decode_attrstat(xdr, result, NULL, rpc_rqst_userns(req));
req               828 fs/nfs/nfs2xdr.c static int nfs2_xdr_dec_diropres(struct rpc_rqst *req, struct xdr_stream *xdr,
req               831 fs/nfs/nfs2xdr.c 	return decode_diropres(xdr, result, rpc_rqst_userns(req));
req               844 fs/nfs/nfs2xdr.c static int nfs2_xdr_dec_readlinkres(struct rpc_rqst *req,
req               873 fs/nfs/nfs2xdr.c static int nfs2_xdr_dec_readres(struct rpc_rqst *req, struct xdr_stream *xdr,
req               886 fs/nfs/nfs2xdr.c 	error = decode_fattr(xdr, result->fattr, rpc_rqst_userns(req));
req               896 fs/nfs/nfs2xdr.c static int nfs2_xdr_dec_writeres(struct rpc_rqst *req, struct xdr_stream *xdr,
req               904 fs/nfs/nfs2xdr.c 			rpc_rqst_userns(req));
req               995 fs/nfs/nfs2xdr.c static int nfs2_xdr_dec_readdirres(struct rpc_rqst *req,
req              1044 fs/nfs/nfs2xdr.c static int nfs2_xdr_dec_statfsres(struct rpc_rqst *req, struct xdr_stream *xdr,
req               812 fs/nfs/nfs3xdr.c static void nfs3_xdr_enc_getattr3args(struct rpc_rqst *req,
req               852 fs/nfs/nfs3xdr.c static void nfs3_xdr_enc_setattr3args(struct rpc_rqst *req,
req               858 fs/nfs/nfs3xdr.c 	encode_sattr3(xdr, args->sattr, rpc_rqst_userns(req));
req               869 fs/nfs/nfs3xdr.c static void nfs3_xdr_enc_lookup3args(struct rpc_rqst *req,
req               893 fs/nfs/nfs3xdr.c static void nfs3_xdr_enc_access3args(struct rpc_rqst *req,
req               909 fs/nfs/nfs3xdr.c static void nfs3_xdr_enc_readlink3args(struct rpc_rqst *req,
req               916 fs/nfs/nfs3xdr.c 	rpc_prepare_reply_pages(req, args->pages, args->pgbase,
req               941 fs/nfs/nfs3xdr.c static void nfs3_xdr_enc_read3args(struct rpc_rqst *req,
req               949 fs/nfs/nfs3xdr.c 	rpc_prepare_reply_pages(req, args->pages, args->pgbase,
req               951 fs/nfs/nfs3xdr.c 	req->rq_rcv_buf.flags |= XDRBUF_READ;
req               986 fs/nfs/nfs3xdr.c static void nfs3_xdr_enc_write3args(struct rpc_rqst *req,
req              1036 fs/nfs/nfs3xdr.c static void nfs3_xdr_enc_create3args(struct rpc_rqst *req,
req              1043 fs/nfs/nfs3xdr.c 	encode_createhow3(xdr, args, rpc_rqst_userns(req));
req              1054 fs/nfs/nfs3xdr.c static void nfs3_xdr_enc_mkdir3args(struct rpc_rqst *req,
req              1061 fs/nfs/nfs3xdr.c 	encode_sattr3(xdr, args->sattr, rpc_rqst_userns(req));
req              1087 fs/nfs/nfs3xdr.c static void nfs3_xdr_enc_symlink3args(struct rpc_rqst *req,
req              1094 fs/nfs/nfs3xdr.c 	encode_symlinkdata3(xdr, args, rpc_rqst_userns(req));
req              1152 fs/nfs/nfs3xdr.c static void nfs3_xdr_enc_mknod3args(struct rpc_rqst *req,
req              1159 fs/nfs/nfs3xdr.c 	encode_mknoddata3(xdr, args, rpc_rqst_userns(req));
req              1169 fs/nfs/nfs3xdr.c static void nfs3_xdr_enc_remove3args(struct rpc_rqst *req,
req              1186 fs/nfs/nfs3xdr.c static void nfs3_xdr_enc_rename3args(struct rpc_rqst *req,
req              1206 fs/nfs/nfs3xdr.c static void nfs3_xdr_enc_link3args(struct rpc_rqst *req,
req              1239 fs/nfs/nfs3xdr.c static void nfs3_xdr_enc_readdir3args(struct rpc_rqst *req,
req              1246 fs/nfs/nfs3xdr.c 	rpc_prepare_reply_pages(req, args->pages, 0,
req              1281 fs/nfs/nfs3xdr.c static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req,
req              1288 fs/nfs/nfs3xdr.c 	rpc_prepare_reply_pages(req, args->pages, 0,
req              1313 fs/nfs/nfs3xdr.c static void nfs3_xdr_enc_commit3args(struct rpc_rqst *req,
req              1324 fs/nfs/nfs3xdr.c static void nfs3_xdr_enc_getacl3args(struct rpc_rqst *req,
req              1333 fs/nfs/nfs3xdr.c 		rpc_prepare_reply_pages(req, args->pages, 0,
req              1336 fs/nfs/nfs3xdr.c 		req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES;
req              1340 fs/nfs/nfs3xdr.c static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
req              1351 fs/nfs/nfs3xdr.c 	base = req->rq_slen;
req              1392 fs/nfs/nfs3xdr.c static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req,
req              1404 fs/nfs/nfs3xdr.c 	error = decode_fattr3(xdr, result, rpc_rqst_userns(req));
req              1429 fs/nfs/nfs3xdr.c static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req,
req              1439 fs/nfs/nfs3xdr.c 	error = decode_wcc_data(xdr, result, rpc_rqst_userns(req));
req              1470 fs/nfs/nfs3xdr.c static int nfs3_xdr_dec_lookup3res(struct rpc_rqst *req,
req              1474 fs/nfs/nfs3xdr.c 	struct user_namespace *userns = rpc_rqst_userns(req);
req              1519 fs/nfs/nfs3xdr.c static int nfs3_xdr_dec_access3res(struct rpc_rqst *req,
req              1530 fs/nfs/nfs3xdr.c 	error = decode_post_op_attr(xdr, result->fattr, rpc_rqst_userns(req));
req              1561 fs/nfs/nfs3xdr.c static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req,
req              1571 fs/nfs/nfs3xdr.c 	error = decode_post_op_attr(xdr, result, rpc_rqst_userns(req));
req              1637 fs/nfs/nfs3xdr.c static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
req              1649 fs/nfs/nfs3xdr.c 	error = decode_post_op_attr(xdr, result->fattr, rpc_rqst_userns(req));
req              1710 fs/nfs/nfs3xdr.c static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
req              1720 fs/nfs/nfs3xdr.c 	error = decode_wcc_data(xdr, result->fattr, rpc_rqst_userns(req));
req              1776 fs/nfs/nfs3xdr.c static int nfs3_xdr_dec_create3res(struct rpc_rqst *req,
req              1780 fs/nfs/nfs3xdr.c 	struct user_namespace *userns = rpc_rqst_userns(req);
req              1818 fs/nfs/nfs3xdr.c static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req,
req              1829 fs/nfs/nfs3xdr.c 	error = decode_wcc_data(xdr, result->dir_attr, rpc_rqst_userns(req));
req              1860 fs/nfs/nfs3xdr.c static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req,
req              1864 fs/nfs/nfs3xdr.c 	struct user_namespace *userns = rpc_rqst_userns(req);
req              1906 fs/nfs/nfs3xdr.c static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr,
req              1909 fs/nfs/nfs3xdr.c 	struct user_namespace *userns = rpc_rqst_userns(req);
req              2093 fs/nfs/nfs3xdr.c static int nfs3_xdr_dec_readdir3res(struct rpc_rqst *req,
req              2106 fs/nfs/nfs3xdr.c 	error = decode_readdir3resok(xdr, result, rpc_rqst_userns(req));
req              2110 fs/nfs/nfs3xdr.c 	error = decode_post_op_attr(xdr, result->dir_attr, rpc_rqst_userns(req));
req              2159 fs/nfs/nfs3xdr.c static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
req              2170 fs/nfs/nfs3xdr.c 	error = decode_post_op_attr(xdr, result->fattr, rpc_rqst_userns(req));
req              2233 fs/nfs/nfs3xdr.c static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
req              2244 fs/nfs/nfs3xdr.c 	error = decode_post_op_attr(xdr, result->fattr, rpc_rqst_userns(req));
req              2294 fs/nfs/nfs3xdr.c static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
req              2305 fs/nfs/nfs3xdr.c 	error = decode_post_op_attr(xdr, result->fattr, rpc_rqst_userns(req));
req              2336 fs/nfs/nfs3xdr.c static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
req              2348 fs/nfs/nfs3xdr.c 	error = decode_wcc_data(xdr, result->fattr, rpc_rqst_userns(req));
req              2410 fs/nfs/nfs3xdr.c static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req,
req              2422 fs/nfs/nfs3xdr.c 	error = decode_getacl3resok(xdr, result, rpc_rqst_userns(req));
req              2429 fs/nfs/nfs3xdr.c static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
req              2441 fs/nfs/nfs3xdr.c 	error = decode_post_op_attr(xdr, result, rpc_rqst_userns(req));
req               286 fs/nfs/nfs42xdr.c static void nfs4_xdr_enc_allocate(struct rpc_rqst *req,
req               295 fs/nfs/nfs42xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req               318 fs/nfs/nfs42xdr.c static void nfs4_xdr_enc_copy(struct rpc_rqst *req,
req               327 fs/nfs/nfs42xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req               341 fs/nfs/nfs42xdr.c static void nfs4_xdr_enc_offload_cancel(struct rpc_rqst *req,
req               350 fs/nfs/nfs42xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req               360 fs/nfs/nfs42xdr.c static void nfs4_xdr_enc_deallocate(struct rpc_rqst *req,
req               369 fs/nfs/nfs42xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req               380 fs/nfs/nfs42xdr.c static void nfs4_xdr_enc_seek(struct rpc_rqst *req,
req               389 fs/nfs/nfs42xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req               399 fs/nfs/nfs42xdr.c static void nfs4_xdr_enc_layoutstats(struct rpc_rqst *req,
req               410 fs/nfs/nfs42xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req               422 fs/nfs/nfs42xdr.c static void nfs4_xdr_enc_clone(struct rpc_rqst *req,
req               431 fs/nfs/nfs42xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req               444 fs/nfs/nfs42xdr.c static void nfs4_xdr_enc_layouterror(struct rpc_rqst *req,
req               454 fs/nfs/nfs42xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req                74 fs/nfs/nfs4xdr.c static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
req              1018 fs/nfs/nfs4xdr.c 				struct rpc_rqst *req,
req              1593 fs/nfs/nfs4xdr.c static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req, struct compound_hdr *hdr)
req              1643 fs/nfs/nfs4xdr.c static void encode_readlink(struct xdr_stream *xdr, const struct nfs4_readlink *readlink, struct rpc_rqst *req, struct compound_hdr *hdr)
req              2117 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_access(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2125 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2137 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_lookup(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2145 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2157 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_lookupp(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2165 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2177 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_lookup_root(struct rpc_rqst *req,
req              2186 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2197 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_remove(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2205 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2215 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_rename(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2223 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2235 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_link(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2243 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2257 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_create(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2265 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2277 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_symlink(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2282 fs/nfs/nfs4xdr.c 	nfs4_xdr_enc_create(req, xdr, args);
req              2288 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_getattr(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2296 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2306 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_close(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2314 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2328 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2336 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2346 fs/nfs/nfs4xdr.c 		rpc_prepare_reply_pages(req, args->lg_args->layout.pages, 0,
req              2356 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_open_confirm(struct rpc_rqst *req,
req              2365 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2374 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_open_noattr(struct rpc_rqst *req,
req              2383 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2392 fs/nfs/nfs4xdr.c 		rpc_prepare_reply_pages(req, args->lg_args->layout.pages, 0,
req              2402 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req,
req              2411 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2423 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_lock(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2431 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2441 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_lockt(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2449 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2459 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_locku(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2467 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2474 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req,
req              2483 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2491 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_readlink(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2499 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2502 fs/nfs/nfs4xdr.c 	encode_readlink(xdr, args, req, &hdr);
req              2504 fs/nfs/nfs4xdr.c 	rpc_prepare_reply_pages(req, args->pages, args->pgbase,
req              2512 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_readdir(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2520 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2523 fs/nfs/nfs4xdr.c 	encode_readdir(xdr, args, req, &hdr);
req              2525 fs/nfs/nfs4xdr.c 	rpc_prepare_reply_pages(req, args->pages, args->pgbase,
req              2533 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_read(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2541 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2546 fs/nfs/nfs4xdr.c 	rpc_prepare_reply_pages(req, args->pages, args->pgbase,
req              2548 fs/nfs/nfs4xdr.c 	req->rq_rcv_buf.flags |= XDRBUF_READ;
req              2555 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_setattr(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2563 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2574 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2586 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2593 fs/nfs/nfs4xdr.c 	rpc_prepare_reply_pages(req, args->acl_pages, 0,
req              2601 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2609 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2613 fs/nfs/nfs4xdr.c 	req->rq_snd_buf.flags |= XDRBUF_WRITE;
req              2622 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2630 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2640 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2648 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2658 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2666 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2677 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2685 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2696 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_server_caps(struct rpc_rqst *req,
req              2706 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2716 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_renew(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2725 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2733 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_setclientid(struct rpc_rqst *req,
req              2742 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2750 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req,
req              2759 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2767 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
req              2776 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2790 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req,
req              2800 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2815 fs/nfs/nfs4xdr.c 	rpc_prepare_reply_pages(req, (struct page **)&args->page, 0,
req              2823 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_secinfo(struct rpc_rqst *req,
req              2832 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2842 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_fsid_present(struct rpc_rqst *req,
req              2851 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2864 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_bind_conn_to_session(struct rpc_rqst *req,
req              2873 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2881 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req,
req              2890 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2898 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_create_session(struct rpc_rqst *req,
req              2907 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2915 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_destroy_session(struct rpc_rqst *req,
req              2924 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2932 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_destroy_clientid(struct rpc_rqst *req,
req              2941 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2949 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr,
req              2957 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2967 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req,
req              2977 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              2989 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req,
req              2998 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              3007 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req,
req              3016 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              3022 fs/nfs/nfs4xdr.c 	rpc_prepare_reply_pages(req, args->pdev->pages, args->pdev->pgbase,
req              3030 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_layoutget(struct rpc_rqst *req,
req              3039 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              3044 fs/nfs/nfs4xdr.c 	rpc_prepare_reply_pages(req, args->layout.pages, 0,
req              3052 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_layoutcommit(struct rpc_rqst *req,
req              3063 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              3074 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_layoutreturn(struct rpc_rqst *req,
req              3083 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              3093 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_secinfo_no_name(struct rpc_rqst *req,
req              3102 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              3112 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_test_stateid(struct rpc_rqst *req,
req              3121 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              3130 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_free_stateid(struct rpc_rqst *req,
req              3139 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              5160 fs/nfs/nfs4xdr.c static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req,
req              5187 fs/nfs/nfs4xdr.c static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs4_readdir_res *readdir)
req              5203 fs/nfs/nfs4xdr.c static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
req              5205 fs/nfs/nfs4xdr.c 	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
req              5278 fs/nfs/nfs4xdr.c static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
req              5856 fs/nfs/nfs4xdr.c static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
req              5948 fs/nfs/nfs4xdr.c 			       struct rpc_rqst *req,
req              6014 fs/nfs/nfs4xdr.c static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
req              6333 fs/nfs/nfs4xdr.c static void nfs4_xdr_enc_setacl(struct rpc_rqst *req, struct xdr_stream *xdr,
req              6341 fs/nfs/nfs4xdr.c 	encode_compound_hdr(xdr, req, &hdr);
req              6773 fs/nfs/nfs4xdr.c static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
req              6782 fs/nfs/nfs4xdr.c 		status = decode_sequence(xdr, &res->seq_res, req);
req              6793 fs/nfs/nfs4xdr.c static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
req              6802 fs/nfs/nfs4xdr.c 		status = decode_sequence(xdr, &res->seq_res, req);
req              6813 fs/nfs/nfs4xdr.c static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
req              6822 fs/nfs/nfs4xdr.c 		status = decode_sequence(xdr, &res->seq_res, req);
req              6833 fs/nfs/nfs4xdr.c static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req,
req              6844 fs/nfs/nfs4xdr.c 	status = decode_sequence(xdr, &res->seq_res, req);
req              6873 fs/nfs/nfs4xdr.c static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req,
req              6890 fs/nfs/nfs4xdr.c static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req,
req              6942 fs/nfs/nfs4xdr.c static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req,
req              6953 fs/nfs/nfs4xdr.c 	status = decode_sequence(xdr, &res->seq_res, req);
req                49 fs/nfs/pagelist.c 	hdr->req = nfs_list_entry(mirror->pg_list.next);
req                51 fs/nfs/pagelist.c 	hdr->cred = nfs_req_openctx(hdr->req)->cred;
req                52 fs/nfs/pagelist.c 	hdr->io_start = req_offset(hdr->req);
req               145 fs/nfs/pagelist.c nfs_page_group_lock(struct nfs_page *req)
req               147 fs/nfs/pagelist.c 	struct nfs_page *head = req->wb_head;
req               165 fs/nfs/pagelist.c nfs_page_group_unlock(struct nfs_page *req)
req               167 fs/nfs/pagelist.c 	struct nfs_page *head = req->wb_head;
req               185 fs/nfs/pagelist.c nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
req               187 fs/nfs/pagelist.c 	struct nfs_page *head = req->wb_head;
req               191 fs/nfs/pagelist.c 	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));
req               193 fs/nfs/pagelist.c 	tmp = req->wb_this_page;
req               194 fs/nfs/pagelist.c 	while (tmp != req) {
req               201 fs/nfs/pagelist.c 	tmp = req;
req               205 fs/nfs/pagelist.c 	} while (tmp != req);
req               216 fs/nfs/pagelist.c bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
req               220 fs/nfs/pagelist.c 	nfs_page_group_lock(req);
req               221 fs/nfs/pagelist.c 	ret = nfs_page_group_sync_on_bit_locked(req, bit);
req               222 fs/nfs/pagelist.c 	nfs_page_group_unlock(req);
req               234 fs/nfs/pagelist.c nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
req               237 fs/nfs/pagelist.c 	WARN_ON_ONCE(prev == req);
req               241 fs/nfs/pagelist.c 		req->wb_head = req;
req               242 fs/nfs/pagelist.c 		req->wb_this_page = req;
req               247 fs/nfs/pagelist.c 		req->wb_head = prev->wb_head;
req               248 fs/nfs/pagelist.c 		req->wb_this_page = prev->wb_this_page;
req               249 fs/nfs/pagelist.c 		prev->wb_this_page = req;
req               253 fs/nfs/pagelist.c 		kref_get(&req->wb_head->wb_kref);
req               259 fs/nfs/pagelist.c 			inode = page_file_mapping(req->wb_page)->host;
req               260 fs/nfs/pagelist.c 			set_bit(PG_INODE_REF, &req->wb_flags);
req               261 fs/nfs/pagelist.c 			kref_get(&req->wb_kref);
req               277 fs/nfs/pagelist.c 	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
req               278 fs/nfs/pagelist.c 	struct nfs_page *head = req->wb_head;
req               281 fs/nfs/pagelist.c 	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
req               284 fs/nfs/pagelist.c 	tmp = req;
req               292 fs/nfs/pagelist.c 	} while (tmp != req);
req               295 fs/nfs/pagelist.c 	if (head != req)
req               304 fs/nfs/pagelist.c 	struct nfs_page		*req;
req               310 fs/nfs/pagelist.c 	req = nfs_page_alloc();
req               311 fs/nfs/pagelist.c 	if (req == NULL)
req               314 fs/nfs/pagelist.c 	req->wb_lock_context = l_ctx;
req               321 fs/nfs/pagelist.c 	req->wb_page    = page;
req               323 fs/nfs/pagelist.c 		req->wb_index = page_index(page);
req               326 fs/nfs/pagelist.c 	req->wb_offset  = offset;
req               327 fs/nfs/pagelist.c 	req->wb_pgbase	= pgbase;
req               328 fs/nfs/pagelist.c 	req->wb_bytes   = count;
req               329 fs/nfs/pagelist.c 	kref_init(&req->wb_kref);
req               330 fs/nfs/pagelist.c 	req->wb_nio = 0;
req               331 fs/nfs/pagelist.c 	return req;
req               362 fs/nfs/pagelist.c nfs_create_subreq(struct nfs_page *req, struct nfs_page *last,
req               368 fs/nfs/pagelist.c 	ret = __nfs_create_request(req->wb_lock_context, req->wb_page,
req               372 fs/nfs/pagelist.c 		ret->wb_index = req->wb_index;
req               374 fs/nfs/pagelist.c 		ret->wb_nio = req->wb_nio;
req               383 fs/nfs/pagelist.c void nfs_unlock_request(struct nfs_page *req)
req               385 fs/nfs/pagelist.c 	if (!NFS_WBACK_BUSY(req)) {
req               390 fs/nfs/pagelist.c 	clear_bit(PG_BUSY, &req->wb_flags);
req               392 fs/nfs/pagelist.c 	if (!test_bit(PG_CONTENDED2, &req->wb_flags))
req               394 fs/nfs/pagelist.c 	wake_up_bit(&req->wb_flags, PG_BUSY);
req               401 fs/nfs/pagelist.c void nfs_unlock_and_release_request(struct nfs_page *req)
req               403 fs/nfs/pagelist.c 	nfs_unlock_request(req);
req               404 fs/nfs/pagelist.c 	nfs_release_request(req);
req               414 fs/nfs/pagelist.c static void nfs_clear_request(struct nfs_page *req)
req               416 fs/nfs/pagelist.c 	struct page *page = req->wb_page;
req               417 fs/nfs/pagelist.c 	struct nfs_lock_context *l_ctx = req->wb_lock_context;
req               422 fs/nfs/pagelist.c 		req->wb_page = NULL;
req               432 fs/nfs/pagelist.c 		req->wb_lock_context = NULL;
req               442 fs/nfs/pagelist.c void nfs_free_request(struct nfs_page *req)
req               444 fs/nfs/pagelist.c 	WARN_ON_ONCE(req->wb_this_page != req);
req               447 fs/nfs/pagelist.c 	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
req               448 fs/nfs/pagelist.c 	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
req               449 fs/nfs/pagelist.c 	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
req               450 fs/nfs/pagelist.c 	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
req               451 fs/nfs/pagelist.c 	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));
req               454 fs/nfs/pagelist.c 	nfs_clear_request(req);
req               455 fs/nfs/pagelist.c 	nfs_page_free(req);
req               458 fs/nfs/pagelist.c void nfs_release_request(struct nfs_page *req)
req               460 fs/nfs/pagelist.c 	kref_put(&req->wb_kref, nfs_page_group_destroy);
req               472 fs/nfs/pagelist.c nfs_wait_on_request(struct nfs_page *req)
req               474 fs/nfs/pagelist.c 	if (!test_bit(PG_BUSY, &req->wb_flags))
req               476 fs/nfs/pagelist.c 	set_bit(PG_CONTENDED2, &req->wb_flags);
req               478 fs/nfs/pagelist.c 	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
req               493 fs/nfs/pagelist.c 			   struct nfs_page *prev, struct nfs_page *req)
req               508 fs/nfs/pagelist.c 	if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
req               512 fs/nfs/pagelist.c 	return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
req               566 fs/nfs/pagelist.c 	struct nfs_page *req = hdr->req;
req               572 fs/nfs/pagelist.c 	hdr->args.offset = req_offset(req);
req               575 fs/nfs/pagelist.c 	hdr->args.pgbase = req->wb_pgbase;
req               578 fs/nfs/pagelist.c 	hdr->args.context = get_nfs_open_context(nfs_req_openctx(req));
req               579 fs/nfs/pagelist.c 	hdr->args.lock_context = req->wb_lock_context;
req               763 fs/nfs/pagelist.c 	struct nfs_page		*req;
req               792 fs/nfs/pagelist.c 		req = nfs_list_entry(head->next);
req               793 fs/nfs/pagelist.c 		nfs_list_move_request(req, &hdr->pages);
req               795 fs/nfs/pagelist.c 		if (!last_page || last_page != req->wb_page) {
req               799 fs/nfs/pagelist.c 			*pages++ = last_page = req->wb_page;
req               866 fs/nfs/pagelist.c 				       struct nfs_page *req)
req               871 fs/nfs/pagelist.c 		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
req               917 fs/nfs/pagelist.c 				      struct nfs_page *req,
req               924 fs/nfs/pagelist.c 		if (!nfs_match_open_context(nfs_req_openctx(req), nfs_req_openctx(prev)))
req               926 fs/nfs/pagelist.c 		flctx = d_inode(nfs_req_openctx(req)->dentry)->i_flctx;
req               930 fs/nfs/pagelist.c 		    !nfs_match_lock_context(req->wb_lock_context,
req               933 fs/nfs/pagelist.c 		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
req               935 fs/nfs/pagelist.c 		if (req->wb_page == prev->wb_page) {
req               936 fs/nfs/pagelist.c 			if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
req               939 fs/nfs/pagelist.c 			if (req->wb_pgbase != 0 ||
req               944 fs/nfs/pagelist.c 	size = pgio->pg_ops->pg_test(pgio, prev, req);
req               945 fs/nfs/pagelist.c 	WARN_ON_ONCE(size > req->wb_bytes);
req               946 fs/nfs/pagelist.c 	if (size && size < req->wb_bytes)
req               947 fs/nfs/pagelist.c 		req->wb_bytes = size;
req               960 fs/nfs/pagelist.c 				     struct nfs_page *req)
req               970 fs/nfs/pagelist.c 			desc->pg_ops->pg_init(desc, req);
req               973 fs/nfs/pagelist.c 		mirror->pg_base = req->wb_pgbase;
req               976 fs/nfs/pagelist.c 	if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) {
req               984 fs/nfs/pagelist.c 	if (!nfs_can_coalesce_requests(prev, req, desc))
req               986 fs/nfs/pagelist.c 	nfs_list_move_request(req, &mirror->pg_list);
req               987 fs/nfs/pagelist.c 	mirror->pg_count += req->wb_bytes;
req              1014 fs/nfs/pagelist.c 		struct nfs_page *req)
req              1018 fs/nfs/pagelist.c 	nfs_list_move_request(req, &head);
req              1034 fs/nfs/pagelist.c 			   struct nfs_page *req)
req              1042 fs/nfs/pagelist.c 	nfs_page_group_lock(req);
req              1044 fs/nfs/pagelist.c 	subreq = req;
req              1056 fs/nfs/pagelist.c 			nfs_page_group_unlock(req);
req              1062 fs/nfs/pagelist.c 			nfs_page_group_lock(req);
req              1076 fs/nfs/pagelist.c 			subreq = nfs_create_subreq(req, subreq, pgbase,
req              1083 fs/nfs/pagelist.c 	nfs_page_group_unlock(req);
req              1087 fs/nfs/pagelist.c 	nfs_page_group_unlock(req);
req              1090 fs/nfs/pagelist.c 	if (req != subreq)
req              1108 fs/nfs/pagelist.c 			struct nfs_page *req;
req              1110 fs/nfs/pagelist.c 			req = list_first_entry(&head, struct nfs_page, wb_list);
req              1111 fs/nfs/pagelist.c 			if (__nfs_pageio_add_request(desc, req))
req              1125 fs/nfs/pagelist.c 		struct nfs_page *req)
req              1130 fs/nfs/pagelist.c 		ret = __nfs_pageio_add_request(desc, req);
req              1157 fs/nfs/pagelist.c 			   struct nfs_page *req)
req              1163 fs/nfs/pagelist.c 	pgbase = req->wb_pgbase;
req              1164 fs/nfs/pagelist.c 	offset = req->wb_offset;
req              1165 fs/nfs/pagelist.c 	bytes = req->wb_bytes;
req              1167 fs/nfs/pagelist.c 	nfs_pageio_setup_mirroring(desc, req);
req              1173 fs/nfs/pagelist.c 		nfs_page_group_lock(req);
req              1176 fs/nfs/pagelist.c 		for (lastreq = req->wb_head;
req              1177 fs/nfs/pagelist.c 		     lastreq->wb_this_page != req->wb_head;
req              1181 fs/nfs/pagelist.c 		dupreq = nfs_create_subreq(req, lastreq,
req              1184 fs/nfs/pagelist.c 		nfs_page_group_unlock(req);
req              1196 fs/nfs/pagelist.c 	if (!nfs_pageio_add_request_mirror(desc, req))
req              1251 fs/nfs/pagelist.c 		struct nfs_page *req = nfs_list_entry(pages.next);
req              1253 fs/nfs/pagelist.c 		if (!nfs_pageio_add_request(desc, req))
req              2483 fs/nfs/pnfs.c  pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
req              2485 fs/nfs/pnfs.c  	if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) {
req              2492 fs/nfs/pnfs.c  pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
req              2494 fs/nfs/pnfs.c  	u64 rd_size = req->wb_bytes;
req              2497 fs/nfs/pnfs.c  	pnfs_generic_pg_check_range(pgio, req);
req              2500 fs/nfs/pnfs.c  			rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
req              2505 fs/nfs/pnfs.c  						   nfs_req_openctx(req),
req              2506 fs/nfs/pnfs.c  						   req_offset(req),
req              2526 fs/nfs/pnfs.c  			   struct nfs_page *req, u64 wb_size)
req              2529 fs/nfs/pnfs.c  	pnfs_generic_pg_check_range(pgio, req);
req              2532 fs/nfs/pnfs.c  						   nfs_req_openctx(req),
req              2533 fs/nfs/pnfs.c  						   req_offset(req),
req              2566 fs/nfs/pnfs.c  		     struct nfs_page *prev, struct nfs_page *req)
req              2571 fs/nfs/pnfs.c  	size = nfs_generic_pg_test(pgio, prev, req);
req              2589 fs/nfs/pnfs.c  		req_start = req_offset(req);
req               151 fs/nfs/pnfs.h  	void (*mark_request_commit) (struct nfs_page *req,
req               155 fs/nfs/pnfs.h  	void (*clear_request_commit) (struct nfs_page *req,
req               248 fs/nfs/pnfs.h  			        struct nfs_page *req, u64 wb_size);
req               252 fs/nfs/pnfs.h  			    struct nfs_page *prev, struct nfs_page *req);
req               363 fs/nfs/pnfs.h  void pnfs_generic_clear_request_commit(struct nfs_page *req,
req               388 fs/nfs/pnfs.h  void pnfs_layout_mark_request_commit(struct nfs_page *req,
req               463 fs/nfs/pnfs.h  pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
req               466 fs/nfs/pnfs.h  	struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
req               471 fs/nfs/pnfs.h  	ld->mark_request_commit(req, lseg, cinfo, ds_commit_idx);
req               476 fs/nfs/pnfs.h  pnfs_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo)
req               478 fs/nfs/pnfs.h  	struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
req               483 fs/nfs/pnfs.h  	ld->clear_request_commit(req, cinfo);
req               620 fs/nfs/pnfs.h  pnfs_lseg_request_intersecting(struct pnfs_layout_segment *lseg, struct nfs_page *req)
req               623 fs/nfs/pnfs.h  	u64 req_last = req_offset(req) + req->wb_bytes;
req               626 fs/nfs/pnfs.h  				req_offset(req), req_last);
req               754 fs/nfs/pnfs.h  pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
req               761 fs/nfs/pnfs.h  pnfs_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo)
req                67 fs/nfs/pnfs_nfs.c pnfs_generic_clear_request_commit(struct nfs_page *req,
req                72 fs/nfs/pnfs_nfs.c 	if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
req                75 fs/nfs/pnfs_nfs.c 	if (list_is_singular(&req->wb_list)) {
req                78 fs/nfs/pnfs_nfs.c 		bucket = list_first_entry(&req->wb_list,
req                85 fs/nfs/pnfs_nfs.c 	nfs_request_remove_commit_list(req, cinfo);
req               927 fs/nfs/pnfs_nfs.c pnfs_layout_mark_request_commit(struct nfs_page *req,
req               941 fs/nfs/pnfs_nfs.c 			cinfo->completion_ops->resched_write(cinfo, req);
req               953 fs/nfs/pnfs_nfs.c 	set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
req               956 fs/nfs/pnfs_nfs.c 	nfs_request_add_commit_list_locked(req, list, cinfo);
req               958 fs/nfs/pnfs_nfs.c 	nfs_mark_page_unstable(req->wb_page, cinfo);
req                94 fs/nfs/read.c  static void nfs_readpage_release(struct nfs_page *req, int error)
req                96 fs/nfs/read.c  	struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
req                97 fs/nfs/read.c  	struct page *page = req->wb_page;
req               100 fs/nfs/read.c  		(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
req               101 fs/nfs/read.c  		(long long)req_offset(req));
req               105 fs/nfs/read.c  	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
req               114 fs/nfs/read.c  	nfs_release_request(req);
req               153 fs/nfs/read.c  static void nfs_page_group_set_uptodate(struct nfs_page *req)
req               155 fs/nfs/read.c  	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
req               156 fs/nfs/read.c  		SetPageUptodate(req->wb_page);
req               167 fs/nfs/read.c  		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
req               168 fs/nfs/read.c  		struct page *page = req->wb_page;
req               169 fs/nfs/read.c  		unsigned long start = req->wb_pgbase;
req               170 fs/nfs/read.c  		unsigned long end = req->wb_pgbase + req->wb_bytes;
req               181 fs/nfs/read.c  			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
req               185 fs/nfs/read.c  				WARN_ON(start < req->wb_pgbase);
req               190 fs/nfs/read.c  		bytes += req->wb_bytes;
req               193 fs/nfs/read.c  				nfs_page_group_set_uptodate(req);
req               196 fs/nfs/read.c  				xchg(&nfs_req_openctx(req)->error, error);
req               199 fs/nfs/read.c  			nfs_page_group_set_uptodate(req);
req               200 fs/nfs/read.c  		nfs_list_remove_request(req);
req               201 fs/nfs/read.c  		nfs_readpage_release(req, error);
req               223 fs/nfs/read.c  	struct nfs_page	*req;
req               226 fs/nfs/read.c  		req = nfs_list_entry(head->next);
req               227 fs/nfs/read.c  		nfs_list_remove_request(req);
req               228 fs/nfs/read.c  		nfs_readpage_release(req, error);
req                55 fs/nfs/write.c static void nfs_redirty_request(struct nfs_page *req);
req                60 fs/nfs/write.c static void nfs_inode_remove_request(struct nfs_page *req);
req                61 fs/nfs/write.c static void nfs_clear_request_commit(struct nfs_page *req);
req               171 fs/nfs/write.c 	struct nfs_page *req;
req               176 fs/nfs/write.c 	req = nfs_page_private_request(page);
req               177 fs/nfs/write.c 	if (req) {
req               178 fs/nfs/write.c 		WARN_ON_ONCE(req->wb_head != req);
req               179 fs/nfs/write.c 		kref_get(&req->wb_kref);
req               182 fs/nfs/write.c 	return req;
req               190 fs/nfs/write.c 	struct nfs_page *req = NULL;
req               195 fs/nfs/write.c 		req = nfs_page_search_commits_for_head_request_locked(nfsi,
req               197 fs/nfs/write.c 		if (req) {
req               198 fs/nfs/write.c 			WARN_ON_ONCE(req->wb_head != req);
req               199 fs/nfs/write.c 			kref_get(&req->wb_kref);
req               203 fs/nfs/write.c 	return req;
req               213 fs/nfs/write.c 	struct nfs_page *req;
req               215 fs/nfs/write.c 	req = nfs_page_find_private_request(page);
req               216 fs/nfs/write.c 	if (!req)
req               217 fs/nfs/write.c 		req = nfs_page_find_swap_request(page);
req               218 fs/nfs/write.c 	return req;
req               279 fs/nfs/write.c 	struct nfs_page *req;
req               281 fs/nfs/write.c 	req = head;
req               283 fs/nfs/write.c 		if (page_offset >= req->wb_pgbase &&
req               284 fs/nfs/write.c 		    page_offset < (req->wb_pgbase + req->wb_bytes))
req               285 fs/nfs/write.c 			return req;
req               287 fs/nfs/write.c 		req = req->wb_this_page;
req               288 fs/nfs/write.c 	} while (req != head);
req               300 fs/nfs/write.c static bool nfs_page_group_covers_page(struct nfs_page *req)
req               304 fs/nfs/write.c 	unsigned int len = nfs_page_length(req->wb_page);
req               306 fs/nfs/write.c 	nfs_page_group_lock(req);
req               309 fs/nfs/write.c 		tmp = nfs_page_group_search_locked(req->wb_head, pos);
req               315 fs/nfs/write.c 	nfs_page_group_unlock(req);
req               322 fs/nfs/write.c static void nfs_mark_uptodate(struct nfs_page *req)
req               324 fs/nfs/write.c 	if (PageUptodate(req->wb_page))
req               326 fs/nfs/write.c 	if (!nfs_page_group_covers_page(req))
req               328 fs/nfs/write.c 	SetPageUptodate(req->wb_page);
req               363 fs/nfs/write.c static void nfs_end_page_writeback(struct nfs_page *req)
req               365 fs/nfs/write.c 	struct inode *inode = page_file_mapping(req->wb_page)->host;
req               369 fs/nfs/write.c 	is_done = nfs_page_group_sync_on_bit(req, PG_WB_END);
req               370 fs/nfs/write.c 	nfs_unlock_request(req);
req               374 fs/nfs/write.c 	end_page_writeback(req->wb_page);
req               395 fs/nfs/write.c 			  struct nfs_page *req)
req               400 fs/nfs/write.c 	for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) {
req               602 fs/nfs/write.c static void nfs_write_error(struct nfs_page *req, int error)
req               604 fs/nfs/write.c 	nfs_set_pageerror(page_file_mapping(req->wb_page));
req               605 fs/nfs/write.c 	nfs_mapping_set_error(req->wb_page, error);
req               606 fs/nfs/write.c 	nfs_inode_remove_request(req);
req               607 fs/nfs/write.c 	nfs_end_page_writeback(req);
req               608 fs/nfs/write.c 	nfs_release_request(req);
req               618 fs/nfs/write.c 	struct nfs_page *req;
req               621 fs/nfs/write.c 	req = nfs_lock_and_join_requests(page);
req               622 fs/nfs/write.c 	if (!req)
req               624 fs/nfs/write.c 	ret = PTR_ERR(req);
req               625 fs/nfs/write.c 	if (IS_ERR(req))
req               629 fs/nfs/write.c 	WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
req               637 fs/nfs/write.c 	if (!nfs_pageio_add_request(pgio, req)) {
req               647 fs/nfs/write.c 		nfs_redirty_request(req);
req               655 fs/nfs/write.c 	nfs_write_error(req, ret);
req               755 fs/nfs/write.c static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
req               757 fs/nfs/write.c 	struct address_space *mapping = page_file_mapping(req->wb_page);
req               760 fs/nfs/write.c 	WARN_ON_ONCE(req->wb_this_page != req);
req               763 fs/nfs/write.c 	nfs_lock_request(req);
req               773 fs/nfs/write.c 	if (likely(!PageSwapCache(req->wb_page))) {
req               774 fs/nfs/write.c 		set_bit(PG_MAPPED, &req->wb_flags);
req               775 fs/nfs/write.c 		SetPagePrivate(req->wb_page);
req               776 fs/nfs/write.c 		set_page_private(req->wb_page, (unsigned long)req);
req               784 fs/nfs/write.c 	WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
req               785 fs/nfs/write.c 	kref_get(&req->wb_kref);
req               791 fs/nfs/write.c static void nfs_inode_remove_request(struct nfs_page *req)
req               793 fs/nfs/write.c 	struct address_space *mapping = page_file_mapping(req->wb_page);
req               798 fs/nfs/write.c 	if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
req               799 fs/nfs/write.c 		head = req->wb_head;
req               810 fs/nfs/write.c 	if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
req               811 fs/nfs/write.c 		nfs_release_request(req);
req               817 fs/nfs/write.c nfs_mark_request_dirty(struct nfs_page *req)
req               819 fs/nfs/write.c 	if (req->wb_page)
req               820 fs/nfs/write.c 		__set_page_dirty_nobuffers(req->wb_page);
req               869 fs/nfs/write.c nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
req               872 fs/nfs/write.c 	set_bit(PG_CLEAN, &req->wb_flags);
req               873 fs/nfs/write.c 	nfs_list_add_request(req, dst);
req               891 fs/nfs/write.c nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
req               894 fs/nfs/write.c 	nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
req               896 fs/nfs/write.c 	if (req->wb_page)
req               897 fs/nfs/write.c 		nfs_mark_page_unstable(req->wb_page, cinfo);
req               913 fs/nfs/write.c nfs_request_remove_commit_list(struct nfs_page *req,
req               916 fs/nfs/write.c 	if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
req               918 fs/nfs/write.c 	nfs_list_remove_request(req);
req               948 fs/nfs/write.c nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
req               951 fs/nfs/write.c 	if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
req               953 fs/nfs/write.c 	nfs_request_add_commit_list(req, cinfo);
req               966 fs/nfs/write.c nfs_clear_request_commit(struct nfs_page *req)
req               968 fs/nfs/write.c 	if (test_bit(PG_CLEAN, &req->wb_flags)) {
req               969 fs/nfs/write.c 		struct nfs_open_context *ctx = nfs_req_openctx(req);
req               975 fs/nfs/write.c 		if (!pnfs_clear_request_commit(req, &cinfo)) {
req               976 fs/nfs/write.c 			nfs_request_remove_commit_list(req, &cinfo);
req               979 fs/nfs/write.c 		nfs_clear_page_commit(req->wb_page);
req              1004 fs/nfs/write.c 		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
req              1006 fs/nfs/write.c 		bytes += req->wb_bytes;
req              1007 fs/nfs/write.c 		nfs_list_remove_request(req);
req              1010 fs/nfs/write.c 			nfs_set_pageerror(page_file_mapping(req->wb_page));
req              1011 fs/nfs/write.c 			nfs_mapping_set_error(req->wb_page, hdr->error);
req              1016 fs/nfs/write.c 			req->wb_nio = 0;
req              1017 fs/nfs/write.c 			memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
req              1018 fs/nfs/write.c 			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
req              1023 fs/nfs/write.c 		nfs_inode_remove_request(req);
req              1025 fs/nfs/write.c 		nfs_end_page_writeback(req);
req              1026 fs/nfs/write.c 		nfs_release_request(req);
req              1044 fs/nfs/write.c 	struct nfs_page *req, *tmp;
req              1048 fs/nfs/write.c 	list_for_each_entry_safe(req, tmp, src, wb_list) {
req              1049 fs/nfs/write.c 		kref_get(&req->wb_kref);
req              1050 fs/nfs/write.c 		if (!nfs_lock_request(req)) {
req              1055 fs/nfs/write.c 				nfs_release_request(req);
req              1060 fs/nfs/write.c 			status = nfs_wait_on_request(req);
req              1061 fs/nfs/write.c 			nfs_release_request(req);
req              1067 fs/nfs/write.c 		nfs_request_remove_commit_list(req, cinfo);
req              1068 fs/nfs/write.c 		clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
req              1069 fs/nfs/write.c 		nfs_list_add_request(req, dst);
req              1120 fs/nfs/write.c 	struct nfs_page *req;
req              1127 fs/nfs/write.c 	req = nfs_lock_and_join_requests(page);
req              1128 fs/nfs/write.c 	if (IS_ERR_OR_NULL(req))
req              1129 fs/nfs/write.c 		return req;
req              1131 fs/nfs/write.c 	rqend = req->wb_offset + req->wb_bytes;
req              1138 fs/nfs/write.c 	if (offset > rqend || end < req->wb_offset)
req              1142 fs/nfs/write.c 	if (offset < req->wb_offset) {
req              1143 fs/nfs/write.c 		req->wb_offset = offset;
req              1144 fs/nfs/write.c 		req->wb_pgbase = offset;
req              1147 fs/nfs/write.c 		req->wb_bytes = end - req->wb_offset;
req              1149 fs/nfs/write.c 		req->wb_bytes = rqend - req->wb_offset;
req              1150 fs/nfs/write.c 	req->wb_nio = 0;
req              1151 fs/nfs/write.c 	return req;
req              1158 fs/nfs/write.c 	nfs_mark_request_dirty(req);
req              1159 fs/nfs/write.c 	nfs_unlock_and_release_request(req);
req              1175 fs/nfs/write.c 	struct nfs_page	*req;
req              1177 fs/nfs/write.c 	req = nfs_try_to_update_request(inode, page, offset, bytes);
req              1178 fs/nfs/write.c 	if (req != NULL)
req              1180 fs/nfs/write.c 	req = nfs_create_request(ctx, page, offset, bytes);
req              1181 fs/nfs/write.c 	if (IS_ERR(req))
req              1183 fs/nfs/write.c 	nfs_inode_add_request(inode, req);
req              1185 fs/nfs/write.c 	return req;
req              1191 fs/nfs/write.c 	struct nfs_page	*req;
req              1193 fs/nfs/write.c 	req = nfs_setup_write_request(ctx, page, offset, count);
req              1194 fs/nfs/write.c 	if (IS_ERR(req))
req              1195 fs/nfs/write.c 		return PTR_ERR(req);
req              1198 fs/nfs/write.c 	nfs_mark_uptodate(req);
req              1199 fs/nfs/write.c 	nfs_mark_request_dirty(req);
req              1200 fs/nfs/write.c 	nfs_unlock_and_release_request(req);
req              1209 fs/nfs/write.c 	struct nfs_page	*req;
req              1220 fs/nfs/write.c 		req = nfs_page_find_head_request(page);
req              1221 fs/nfs/write.c 		if (req == NULL)
req              1223 fs/nfs/write.c 		l_ctx = req->wb_lock_context;
req              1224 fs/nfs/write.c 		do_flush = req->wb_page != page ||
req              1225 fs/nfs/write.c 			!nfs_match_open_context(nfs_req_openctx(req), ctx);
req              1231 fs/nfs/write.c 		nfs_release_request(req);
req              1423 fs/nfs/write.c static void nfs_redirty_request(struct nfs_page *req)
req              1426 fs/nfs/write.c 	req->wb_nio++;
req              1427 fs/nfs/write.c 	nfs_mark_request_dirty(req);
req              1428 fs/nfs/write.c 	set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
req              1429 fs/nfs/write.c 	nfs_end_page_writeback(req);
req              1430 fs/nfs/write.c 	nfs_release_request(req);
req              1435 fs/nfs/write.c 	struct nfs_page	*req;
req              1438 fs/nfs/write.c 		req = nfs_list_entry(head->next);
req              1439 fs/nfs/write.c 		nfs_list_remove_request(req);
req              1441 fs/nfs/write.c 			nfs_write_error(req, error);
req              1443 fs/nfs/write.c 			nfs_redirty_request(req);
req              1730 fs/nfs/write.c 	struct nfs_page *req;
req              1732 fs/nfs/write.c 	list_for_each_entry(req, head, wb_list)
req              1733 fs/nfs/write.c 		if (lwb < (req_offset(req) + req->wb_bytes))
req              1734 fs/nfs/write.c 			lwb = req_offset(req) + req->wb_bytes;
req              1782 fs/nfs/write.c 	struct nfs_page *req;
req              1785 fs/nfs/write.c 		req = nfs_list_entry(page_list->next);
req              1786 fs/nfs/write.c 		nfs_list_remove_request(req);
req              1787 fs/nfs/write.c 		nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
req              1789 fs/nfs/write.c 			nfs_clear_page_commit(req->wb_page);
req              1790 fs/nfs/write.c 		nfs_unlock_and_release_request(req);
req              1797 fs/nfs/write.c 		struct nfs_page *req)
req              1799 fs/nfs/write.c 	__set_page_dirty_nobuffers(req->wb_page);
req              1842 fs/nfs/write.c 	struct nfs_page	*req;
req              1848 fs/nfs/write.c 		req = nfs_list_entry(data->pages.next);
req              1849 fs/nfs/write.c 		nfs_list_remove_request(req);
req              1850 fs/nfs/write.c 		if (req->wb_page)
req              1851 fs/nfs/write.c 			nfs_clear_page_commit(req->wb_page);
req              1854 fs/nfs/write.c 			nfs_req_openctx(req)->dentry->d_sb->s_id,
req              1855 fs/nfs/write.c 			(unsigned long long)NFS_FILEID(d_inode(nfs_req_openctx(req)->dentry)),
req              1856 fs/nfs/write.c 			req->wb_bytes,
req              1857 fs/nfs/write.c 			(long long)req_offset(req));
req              1859 fs/nfs/write.c 			if (req->wb_page) {
req              1860 fs/nfs/write.c 				nfs_mapping_set_error(req->wb_page, status);
req              1861 fs/nfs/write.c 				nfs_inode_remove_request(req);
req              1870 fs/nfs/write.c 		    !nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier)) {
req              1872 fs/nfs/write.c 			if (req->wb_page)
req              1873 fs/nfs/write.c 				nfs_inode_remove_request(req);
req              1879 fs/nfs/write.c 		nfs_mark_request_dirty(req);
req              1880 fs/nfs/write.c 		set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
req              1882 fs/nfs/write.c 		nfs_unlock_and_release_request(req);
req              2045 fs/nfs/write.c 	struct nfs_page *req;
req              2052 fs/nfs/write.c 	req = nfs_lock_and_join_requests(page);
req              2054 fs/nfs/write.c 	if (IS_ERR(req)) {
req              2055 fs/nfs/write.c 		ret = PTR_ERR(req);
req              2056 fs/nfs/write.c 	} else if (req) {
req              2061 fs/nfs/write.c 		nfs_inode_remove_request(req);
req              2062 fs/nfs/write.c 		nfs_unlock_and_release_request(req);
req               219 fs/nfsd/blocklayout.c 	struct scsi_request *req;
req               244 fs/nfsd/blocklayout.c 	req = scsi_req(rq);
req               250 fs/nfsd/blocklayout.c 	req->cmd[0] = INQUIRY;
req               251 fs/nfsd/blocklayout.c 	req->cmd[1] = 1;
req               252 fs/nfsd/blocklayout.c 	req->cmd[2] = 0x83;
req               253 fs/nfsd/blocklayout.c 	req->cmd[3] = bufflen >> 8;
req               254 fs/nfsd/blocklayout.c 	req->cmd[4] = bufflen & 0xff;
req               255 fs/nfsd/blocklayout.c 	req->cmd_len = COMMAND_SIZE(INQUIRY);
req               258 fs/nfsd/blocklayout.c 	if (req->result) {
req               260 fs/nfsd/blocklayout.c 			req->result);
req               459 fs/nfsd/nfs4callback.c static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
req               468 fs/nfsd/nfs4callback.c static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
req               494 fs/nfsd/nfs4callback.c static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
req               575 fs/nfsd/nfs4callback.c static void nfs4_xdr_enc_cb_layout(struct rpc_rqst *req,
req               622 fs/nfsd/nfs4callback.c static void nfs4_xdr_enc_cb_notify_lock(struct rpc_rqst *req,
req               728 fs/nfsd/nfs4callback.c static void nfs4_xdr_enc_cb_offload(struct rpc_rqst *req,
req               499 fs/nilfs2/alloc.c 				     struct nilfs_palloc_req *req)
req               514 fs/nilfs2/alloc.c 	group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
req               521 fs/nilfs2/alloc.c 			maxgroup = nilfs_palloc_group(inode, req->pr_entry_nr,
req               548 fs/nilfs2/alloc.c 					req->pr_entry_nr =
req               553 fs/nilfs2/alloc.c 					req->pr_desc_bh = desc_bh;
req               554 fs/nilfs2/alloc.c 					req->pr_bitmap_bh = bitmap_bh;
req               583 fs/nilfs2/alloc.c 				     struct nilfs_palloc_req *req)
req               585 fs/nilfs2/alloc.c 	mark_buffer_dirty(req->pr_bitmap_bh);
req               586 fs/nilfs2/alloc.c 	mark_buffer_dirty(req->pr_desc_bh);
req               589 fs/nilfs2/alloc.c 	brelse(req->pr_bitmap_bh);
req               590 fs/nilfs2/alloc.c 	brelse(req->pr_desc_bh);
req               599 fs/nilfs2/alloc.c 				    struct nilfs_palloc_req *req)
req               607 fs/nilfs2/alloc.c 	group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
req               608 fs/nilfs2/alloc.c 	desc_kaddr = kmap(req->pr_desc_bh->b_page);
req               610 fs/nilfs2/alloc.c 						 req->pr_desc_bh, desc_kaddr);
req               611 fs/nilfs2/alloc.c 	bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
req               612 fs/nilfs2/alloc.c 	bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
req               619 fs/nilfs2/alloc.c 			  (unsigned long long)req->pr_entry_nr);
req               623 fs/nilfs2/alloc.c 	kunmap(req->pr_bitmap_bh->b_page);
req               624 fs/nilfs2/alloc.c 	kunmap(req->pr_desc_bh->b_page);
req               626 fs/nilfs2/alloc.c 	mark_buffer_dirty(req->pr_desc_bh);
req               627 fs/nilfs2/alloc.c 	mark_buffer_dirty(req->pr_bitmap_bh);
req               630 fs/nilfs2/alloc.c 	brelse(req->pr_bitmap_bh);
req               631 fs/nilfs2/alloc.c 	brelse(req->pr_desc_bh);
req               640 fs/nilfs2/alloc.c 				    struct nilfs_palloc_req *req)
req               648 fs/nilfs2/alloc.c 	group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
req               649 fs/nilfs2/alloc.c 	desc_kaddr = kmap(req->pr_desc_bh->b_page);
req               651 fs/nilfs2/alloc.c 						 req->pr_desc_bh, desc_kaddr);
req               652 fs/nilfs2/alloc.c 	bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
req               653 fs/nilfs2/alloc.c 	bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
req               660 fs/nilfs2/alloc.c 			  (unsigned long long)req->pr_entry_nr);
req               664 fs/nilfs2/alloc.c 	kunmap(req->pr_bitmap_bh->b_page);
req               665 fs/nilfs2/alloc.c 	kunmap(req->pr_desc_bh->b_page);
req               667 fs/nilfs2/alloc.c 	brelse(req->pr_bitmap_bh);
req               668 fs/nilfs2/alloc.c 	brelse(req->pr_desc_bh);
req               670 fs/nilfs2/alloc.c 	req->pr_entry_nr = 0;
req               671 fs/nilfs2/alloc.c 	req->pr_bitmap_bh = NULL;
req               672 fs/nilfs2/alloc.c 	req->pr_desc_bh = NULL;
req               681 fs/nilfs2/alloc.c 				    struct nilfs_palloc_req *req)
req               687 fs/nilfs2/alloc.c 	group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
req               697 fs/nilfs2/alloc.c 	req->pr_desc_bh = desc_bh;
req               698 fs/nilfs2/alloc.c 	req->pr_bitmap_bh = bitmap_bh;
req               708 fs/nilfs2/alloc.c 				   struct nilfs_palloc_req *req)
req               710 fs/nilfs2/alloc.c 	brelse(req->pr_bitmap_bh);
req               711 fs/nilfs2/alloc.c 	brelse(req->pr_desc_bh);
req               713 fs/nilfs2/alloc.c 	req->pr_entry_nr = 0;
req               714 fs/nilfs2/alloc.c 	req->pr_bitmap_bh = NULL;
req               715 fs/nilfs2/alloc.c 	req->pr_desc_bh = NULL;
req               179 fs/nilfs2/bmap.h 					       union nilfs_bmap_ptr_req *req,
req               183 fs/nilfs2/bmap.h 		return nilfs_dat_prepare_alloc(dat, &req->bpr_req);
req               185 fs/nilfs2/bmap.h 	req->bpr_ptr = bmap->b_last_allocated_ptr++;
req               190 fs/nilfs2/bmap.h 					       union nilfs_bmap_ptr_req *req,
req               194 fs/nilfs2/bmap.h 		nilfs_dat_commit_alloc(dat, &req->bpr_req);
req               198 fs/nilfs2/bmap.h 					      union nilfs_bmap_ptr_req *req,
req               202 fs/nilfs2/bmap.h 		nilfs_dat_abort_alloc(dat, &req->bpr_req);
req               208 fs/nilfs2/bmap.h 					     union nilfs_bmap_ptr_req *req,
req               211 fs/nilfs2/bmap.h 	return dat ? nilfs_dat_prepare_end(dat, &req->bpr_req) : 0;
req               215 fs/nilfs2/bmap.h 					     union nilfs_bmap_ptr_req *req,
req               219 fs/nilfs2/bmap.h 		nilfs_dat_commit_end(dat, &req->bpr_req,
req               224 fs/nilfs2/bmap.h 					    union nilfs_bmap_ptr_req *req,
req               228 fs/nilfs2/bmap.h 		nilfs_dat_abort_end(dat, &req->bpr_req);
req              2224 fs/nilfs2/btree.c 	union nilfs_bmap_ptr_req req;
req              2230 fs/nilfs2/btree.c 	req.bpr_ptr = ptr;
req              2231 fs/nilfs2/btree.c 	ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
req              2234 fs/nilfs2/btree.c 	nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
req                41 fs/nilfs2/dat.c 				   struct nilfs_palloc_req *req, int create)
req                43 fs/nilfs2/dat.c 	return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
req                44 fs/nilfs2/dat.c 					    create, &req->pr_entry_bh);
req                48 fs/nilfs2/dat.c 				   struct nilfs_palloc_req *req)
req                50 fs/nilfs2/dat.c 	mark_buffer_dirty(req->pr_entry_bh);
req                52 fs/nilfs2/dat.c 	brelse(req->pr_entry_bh);
req                56 fs/nilfs2/dat.c 				  struct nilfs_palloc_req *req)
req                58 fs/nilfs2/dat.c 	brelse(req->pr_entry_bh);
req                61 fs/nilfs2/dat.c int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
req                65 fs/nilfs2/dat.c 	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
req                69 fs/nilfs2/dat.c 	ret = nilfs_dat_prepare_entry(dat, req, 1);
req                71 fs/nilfs2/dat.c 		nilfs_palloc_abort_alloc_entry(dat, req);
req                76 fs/nilfs2/dat.c void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
req                81 fs/nilfs2/dat.c 	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
req                82 fs/nilfs2/dat.c 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req                83 fs/nilfs2/dat.c 					     req->pr_entry_bh, kaddr);
req                89 fs/nilfs2/dat.c 	nilfs_palloc_commit_alloc_entry(dat, req);
req                90 fs/nilfs2/dat.c 	nilfs_dat_commit_entry(dat, req);
req                93 fs/nilfs2/dat.c void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
req                95 fs/nilfs2/dat.c 	nilfs_dat_abort_entry(dat, req);
req                96 fs/nilfs2/dat.c 	nilfs_palloc_abort_alloc_entry(dat, req);
req               100 fs/nilfs2/dat.c 				  struct nilfs_palloc_req *req)
req               105 fs/nilfs2/dat.c 	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
req               106 fs/nilfs2/dat.c 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req               107 fs/nilfs2/dat.c 					     req->pr_entry_bh, kaddr);
req               113 fs/nilfs2/dat.c 	nilfs_dat_commit_entry(dat, req);
req               114 fs/nilfs2/dat.c 	nilfs_palloc_commit_free_entry(dat, req);
req               117 fs/nilfs2/dat.c int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
req               121 fs/nilfs2/dat.c 	ret = nilfs_dat_prepare_entry(dat, req, 0);
req               126 fs/nilfs2/dat.c void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
req               132 fs/nilfs2/dat.c 	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
req               133 fs/nilfs2/dat.c 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req               134 fs/nilfs2/dat.c 					     req->pr_entry_bh, kaddr);
req               139 fs/nilfs2/dat.c 	nilfs_dat_commit_entry(dat, req);
req               142 fs/nilfs2/dat.c int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
req               149 fs/nilfs2/dat.c 	ret = nilfs_dat_prepare_entry(dat, req, 0);
req               155 fs/nilfs2/dat.c 	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
req               156 fs/nilfs2/dat.c 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req               157 fs/nilfs2/dat.c 					     req->pr_entry_bh, kaddr);
req               162 fs/nilfs2/dat.c 		ret = nilfs_palloc_prepare_free_entry(dat, req);
req               164 fs/nilfs2/dat.c 			nilfs_dat_abort_entry(dat, req);
req               172 fs/nilfs2/dat.c void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
req               180 fs/nilfs2/dat.c 	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
req               181 fs/nilfs2/dat.c 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req               182 fs/nilfs2/dat.c 					     req->pr_entry_bh, kaddr);
req               193 fs/nilfs2/dat.c 		nilfs_dat_commit_free(dat, req);
req               195 fs/nilfs2/dat.c 		nilfs_dat_commit_entry(dat, req);
req               198 fs/nilfs2/dat.c void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
req               205 fs/nilfs2/dat.c 	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
req               206 fs/nilfs2/dat.c 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req               207 fs/nilfs2/dat.c 					     req->pr_entry_bh, kaddr);
req               213 fs/nilfs2/dat.c 		nilfs_palloc_abort_free_entry(dat, req);
req               214 fs/nilfs2/dat.c 	nilfs_dat_abort_entry(dat, req);
req               264 fs/nilfs2/dat.c 	struct nilfs_palloc_req req;
req               267 fs/nilfs2/dat.c 	req.pr_entry_nr = vblocknr;
req               268 fs/nilfs2/dat.c 	ret = nilfs_dat_prepare_entry(dat, &req, 0);
req               270 fs/nilfs2/dat.c 		nilfs_dat_commit_entry(dat, &req);
req               108 fs/nilfs2/direct.c 	union nilfs_bmap_ptr_req req;
req               119 fs/nilfs2/direct.c 		req.bpr_ptr = nilfs_direct_find_target_v(bmap, key);
req               122 fs/nilfs2/direct.c 	ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat);
req               128 fs/nilfs2/direct.c 		nilfs_bmap_commit_alloc_ptr(bmap, &req, dat);
req               129 fs/nilfs2/direct.c 		nilfs_direct_set_ptr(bmap, key, req.bpr_ptr);
req               135 fs/nilfs2/direct.c 			nilfs_bmap_set_target_v(bmap, key, req.bpr_ptr);
req               144 fs/nilfs2/direct.c 	union nilfs_bmap_ptr_req req;
req               153 fs/nilfs2/direct.c 	req.bpr_ptr = nilfs_direct_get_ptr(bmap, key);
req               155 fs/nilfs2/direct.c 	ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat);
req               157 fs/nilfs2/direct.c 		nilfs_bmap_commit_end_ptr(bmap, &req, dat);
req               294 fs/nilfs2/direct.c 	union nilfs_bmap_ptr_req req;
req               297 fs/nilfs2/direct.c 	req.bpr_ptr = ptr;
req               298 fs/nilfs2/direct.c 	ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
req               300 fs/nilfs2/direct.c 		nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
req                55 fs/nilfs2/ifile.c 	struct nilfs_palloc_req req;
req                58 fs/nilfs2/ifile.c 	req.pr_entry_nr = 0;  /*
req                62 fs/nilfs2/ifile.c 	req.pr_entry_bh = NULL;
req                64 fs/nilfs2/ifile.c 	ret = nilfs_palloc_prepare_alloc_entry(ifile, &req);
req                66 fs/nilfs2/ifile.c 		ret = nilfs_palloc_get_entry_block(ifile, req.pr_entry_nr, 1,
req                67 fs/nilfs2/ifile.c 						   &req.pr_entry_bh);
req                69 fs/nilfs2/ifile.c 			nilfs_palloc_abort_alloc_entry(ifile, &req);
req                72 fs/nilfs2/ifile.c 		brelse(req.pr_entry_bh);
req                75 fs/nilfs2/ifile.c 	nilfs_palloc_commit_alloc_entry(ifile, &req);
req                76 fs/nilfs2/ifile.c 	mark_buffer_dirty(req.pr_entry_bh);
req                78 fs/nilfs2/ifile.c 	*out_ino = (ino_t)req.pr_entry_nr;
req                79 fs/nilfs2/ifile.c 	*out_bh = req.pr_entry_bh;
req                99 fs/nilfs2/ifile.c 	struct nilfs_palloc_req req = {
req               106 fs/nilfs2/ifile.c 	ret = nilfs_palloc_prepare_free_entry(ifile, &req);
req               108 fs/nilfs2/ifile.c 		ret = nilfs_palloc_get_entry_block(ifile, req.pr_entry_nr, 0,
req               109 fs/nilfs2/ifile.c 						   &req.pr_entry_bh);
req               111 fs/nilfs2/ifile.c 			nilfs_palloc_abort_free_entry(ifile, &req);
req               114 fs/nilfs2/ifile.c 		brelse(req.pr_entry_bh);
req               118 fs/nilfs2/ifile.c 	kaddr = kmap_atomic(req.pr_entry_bh->b_page);
req               119 fs/nilfs2/ifile.c 	raw_inode = nilfs_palloc_block_get_entry(ifile, req.pr_entry_nr,
req               120 fs/nilfs2/ifile.c 						 req.pr_entry_bh, kaddr);
req               124 fs/nilfs2/ifile.c 	mark_buffer_dirty(req.pr_entry_bh);
req               125 fs/nilfs2/ifile.c 	brelse(req.pr_entry_bh);
req               127 fs/nilfs2/ifile.c 	nilfs_palloc_commit_free_entry(ifile, &req);
req              1672 fs/ocfs2/dlm/dlmrecovery.c 	struct dlm_master_requery req;
req              1675 fs/ocfs2/dlm/dlmrecovery.c 	memset(&req, 0, sizeof(req));
req              1676 fs/ocfs2/dlm/dlmrecovery.c 	req.node_idx = dlm->node_num;
req              1677 fs/ocfs2/dlm/dlmrecovery.c 	req.namelen = res->lockname.len;
req              1678 fs/ocfs2/dlm/dlmrecovery.c 	memcpy(req.name, res->lockname.name, res->lockname.len);
req              1682 fs/ocfs2/dlm/dlmrecovery.c 				 &req, sizeof(req), nodenum, &status);
req              1710 fs/ocfs2/dlm/dlmrecovery.c 	struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
req              1723 fs/ocfs2/dlm/dlmrecovery.c 	hash = dlm_lockid_hash(req->name, req->namelen);
req              1726 fs/ocfs2/dlm/dlmrecovery.c 	res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
req                43 fs/ocfs2/ioctl.c 					struct ocfs2_info_request __user *req)
req                46 fs/ocfs2/ioctl.c 	(void)put_user(kreq->ir_flags, (__u32 __user *)&(req->ir_flags));
req                49 fs/ocfs2/ioctl.c static inline void o2info_set_request_filled(struct ocfs2_info_request *req)
req                51 fs/ocfs2/ioctl.c 	req->ir_flags |= OCFS2_INFO_FL_FILLED;
req                54 fs/ocfs2/ioctl.c static inline void o2info_clear_request_filled(struct ocfs2_info_request *req)
req                56 fs/ocfs2/ioctl.c 	req->ir_flags &= ~OCFS2_INFO_FL_FILLED;
req                59 fs/ocfs2/ioctl.c static inline int o2info_coherent(struct ocfs2_info_request *req)
req                61 fs/ocfs2/ioctl.c 	return (!(req->ir_flags & OCFS2_INFO_FL_NON_COHERENT));
req               140 fs/ocfs2/ioctl.c 				       struct ocfs2_info_request __user *req)
req               144 fs/ocfs2/ioctl.c 	if (o2info_from_user(oib, req))
req               151 fs/ocfs2/ioctl.c 	if (o2info_to_user(oib, req))
req               158 fs/ocfs2/ioctl.c 					 struct ocfs2_info_request __user *req)
req               163 fs/ocfs2/ioctl.c 	if (o2info_from_user(oic, req))
req               170 fs/ocfs2/ioctl.c 	if (o2info_to_user(oic, req))
req               177 fs/ocfs2/ioctl.c 				      struct ocfs2_info_request __user *req)
req               182 fs/ocfs2/ioctl.c 	if (o2info_from_user(oim, req))
req               189 fs/ocfs2/ioctl.c 	if (o2info_to_user(oim, req))
req               196 fs/ocfs2/ioctl.c 				   struct ocfs2_info_request __user *req)
req               201 fs/ocfs2/ioctl.c 	if (o2info_from_user(oil, req))
req               208 fs/ocfs2/ioctl.c 	if (o2info_to_user(oil, req))
req               215 fs/ocfs2/ioctl.c 				  struct ocfs2_info_request __user *req)
req               220 fs/ocfs2/ioctl.c 	if (o2info_from_user(oiu, req))
req               227 fs/ocfs2/ioctl.c 	if (o2info_to_user(oiu, req))
req               234 fs/ocfs2/ioctl.c 					 struct ocfs2_info_request __user *req)
req               239 fs/ocfs2/ioctl.c 	if (o2info_from_user(oif, req))
req               248 fs/ocfs2/ioctl.c 	if (o2info_to_user(oif, req))
req               255 fs/ocfs2/ioctl.c 					  struct ocfs2_info_request __user *req)
req               260 fs/ocfs2/ioctl.c 	if (o2info_from_user(oij, req))
req               267 fs/ocfs2/ioctl.c 	if (o2info_to_user(oij, req))
req               322 fs/ocfs2/ioctl.c 				       struct ocfs2_info_request __user *req)
req               339 fs/ocfs2/ioctl.c 	if (o2info_from_user(*oifi, req)) {
req               380 fs/ocfs2/ioctl.c 	if (o2info_to_user(*oifi, req)) {
req               388 fs/ocfs2/ioctl.c 		o2info_set_request_error(&oifi->ifi_req, req);
req               610 fs/ocfs2/ioctl.c 				      struct ocfs2_info_request __user *req)
req               627 fs/ocfs2/ioctl.c 	if (o2info_from_user(*oiff, req)) {
req               667 fs/ocfs2/ioctl.c 	if (o2info_to_user(*oiff, req)) {
req               675 fs/ocfs2/ioctl.c 		o2info_set_request_error(&oiff->iff_req, req);
req               683 fs/ocfs2/ioctl.c 				     struct ocfs2_info_request __user *req)
req               687 fs/ocfs2/ioctl.c 	if (o2info_from_user(oir, req))
req               692 fs/ocfs2/ioctl.c 	if (o2info_to_user(oir, req))
req               706 fs/ocfs2/ioctl.c 				     struct ocfs2_info_request __user *req)
req               711 fs/ocfs2/ioctl.c 	if (o2info_from_user(oir, req))
req               721 fs/ocfs2/ioctl.c 			status = ocfs2_info_handle_blocksize(inode, req);
req               725 fs/ocfs2/ioctl.c 			status = ocfs2_info_handle_clustersize(inode, req);
req               729 fs/ocfs2/ioctl.c 			status = ocfs2_info_handle_maxslots(inode, req);
req               733 fs/ocfs2/ioctl.c 			status = ocfs2_info_handle_label(inode, req);
req               737 fs/ocfs2/ioctl.c 			status = ocfs2_info_handle_uuid(inode, req);
req               741 fs/ocfs2/ioctl.c 			status = ocfs2_info_handle_fs_features(inode, req);
req               745 fs/ocfs2/ioctl.c 			status = ocfs2_info_handle_journal_size(inode, req);
req               749 fs/ocfs2/ioctl.c 			status = ocfs2_info_handle_freeinode(inode, req);
req               753 fs/ocfs2/ioctl.c 			status = ocfs2_info_handle_freefrag(inode, req);
req               756 fs/ocfs2/ioctl.c 		status = ocfs2_info_handle_unknown(inode, req);
req                32 fs/orangefs/dcache.c 	new_op->upcall.req.lookup.sym_follow = ORANGEFS_LOOKUP_LINK_NO_FOLLOW;
req                33 fs/orangefs/dcache.c 	new_op->upcall.req.lookup.parent_refn = parent->refn;
req                34 fs/orangefs/dcache.c 	strncpy(new_op->upcall.req.lookup.d_name,
req                75 fs/orangefs/dir.c 	op->upcall.req.readdir.refn = oi->refn;
req                76 fs/orangefs/dir.c 	op->upcall.req.readdir.token = od->token;
req                77 fs/orangefs/dir.c 	op->upcall.req.readdir.max_dirent_count =
req                87 fs/orangefs/dir.c 	op->upcall.req.readdir.buf_index = bufi;
req                33 fs/orangefs/file.c 	new_op->upcall.req.ra_cache_flush.refn = orangefs_inode->refn;
req                64 fs/orangefs/file.c 	new_op->upcall.req.io.readahead_size = readahead_size;
req                65 fs/orangefs/file.c 	new_op->upcall.req.io.io_type = type;
req                66 fs/orangefs/file.c 	new_op->upcall.req.io.refn = orangefs_inode->refn;
req                86 fs/orangefs/file.c 	new_op->upcall.req.io.buf_index = buffer_index;
req                87 fs/orangefs/file.c 	new_op->upcall.req.io.count = total_size;
req                88 fs/orangefs/file.c 	new_op->upcall.req.io.offset = *offset;
req               540 fs/orangefs/file.c 	new_op->upcall.req.fsync.refn = orangefs_inode->refn;
req               831 fs/orangefs/inode.c 	new_op->upcall.req.truncate.refn = orangefs_inode->refn;
req               832 fs/orangefs/inode.c 	new_op->upcall.req.truncate.size = (__s64) iattr->ia_size;
req                38 fs/orangefs/namei.c 	new_op->upcall.req.create.parent_refn = parent->refn;
req                40 fs/orangefs/namei.c 	fill_default_sys_attrs(new_op->upcall.req.create.attributes,
req                43 fs/orangefs/namei.c 	strncpy(new_op->upcall.req.create.d_name,
req               130 fs/orangefs/namei.c 	new_op->upcall.req.lookup.sym_follow = ORANGEFS_LOOKUP_LINK_NO_FOLLOW;
req               137 fs/orangefs/namei.c 	new_op->upcall.req.lookup.parent_refn = parent->refn;
req               139 fs/orangefs/namei.c 	strncpy(new_op->upcall.req.lookup.d_name, dentry->d_name.name,
req               145 fs/orangefs/namei.c 		     new_op->upcall.req.lookup.d_name,
req               146 fs/orangefs/namei.c 		     &new_op->upcall.req.lookup.parent_refn.khandle,
req               147 fs/orangefs/namei.c 		     new_op->upcall.req.lookup.parent_refn.fs_id);
req               193 fs/orangefs/namei.c 	new_op->upcall.req.remove.parent_refn = parent->refn;
req               194 fs/orangefs/namei.c 	strncpy(new_op->upcall.req.remove.d_name, dentry->d_name.name,
req               242 fs/orangefs/namei.c 	new_op->upcall.req.sym.parent_refn = parent->refn;
req               244 fs/orangefs/namei.c 	fill_default_sys_attrs(new_op->upcall.req.sym.attributes,
req               248 fs/orangefs/namei.c 	strncpy(new_op->upcall.req.sym.entry_name,
req               251 fs/orangefs/namei.c 	strncpy(new_op->upcall.req.sym.target, symname, ORANGEFS_NAME_MAX - 1);
req               319 fs/orangefs/namei.c 	new_op->upcall.req.mkdir.parent_refn = parent->refn;
req               321 fs/orangefs/namei.c 	fill_default_sys_attrs(new_op->upcall.req.mkdir.attributes,
req               324 fs/orangefs/namei.c 	strncpy(new_op->upcall.req.mkdir.d_name,
req               401 fs/orangefs/namei.c 	new_op->upcall.req.rename.old_parent_refn = ORANGEFS_I(old_dir)->refn;
req               402 fs/orangefs/namei.c 	new_op->upcall.req.rename.new_parent_refn = ORANGEFS_I(new_dir)->refn;
req               404 fs/orangefs/namei.c 	strncpy(new_op->upcall.req.rename.d_old_name,
req               407 fs/orangefs/namei.c 	strncpy(new_op->upcall.req.rename.d_new_name,
req               466 fs/orangefs/orangefs-debugfs.c 		new_op->upcall.req.param.op =
req               468 fs/orangefs/orangefs-debugfs.c 		new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_SET;
req               469 fs/orangefs/orangefs-debugfs.c 		memset(new_op->upcall.req.param.s_value,
req               472 fs/orangefs/orangefs-debugfs.c 		sprintf(new_op->upcall.req.param.s_value,
req               351 fs/orangefs/orangefs-sysfs.c 		new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_GET;
req               365 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               369 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               373 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               378 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               383 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               388 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               393 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               397 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               401 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               405 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               409 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               414 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               418 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               422 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               426 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               431 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               435 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               439 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               443 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               448 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               452 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               456 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               460 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.op =
req               465 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.perf_count.type =
req               469 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.perf_count.type =
req               473 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.perf_count.type =
req               498 fs/orangefs/orangefs-sysfs.c 			if (new_op->upcall.req.param.op ==
req               570 fs/orangefs/orangefs-sysfs.c 	new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_SET;
req               585 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               594 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               603 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               612 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               621 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               636 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               642 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.u.value32[0] = val1;
req               643 fs/orangefs/orangefs-sysfs.c 			new_op->upcall.req.param.u.value32[1] = val2;
req               648 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               659 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               667 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               676 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               684 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               695 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               703 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               712 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               720 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               731 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               739 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               748 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               756 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               767 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               775 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               784 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               792 fs/orangefs/orangefs-sysfs.c 				new_op->upcall.req.param.op =
req               807 fs/orangefs/orangefs-sysfs.c 	new_op->upcall.req.param.u.value64 = val;
req                21 fs/orangefs/orangefs-utils.c 			fsid = op->upcall.req.io.refn.fs_id;
req                24 fs/orangefs/orangefs-utils.c 			fsid = op->upcall.req.lookup.parent_refn.fs_id;
req                27 fs/orangefs/orangefs-utils.c 			fsid = op->upcall.req.create.parent_refn.fs_id;
req                30 fs/orangefs/orangefs-utils.c 			fsid = op->upcall.req.getattr.refn.fs_id;
req                33 fs/orangefs/orangefs-utils.c 			fsid = op->upcall.req.remove.parent_refn.fs_id;
req                36 fs/orangefs/orangefs-utils.c 			fsid = op->upcall.req.mkdir.parent_refn.fs_id;
req                39 fs/orangefs/orangefs-utils.c 			fsid = op->upcall.req.readdir.refn.fs_id;
req                42 fs/orangefs/orangefs-utils.c 			fsid = op->upcall.req.setattr.refn.fs_id;
req                45 fs/orangefs/orangefs-utils.c 			fsid = op->upcall.req.sym.parent_refn.fs_id;
req                48 fs/orangefs/orangefs-utils.c 			fsid = op->upcall.req.rename.old_parent_refn.fs_id;
req                51 fs/orangefs/orangefs-utils.c 			fsid = op->upcall.req.statfs.fs_id;
req                54 fs/orangefs/orangefs-utils.c 			fsid = op->upcall.req.truncate.refn.fs_id;
req                57 fs/orangefs/orangefs-utils.c 			fsid = op->upcall.req.ra_cache_flush.refn.fs_id;
req                60 fs/orangefs/orangefs-utils.c 			fsid = op->upcall.req.fs_umount.fs_id;
req                63 fs/orangefs/orangefs-utils.c 			fsid = op->upcall.req.getxattr.refn.fs_id;
req                66 fs/orangefs/orangefs-utils.c 			fsid = op->upcall.req.setxattr.refn.fs_id;
req                69 fs/orangefs/orangefs-utils.c 			fsid = op->upcall.req.listxattr.refn.fs_id;
req                72 fs/orangefs/orangefs-utils.c 			fsid = op->upcall.req.removexattr.refn.fs_id;
req                75 fs/orangefs/orangefs-utils.c 			fsid = op->upcall.req.fsync.refn.fs_id;
req               264 fs/orangefs/orangefs-utils.c 	new_op->upcall.req.getattr.refn = orangefs_inode->refn;
req               270 fs/orangefs/orangefs-utils.c 		new_op->upcall.req.getattr.mask = ORANGEFS_ATTR_SYS_ALL_NOHINT;
req               272 fs/orangefs/orangefs-utils.c 		new_op->upcall.req.getattr.mask =
req               396 fs/orangefs/orangefs-utils.c 	new_op->upcall.req.getattr.refn = orangefs_inode->refn;
req               397 fs/orangefs/orangefs-utils.c 	new_op->upcall.req.getattr.mask = ORANGEFS_ATTR_SYS_TYPE |
req               430 fs/orangefs/orangefs-utils.c 	new_op->upcall.req.setattr.refn = orangefs_inode->refn;
req               432 fs/orangefs/orangefs-utils.c 	    &new_op->upcall.req.setattr.attributes);
req               434 fs/orangefs/orangefs-utils.c 	if (!new_op->upcall.req.setattr.attributes.mask) {
req                97 fs/orangefs/super.c static void orangefs_inode_cache_ctor(void *req)
req                99 fs/orangefs/super.c 	struct orangefs_inode_s *orangefs_inode = req;
req               181 fs/orangefs/super.c 	new_op->upcall.req.statfs.fs_id = ORANGEFS_SB(sb)->fs_id;
req               255 fs/orangefs/super.c 	strncpy(new_op->upcall.req.fs_mount.orangefs_config_server,
req               261 fs/orangefs/super.c 		     new_op->upcall.req.fs_mount.orangefs_config_server);
req               289 fs/orangefs/super.c 		new_op->upcall.req.features.features = 0;
req               400 fs/orangefs/super.c 	op->upcall.req.fs_umount.id = id;
req               401 fs/orangefs/super.c 	op->upcall.req.fs_umount.fs_id = fs_id;
req               402 fs/orangefs/super.c 	strncpy(op->upcall.req.fs_umount.orangefs_config_server,
req               496 fs/orangefs/super.c 	strncpy(new_op->upcall.req.fs_mount.orangefs_config_server,
req               502 fs/orangefs/super.c 		     new_op->upcall.req.fs_mount.orangefs_config_server);
req               571 fs/orangefs/super.c 		new_op->upcall.req.features.features = 0;
req               257 fs/orangefs/upcall.h 	} req;
req               213 fs/orangefs/waitqueue.c 	op->slot_to_free = op->upcall.req.io.buf_index;
req               217 fs/orangefs/waitqueue.c 	op->upcall.req.cancel.op_tag = tag;
req               152 fs/orangefs/xattr.c 	new_op->upcall.req.getxattr.refn = orangefs_inode->refn;
req               153 fs/orangefs/xattr.c 	strcpy(new_op->upcall.req.getxattr.key, name);
req               160 fs/orangefs/xattr.c 	new_op->upcall.req.getxattr.key_sz = strlen(name) + 1;
req               171 fs/orangefs/xattr.c 				     (char *)new_op->upcall.req.getxattr.key);
req               213 fs/orangefs/xattr.c 		upcall.req.getxattr.key,
req               215 fs/orangefs/xattr.c 		upcall.req.getxattr.key_sz,
req               262 fs/orangefs/xattr.c 	new_op->upcall.req.removexattr.refn = orangefs_inode->refn;
req               268 fs/orangefs/xattr.c 	strcpy(new_op->upcall.req.removexattr.key, name);
req               269 fs/orangefs/xattr.c 	new_op->upcall.req.removexattr.key_sz = strlen(name) + 1;
req               273 fs/orangefs/xattr.c 		     (char *)new_op->upcall.req.removexattr.key,
req               274 fs/orangefs/xattr.c 		     (int)new_op->upcall.req.removexattr.key_sz);
req               355 fs/orangefs/xattr.c 	new_op->upcall.req.setxattr.refn = orangefs_inode->refn;
req               356 fs/orangefs/xattr.c 	new_op->upcall.req.setxattr.flags = internal_flag;
req               362 fs/orangefs/xattr.c 	strcpy(new_op->upcall.req.setxattr.keyval.key, name);
req               363 fs/orangefs/xattr.c 	new_op->upcall.req.setxattr.keyval.key_sz = strlen(name) + 1;
req               364 fs/orangefs/xattr.c 	memcpy(new_op->upcall.req.setxattr.keyval.val, value, size);
req               365 fs/orangefs/xattr.c 	new_op->upcall.req.setxattr.keyval.val_sz = size;
req               370 fs/orangefs/xattr.c 		     (char *)new_op->upcall.req.setxattr.keyval.key,
req               371 fs/orangefs/xattr.c 		     (int)new_op->upcall.req.setxattr.keyval.key_sz,
req               434 fs/orangefs/xattr.c 	new_op->upcall.req.listxattr.refn = orangefs_inode->refn;
req               435 fs/orangefs/xattr.c 	new_op->upcall.req.listxattr.token = token;
req               436 fs/orangefs/xattr.c 	new_op->upcall.req.listxattr.requested_count =
req               363 fs/ubifs/budget.c 			   const struct ubifs_budget_req *req)
req               367 fs/ubifs/budget.c 	znodes = req->new_ino + (req->new_page << UBIFS_BLOCKS_PER_PAGE_SHIFT) +
req               368 fs/ubifs/budget.c 		 req->new_dent;
req               379 fs/ubifs/budget.c 			    const struct ubifs_budget_req *req)
req               383 fs/ubifs/budget.c 	data_growth = req->new_ino  ? c->bi.inode_budget : 0;
req               384 fs/ubifs/budget.c 	if (req->new_page)
req               386 fs/ubifs/budget.c 	if (req->new_dent)
req               388 fs/ubifs/budget.c 	data_growth += req->new_ino_d;
req               399 fs/ubifs/budget.c 			  const struct ubifs_budget_req *req)
req               403 fs/ubifs/budget.c 	dd_growth = req->dirtied_page ? c->bi.page_budget : 0;
req               405 fs/ubifs/budget.c 	if (req->dirtied_ino)
req               406 fs/ubifs/budget.c 		dd_growth += c->bi.inode_budget << (req->dirtied_ino - 1);
req               407 fs/ubifs/budget.c 	if (req->mod_dent)
req               409 fs/ubifs/budget.c 	dd_growth += req->dirtied_ino_d;
req               426 fs/ubifs/budget.c int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req)
req               430 fs/ubifs/budget.c 	ubifs_assert(c, req->new_page <= 1);
req               431 fs/ubifs/budget.c 	ubifs_assert(c, req->dirtied_page <= 1);
req               432 fs/ubifs/budget.c 	ubifs_assert(c, req->new_dent <= 1);
req               433 fs/ubifs/budget.c 	ubifs_assert(c, req->mod_dent <= 1);
req               434 fs/ubifs/budget.c 	ubifs_assert(c, req->new_ino <= 1);
req               435 fs/ubifs/budget.c 	ubifs_assert(c, req->new_ino_d <= UBIFS_MAX_INO_DATA);
req               436 fs/ubifs/budget.c 	ubifs_assert(c, req->dirtied_ino <= 4);
req               437 fs/ubifs/budget.c 	ubifs_assert(c, req->dirtied_ino_d <= UBIFS_MAX_INO_DATA * 4);
req               438 fs/ubifs/budget.c 	ubifs_assert(c, !(req->new_ino_d & 7));
req               439 fs/ubifs/budget.c 	ubifs_assert(c, !(req->dirtied_ino_d & 7));
req               441 fs/ubifs/budget.c 	data_growth = calc_data_growth(c, req);
req               442 fs/ubifs/budget.c 	dd_growth = calc_dd_growth(c, req);
req               445 fs/ubifs/budget.c 	idx_growth = calc_idx_growth(c, req);
req               465 fs/ubifs/budget.c 		req->idx_growth = idx_growth;
req               466 fs/ubifs/budget.c 		req->data_growth = data_growth;
req               467 fs/ubifs/budget.c 		req->dd_growth = dd_growth;
req               478 fs/ubifs/budget.c 	if (req->fast) {
req               515 fs/ubifs/budget.c void ubifs_release_budget(struct ubifs_info *c, struct ubifs_budget_req *req)
req               517 fs/ubifs/budget.c 	ubifs_assert(c, req->new_page <= 1);
req               518 fs/ubifs/budget.c 	ubifs_assert(c, req->dirtied_page <= 1);
req               519 fs/ubifs/budget.c 	ubifs_assert(c, req->new_dent <= 1);
req               520 fs/ubifs/budget.c 	ubifs_assert(c, req->mod_dent <= 1);
req               521 fs/ubifs/budget.c 	ubifs_assert(c, req->new_ino <= 1);
req               522 fs/ubifs/budget.c 	ubifs_assert(c, req->new_ino_d <= UBIFS_MAX_INO_DATA);
req               523 fs/ubifs/budget.c 	ubifs_assert(c, req->dirtied_ino <= 4);
req               524 fs/ubifs/budget.c 	ubifs_assert(c, req->dirtied_ino_d <= UBIFS_MAX_INO_DATA * 4);
req               525 fs/ubifs/budget.c 	ubifs_assert(c, !(req->new_ino_d & 7));
req               526 fs/ubifs/budget.c 	ubifs_assert(c, !(req->dirtied_ino_d & 7));
req               527 fs/ubifs/budget.c 	if (!req->recalculate) {
req               528 fs/ubifs/budget.c 		ubifs_assert(c, req->idx_growth >= 0);
req               529 fs/ubifs/budget.c 		ubifs_assert(c, req->data_growth >= 0);
req               530 fs/ubifs/budget.c 		ubifs_assert(c, req->dd_growth >= 0);
req               533 fs/ubifs/budget.c 	if (req->recalculate) {
req               534 fs/ubifs/budget.c 		req->data_growth = calc_data_growth(c, req);
req               535 fs/ubifs/budget.c 		req->dd_growth = calc_dd_growth(c, req);
req               536 fs/ubifs/budget.c 		req->idx_growth = calc_idx_growth(c, req);
req               539 fs/ubifs/budget.c 	if (!req->data_growth && !req->dd_growth)
req               546 fs/ubifs/budget.c 	c->bi.idx_growth -= req->idx_growth;
req               547 fs/ubifs/budget.c 	c->bi.uncommitted_idx += req->idx_growth;
req               548 fs/ubifs/budget.c 	c->bi.data_growth -= req->data_growth;
req               549 fs/ubifs/budget.c 	c->bi.dd_growth -= req->dd_growth;
req               597 fs/ubifs/budget.c 	struct ubifs_budget_req req;
req               599 fs/ubifs/budget.c 	memset(&req, 0, sizeof(struct ubifs_budget_req));
req               601 fs/ubifs/budget.c 	req.dd_growth = c->bi.inode_budget + ALIGN(ui->data_len, 8);
req               602 fs/ubifs/budget.c 	ubifs_release_budget(c, &req);
req               546 fs/ubifs/debug.c void ubifs_dump_budget_req(const struct ubifs_budget_req *req)
req               550 fs/ubifs/debug.c 	       req->new_ino, req->dirtied_ino);
req               552 fs/ubifs/debug.c 	       req->new_ino_d, req->dirtied_ino_d);
req               554 fs/ubifs/debug.c 	       req->new_page, req->dirtied_page);
req               556 fs/ubifs/debug.c 	       req->new_dent, req->mod_dent);
req               557 fs/ubifs/debug.c 	pr_err("\tidx_growth  %d\n", req->idx_growth);
req               559 fs/ubifs/debug.c 	       req->data_growth, req->dd_growth);
req               246 fs/ubifs/debug.h void ubifs_dump_budget_req(const struct ubifs_budget_req *req);
req               286 fs/ubifs/dir.c 	struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
req               300 fs/ubifs/dir.c 	err = ubifs_budget_space(c, &req);
req               329 fs/ubifs/dir.c 	ubifs_release_budget(c, &req);
req               345 fs/ubifs/dir.c 	ubifs_release_budget(c, &req);
req               355 fs/ubifs/dir.c 	struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1};
req               373 fs/ubifs/dir.c 	err = ubifs_budget_space(c, &req);
req               381 fs/ubifs/dir.c 		ubifs_release_budget(c, &req);
req               423 fs/ubifs/dir.c 	ubifs_release_budget(c, &req);
req               434 fs/ubifs/dir.c 	ubifs_release_budget(c, &req);
req               695 fs/ubifs/dir.c 	struct ubifs_budget_req req = { .new_dent = 1, .dirtied_ino = 2,
req               722 fs/ubifs/dir.c 	err = ubifs_budget_space(c, &req);
req               743 fs/ubifs/dir.c 	ubifs_release_budget(c, &req);
req               755 fs/ubifs/dir.c 	ubifs_release_budget(c, &req);
req               768 fs/ubifs/dir.c 	struct ubifs_budget_req req = { .mod_dent = 1, .dirtied_ino = 2 };
req               799 fs/ubifs/dir.c 	err = ubifs_budget_space(c, &req);
req               818 fs/ubifs/dir.c 		ubifs_release_budget(c, &req);
req               833 fs/ubifs/dir.c 		ubifs_release_budget(c, &req);
req               874 fs/ubifs/dir.c 	struct ubifs_budget_req req = { .mod_dent = 1, .dirtied_ino = 2 };
req               901 fs/ubifs/dir.c 	err = ubifs_budget_space(c, &req);
req               921 fs/ubifs/dir.c 		ubifs_release_budget(c, &req);
req               937 fs/ubifs/dir.c 		ubifs_release_budget(c, &req);
req               949 fs/ubifs/dir.c 	struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1 };
req               960 fs/ubifs/dir.c 	err = ubifs_budget_space(c, &req);
req               994 fs/ubifs/dir.c 	ubifs_release_budget(c, &req);
req              1010 fs/ubifs/dir.c 	ubifs_release_budget(c, &req);
req              1024 fs/ubifs/dir.c 	struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
req              1042 fs/ubifs/dir.c 	req.new_ino_d = ALIGN(devlen, 8);
req              1043 fs/ubifs/dir.c 	err = ubifs_budget_space(c, &req);
req              1083 fs/ubifs/dir.c 	ubifs_release_budget(c, &req);
req              1099 fs/ubifs/dir.c 	ubifs_release_budget(c, &req);
req              1112 fs/ubifs/dir.c 	struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
req              1129 fs/ubifs/dir.c 	err = ubifs_budget_space(c, &req);
req              1198 fs/ubifs/dir.c 	ubifs_release_budget(c, &req);
req              1260 fs/ubifs/dir.c 	struct ubifs_budget_req req = { .new_dent = 1, .mod_dent = 1,
req              1308 fs/ubifs/dir.c 	err = ubifs_budget_space(c, &req);
req              1318 fs/ubifs/dir.c 		ubifs_release_budget(c, &req);
req              1440 fs/ubifs/dir.c 	ubifs_release_budget(c, &req);
req              1482 fs/ubifs/dir.c 	ubifs_release_budget(c, &req);
req              1492 fs/ubifs/dir.c 	struct ubifs_budget_req req = { .new_dent = 1, .mod_dent = 1,
req              1536 fs/ubifs/dir.c 	ubifs_release_budget(c, &req);
req               198 fs/ubifs/file.c 	struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };
req               200 fs/ubifs/file.c 	ubifs_release_budget(c, &req);
req               212 fs/ubifs/file.c 	struct ubifs_budget_req req = { .dd_growth = c->bi.page_budget};
req               214 fs/ubifs/file.c 	ubifs_release_budget(c, &req);
req               224 fs/ubifs/file.c 	struct ubifs_budget_req req = { .new_page = 1 };
req               241 fs/ubifs/file.c 		req.dirtied_ino = 1;
req               243 fs/ubifs/file.c 	err = ubifs_budget_space(c, &req);
req               249 fs/ubifs/file.c 		ubifs_release_budget(c, &req);
req               261 fs/ubifs/file.c 				ubifs_release_budget(c, &req);
req               328 fs/ubifs/file.c 	struct ubifs_budget_req req = { .fast = 1 };
req               355 fs/ubifs/file.c 		req.dirtied_ino = 1;
req               365 fs/ubifs/file.c 			req.new_page = 1;
req               372 fs/ubifs/file.c 			req.dirtied_page = 1;
req               382 fs/ubifs/file.c 				req.dirtied_ino = 1;
req               386 fs/ubifs/file.c 	return ubifs_budget_space(c, &req);
req              1112 fs/ubifs/file.c 	struct ubifs_budget_req req;
req              1118 fs/ubifs/file.c 	memset(&req, 0, sizeof(struct ubifs_budget_req));
req              1126 fs/ubifs/file.c 		req.dirtied_page = 1;
req              1128 fs/ubifs/file.c 	req.dirtied_ino = 1;
req              1130 fs/ubifs/file.c 	req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;
req              1131 fs/ubifs/file.c 	err = ubifs_budget_space(c, &req);
req              1196 fs/ubifs/file.c 		ubifs_release_budget(c, &req);
req              1220 fs/ubifs/file.c 	struct ubifs_budget_req req = { .dirtied_ino = 1,
req              1223 fs/ubifs/file.c 	err = ubifs_budget_space(c, &req);
req              1254 fs/ubifs/file.c 		ubifs_release_budget(c, &req);
req              1376 fs/ubifs/file.c 	struct ubifs_budget_req req = { .dirtied_ino = 1,
req              1383 fs/ubifs/file.c 	err = ubifs_budget_space(c, &req);
req              1399 fs/ubifs/file.c 		ubifs_release_budget(c, &req);
req              1419 fs/ubifs/file.c 		struct ubifs_budget_req req = { .dirtied_ino = 1,
req              1422 fs/ubifs/file.c 		err = ubifs_budget_space(c, &req);
req              1432 fs/ubifs/file.c 			ubifs_release_budget(c, &req);
req              1513 fs/ubifs/file.c 	struct ubifs_budget_req req = { .new_page = 1 };
req              1547 fs/ubifs/file.c 		req.dirtied_ino = 1;
req              1549 fs/ubifs/file.c 	err = ubifs_budget_space(c, &req);
req              1592 fs/ubifs/file.c 	ubifs_release_budget(c, &req);
req               103 fs/ubifs/ioctl.c 	struct ubifs_budget_req req = { .dirtied_ino = 1,
req               106 fs/ubifs/ioctl.c 	err = ubifs_budget_space(c, &req);
req               125 fs/ubifs/ioctl.c 		ubifs_release_budget(c, &req);
req               133 fs/ubifs/ioctl.c 	ubifs_release_budget(c, &req);
req              1791 fs/ubifs/ubifs.h int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req);
req              1792 fs/ubifs/ubifs.h void ubifs_release_budget(struct ubifs_info *c, struct ubifs_budget_req *req);
req              1796 fs/ubifs/ubifs.h 			  struct ubifs_budget_req *req);
req              1798 fs/ubifs/ubifs.h 				struct ubifs_budget_req *req);
req              1800 fs/ubifs/ubifs.h 			 struct ubifs_budget_req *req);
req                87 fs/ubifs/xattr.c 	struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
req               109 fs/ubifs/xattr.c 	err = ubifs_budget_space(c, &req);
req               158 fs/ubifs/xattr.c 	ubifs_release_budget(c, &req);
req               174 fs/ubifs/xattr.c 	ubifs_release_budget(c, &req);
req               198 fs/ubifs/xattr.c 	struct ubifs_budget_req req = { .dirtied_ino = 2,
req               202 fs/ubifs/xattr.c 	err = ubifs_budget_space(c, &req);
req               235 fs/ubifs/xattr.c 	ubifs_release_budget(c, &req);
req               244 fs/ubifs/xattr.c 	ubifs_release_budget(c, &req);
req               457 fs/ubifs/xattr.c 	struct ubifs_budget_req req = { .dirtied_ino = 2, .mod_dent = 1,
req               462 fs/ubifs/xattr.c 	err = ubifs_budget_space(c, &req);
req               478 fs/ubifs/xattr.c 	ubifs_release_budget(c, &req);
req               487 fs/ubifs/xattr.c 	ubifs_release_budget(c, &req);
req                20 fs/verity/enable.c 				   struct ahash_request *req)
req                69 fs/verity/enable.c 		err = fsverity_hash_page(params, inode, req, src_page,
req               118 fs/verity/enable.c 	struct ahash_request *req;
req               130 fs/verity/enable.c 	req = ahash_request_alloc(params->hash_alg->tfm, GFP_KERNEL);
req               131 fs/verity/enable.c 	if (!pending_hashes || !req)
req               143 fs/verity/enable.c 					      pending_hashes, req);
req               153 fs/verity/enable.c 	ahash_request_free(req);
req               123 fs/verity/fsverity_private.h 		       struct ahash_request *req, struct page *page, u8 *out);
req               108 fs/verity/hash_algs.c 	struct ahash_request *req = NULL;
req               122 fs/verity/hash_algs.c 	req = ahash_request_alloc(alg->tfm, GFP_KERNEL);
req               123 fs/verity/hash_algs.c 	if (!req) {
req               144 fs/verity/hash_algs.c 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
req               147 fs/verity/hash_algs.c 	ahash_request_set_crypt(req, &sg, NULL, padded_salt_size);
req               149 fs/verity/hash_algs.c 	err = crypto_wait_req(crypto_ahash_init(req), &wait);
req               153 fs/verity/hash_algs.c 	err = crypto_wait_req(crypto_ahash_update(req), &wait);
req               157 fs/verity/hash_algs.c 	err = crypto_ahash_export(req, hashstate);
req               161 fs/verity/hash_algs.c 	ahash_request_free(req);
req               186 fs/verity/hash_algs.c 		       struct ahash_request *req, struct page *page, u8 *out)
req               197 fs/verity/hash_algs.c 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
req               200 fs/verity/hash_algs.c 	ahash_request_set_crypt(req, &sg, out, PAGE_SIZE);
req               203 fs/verity/hash_algs.c 		err = crypto_ahash_import(req, params->hashstate);
req               209 fs/verity/hash_algs.c 		err = crypto_ahash_finup(req);
req               211 fs/verity/hash_algs.c 		err = crypto_ahash_digest(req);
req               235 fs/verity/hash_algs.c 	struct ahash_request *req;
req               240 fs/verity/hash_algs.c 	req = ahash_request_alloc(alg->tfm, GFP_KERNEL);
req               241 fs/verity/hash_algs.c 	if (!req)
req               245 fs/verity/hash_algs.c 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
req               248 fs/verity/hash_algs.c 	ahash_request_set_crypt(req, &sg, out, size);
req               250 fs/verity/hash_algs.c 	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
req               252 fs/verity/hash_algs.c 	ahash_request_free(req);
req                87 fs/verity/verify.c 			struct ahash_request *req, struct page *data_page)
req               153 fs/verity/verify.c 		err = fsverity_hash_page(params, inode, req, hpage, real_hash);
req               168 fs/verity/verify.c 	err = fsverity_hash_page(params, inode, req, data_page, real_hash);
req               191 fs/verity/verify.c 	struct ahash_request *req;
req               194 fs/verity/verify.c 	req = ahash_request_alloc(vi->tree_params.hash_alg->tfm, GFP_NOFS);
req               195 fs/verity/verify.c 	if (unlikely(!req))
req               198 fs/verity/verify.c 	valid = verify_page(inode, vi, req, page);
req               200 fs/verity/verify.c 	ahash_request_free(req);
req               225 fs/verity/verify.c 	struct ahash_request *req;
req               229 fs/verity/verify.c 	req = ahash_request_alloc(vi->tree_params.hash_alg->tfm, GFP_NOFS);
req               230 fs/verity/verify.c 	if (unlikely(!req)) {
req               239 fs/verity/verify.c 		if (!PageError(page) && !verify_page(inode, vi, req, page))
req               243 fs/verity/verify.c 	ahash_request_free(req);
req                48 include/crypto/acompress.h 	int (*compress)(struct acomp_req *req);
req                49 include/crypto/acompress.h 	int (*decompress)(struct acomp_req *req);
req                77 include/crypto/acompress.h 	int (*compress)(struct acomp_req *req);
req                78 include/crypto/acompress.h 	int (*decompress)(struct acomp_req *req);
req               135 include/crypto/acompress.h static inline void acomp_request_set_tfm(struct acomp_req *req,
req               138 include/crypto/acompress.h 	req->base.tfm = crypto_acomp_tfm(tfm);
req               141 include/crypto/acompress.h static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
req               143 include/crypto/acompress.h 	return __crypto_acomp_tfm(req->base.tfm);
req               181 include/crypto/acompress.h void acomp_request_free(struct acomp_req *req);
req               194 include/crypto/acompress.h static inline void acomp_request_set_callback(struct acomp_req *req,
req               199 include/crypto/acompress.h 	req->base.complete = cmpl;
req               200 include/crypto/acompress.h 	req->base.data = data;
req               201 include/crypto/acompress.h 	req->base.flags = flgs;
req               217 include/crypto/acompress.h static inline void acomp_request_set_params(struct acomp_req *req,
req               223 include/crypto/acompress.h 	req->src = src;
req               224 include/crypto/acompress.h 	req->dst = dst;
req               225 include/crypto/acompress.h 	req->slen = slen;
req               226 include/crypto/acompress.h 	req->dlen = dlen;
req               228 include/crypto/acompress.h 	if (!req->dst)
req               229 include/crypto/acompress.h 		req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
req               241 include/crypto/acompress.h static inline int crypto_acomp_compress(struct acomp_req *req)
req               243 include/crypto/acompress.h 	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
req               245 include/crypto/acompress.h 	unsigned int slen = req->slen;
req               249 include/crypto/acompress.h 	ret = tfm->compress(req);
req               263 include/crypto/acompress.h static inline int crypto_acomp_decompress(struct acomp_req *req)
req               265 include/crypto/acompress.h 	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
req               267 include/crypto/acompress.h 	unsigned int slen = req->slen;
req               271 include/crypto/acompress.h 	ret = tfm->decompress(req);
req               134 include/crypto/aead.h 	int (*encrypt)(struct aead_request *req);
req               135 include/crypto/aead.h 	int (*decrypt)(struct aead_request *req);
req               296 include/crypto/aead.h static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
req               298 include/crypto/aead.h 	return __crypto_aead_cast(req->base.tfm);
req               320 include/crypto/aead.h int crypto_aead_encrypt(struct aead_request *req);
req               344 include/crypto/aead.h int crypto_aead_decrypt(struct aead_request *req);
req               376 include/crypto/aead.h static inline void aead_request_set_tfm(struct aead_request *req,
req               379 include/crypto/aead.h 	req->base.tfm = crypto_aead_tfm(tfm);
req               396 include/crypto/aead.h 	struct aead_request *req;
req               398 include/crypto/aead.h 	req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp);
req               400 include/crypto/aead.h 	if (likely(req))
req               401 include/crypto/aead.h 		aead_request_set_tfm(req, tfm);
req               403 include/crypto/aead.h 	return req;
req               410 include/crypto/aead.h static inline void aead_request_free(struct aead_request *req)
req               412 include/crypto/aead.h 	kzfree(req);
req               440 include/crypto/aead.h static inline void aead_request_set_callback(struct aead_request *req,
req               445 include/crypto/aead.h 	req->base.complete = compl;
req               446 include/crypto/aead.h 	req->base.data = data;
req               447 include/crypto/aead.h 	req->base.flags = flags;
req               480 include/crypto/aead.h static inline void aead_request_set_crypt(struct aead_request *req,
req               485 include/crypto/aead.h 	req->src = src;
req               486 include/crypto/aead.h 	req->dst = dst;
req               487 include/crypto/aead.h 	req->cryptlen = cryptlen;
req               488 include/crypto/aead.h 	req->iv = iv;
req               499 include/crypto/aead.h static inline void aead_request_set_ad(struct aead_request *req,
req               502 include/crypto/aead.h 	req->assoclen = assoclen;
req                93 include/crypto/akcipher.h 	int (*sign)(struct akcipher_request *req);
req                94 include/crypto/akcipher.h 	int (*verify)(struct akcipher_request *req);
req                95 include/crypto/akcipher.h 	int (*encrypt)(struct akcipher_request *req);
req                96 include/crypto/akcipher.h 	int (*decrypt)(struct akcipher_request *req);
req               161 include/crypto/akcipher.h static inline void akcipher_request_set_tfm(struct akcipher_request *req,
req               164 include/crypto/akcipher.h 	req->base.tfm = crypto_akcipher_tfm(tfm);
req               168 include/crypto/akcipher.h 	struct akcipher_request *req)
req               170 include/crypto/akcipher.h 	return __crypto_akcipher_tfm(req->base.tfm);
req               194 include/crypto/akcipher.h 	struct akcipher_request *req;
req               196 include/crypto/akcipher.h 	req = kmalloc(sizeof(*req) + crypto_akcipher_reqsize(tfm), gfp);
req               197 include/crypto/akcipher.h 	if (likely(req))
req               198 include/crypto/akcipher.h 		akcipher_request_set_tfm(req, tfm);
req               200 include/crypto/akcipher.h 	return req;
req               208 include/crypto/akcipher.h static inline void akcipher_request_free(struct akcipher_request *req)
req               210 include/crypto/akcipher.h 	kzfree(req);
req               224 include/crypto/akcipher.h static inline void akcipher_request_set_callback(struct akcipher_request *req,
req               229 include/crypto/akcipher.h 	req->base.complete = cmpl;
req               230 include/crypto/akcipher.h 	req->base.data = data;
req               231 include/crypto/akcipher.h 	req->base.flags = flgs;
req               246 include/crypto/akcipher.h static inline void akcipher_request_set_crypt(struct akcipher_request *req,
req               252 include/crypto/akcipher.h 	req->src = src;
req               253 include/crypto/akcipher.h 	req->dst = dst;
req               254 include/crypto/akcipher.h 	req->src_len = src_len;
req               255 include/crypto/akcipher.h 	req->dst_len = dst_len;
req               285 include/crypto/akcipher.h static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
req               287 include/crypto/akcipher.h 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               290 include/crypto/akcipher.h 	unsigned int src_len = req->src_len;
req               294 include/crypto/akcipher.h 	ret = alg->encrypt(req);
req               309 include/crypto/akcipher.h static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
req               311 include/crypto/akcipher.h 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               314 include/crypto/akcipher.h 	unsigned int src_len = req->src_len;
req               318 include/crypto/akcipher.h 	ret = alg->decrypt(req);
req               333 include/crypto/akcipher.h static inline int crypto_akcipher_sign(struct akcipher_request *req)
req               335 include/crypto/akcipher.h 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               341 include/crypto/akcipher.h 	ret = alg->sign(req);
req               360 include/crypto/akcipher.h static inline int crypto_akcipher_verify(struct akcipher_request *req)
req               362 include/crypto/akcipher.h 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
req               368 include/crypto/akcipher.h 	ret = alg->verify(req);
req               250 include/crypto/algapi.h int ablkcipher_walk_done(struct ablkcipher_request *req,
req               252 include/crypto/algapi.h int ablkcipher_walk_phys(struct ablkcipher_request *req,
req               368 include/crypto/algapi.h static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
req               370 include/crypto/algapi.h 	return req->__ctx;
req                59 include/crypto/cbc.h static inline int crypto_cbc_encrypt_walk(struct skcipher_request *req,
req                63 include/crypto/cbc.h 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req                67 include/crypto/cbc.h 	err = skcipher_walk_virt(&walk, req, false);
req                51 include/crypto/chacha.h int crypto_chacha_crypt(struct skcipher_request *req);
req                52 include/crypto/chacha.h int crypto_xchacha_crypt(struct skcipher_request *req);
req                47 include/crypto/cryptd.h struct shash_desc *cryptd_shash_desc(struct ahash_request *req);
req                20 include/crypto/ctr.h static inline int crypto_ctr_encrypt_walk(struct skcipher_request *req,
req                24 include/crypto/ctr.h 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req                34 include/crypto/ctr.h 	err = skcipher_walk_virt(&walk, req, false);
req                87 include/crypto/engine.h 						 struct ablkcipher_request *req);
req                89 include/crypto/engine.h 					   struct aead_request *req);
req                91 include/crypto/engine.h 					       struct akcipher_request *req);
req                93 include/crypto/engine.h 					       struct ahash_request *req);
req                95 include/crypto/engine.h 					       struct skcipher_request *req);
req                97 include/crypto/engine.h 					struct ablkcipher_request *req, int err);
req                99 include/crypto/engine.h 				  struct aead_request *req, int err);
req               101 include/crypto/engine.h 				      struct akcipher_request *req, int err);
req               103 include/crypto/engine.h 				  struct ahash_request *req, int err);
req               105 include/crypto/engine.h 				      struct skcipher_request *req, int err);
req               129 include/crypto/hash.h 	int (*init)(struct ahash_request *req);
req               130 include/crypto/hash.h 	int (*update)(struct ahash_request *req);
req               131 include/crypto/hash.h 	int (*final)(struct ahash_request *req);
req               132 include/crypto/hash.h 	int (*finup)(struct ahash_request *req);
req               133 include/crypto/hash.h 	int (*digest)(struct ahash_request *req);
req               134 include/crypto/hash.h 	int (*export)(struct ahash_request *req, void *out);
req               135 include/crypto/hash.h 	int (*import)(struct ahash_request *req, const void *in);
req               204 include/crypto/hash.h 	int (*init)(struct ahash_request *req);
req               205 include/crypto/hash.h 	int (*update)(struct ahash_request *req);
req               206 include/crypto/hash.h 	int (*final)(struct ahash_request *req);
req               207 include/crypto/hash.h 	int (*finup)(struct ahash_request *req);
req               208 include/crypto/hash.h 	int (*digest)(struct ahash_request *req);
req               209 include/crypto/hash.h 	int (*export)(struct ahash_request *req, void *out);
req               210 include/crypto/hash.h 	int (*import)(struct ahash_request *req, const void *in);
req               379 include/crypto/hash.h 	struct ahash_request *req)
req               381 include/crypto/hash.h 	return __crypto_ahash_cast(req->base.tfm);
req               395 include/crypto/hash.h static inline void *ahash_request_ctx(struct ahash_request *req)
req               397 include/crypto/hash.h 	return req->__ctx;
req               425 include/crypto/hash.h int crypto_ahash_finup(struct ahash_request *req);
req               442 include/crypto/hash.h int crypto_ahash_final(struct ahash_request *req);
req               455 include/crypto/hash.h int crypto_ahash_digest(struct ahash_request *req);
req               468 include/crypto/hash.h static inline int crypto_ahash_export(struct ahash_request *req, void *out)
req               470 include/crypto/hash.h 	return crypto_ahash_reqtfm(req)->export(req, out);
req               484 include/crypto/hash.h static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
req               486 include/crypto/hash.h 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               491 include/crypto/hash.h 	return tfm->import(req, in);
req               505 include/crypto/hash.h static inline int crypto_ahash_init(struct ahash_request *req)
req               507 include/crypto/hash.h 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               512 include/crypto/hash.h 	return tfm->init(req);
req               526 include/crypto/hash.h static inline int crypto_ahash_update(struct ahash_request *req)
req               528 include/crypto/hash.h 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
req               530 include/crypto/hash.h 	unsigned int nbytes = req->nbytes;
req               534 include/crypto/hash.h 	ret = crypto_ahash_reqtfm(req)->update(req);
req               558 include/crypto/hash.h static inline void ahash_request_set_tfm(struct ahash_request *req,
req               561 include/crypto/hash.h 	req->base.tfm = crypto_ahash_tfm(tfm);
req               579 include/crypto/hash.h 	struct ahash_request *req;
req               581 include/crypto/hash.h 	req = kmalloc(sizeof(struct ahash_request) +
req               584 include/crypto/hash.h 	if (likely(req))
req               585 include/crypto/hash.h 		ahash_request_set_tfm(req, tfm);
req               587 include/crypto/hash.h 	return req;
req               594 include/crypto/hash.h static inline void ahash_request_free(struct ahash_request *req)
req               596 include/crypto/hash.h 	kzfree(req);
req               599 include/crypto/hash.h static inline void ahash_request_zero(struct ahash_request *req)
req               601 include/crypto/hash.h 	memzero_explicit(req, sizeof(*req) +
req               602 include/crypto/hash.h 			      crypto_ahash_reqsize(crypto_ahash_reqtfm(req)));
req               606 include/crypto/hash.h 	struct crypto_async_request *req)
req               608 include/crypto/hash.h 	return container_of(req, struct ahash_request, base);
req               636 include/crypto/hash.h static inline void ahash_request_set_callback(struct ahash_request *req,
req               641 include/crypto/hash.h 	req->base.complete = compl;
req               642 include/crypto/hash.h 	req->base.data = data;
req               643 include/crypto/hash.h 	req->base.flags = flags;
req               659 include/crypto/hash.h static inline void ahash_request_set_crypt(struct ahash_request *req,
req               663 include/crypto/hash.h 	req->src = src;
req               664 include/crypto/hash.h 	req->nbytes = nbytes;
req               665 include/crypto/hash.h 	req->result = result;
req                16 include/crypto/internal/acompress.h static inline void *acomp_request_ctx(struct acomp_req *req)
req                18 include/crypto/internal/acompress.h 	return req->__ctx;
req                26 include/crypto/internal/acompress.h static inline void acomp_request_complete(struct acomp_req *req,
req                29 include/crypto/internal/acompress.h 	req->base.complete(&req->base, err);
req                39 include/crypto/internal/acompress.h 	struct acomp_req *req;
req                41 include/crypto/internal/acompress.h 	req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
req                42 include/crypto/internal/acompress.h 	if (likely(req))
req                43 include/crypto/internal/acompress.h 		acomp_request_set_tfm(req, tfm);
req                44 include/crypto/internal/acompress.h 	return req;
req                47 include/crypto/internal/acompress.h static inline void __acomp_request_free(struct acomp_req *req)
req                49 include/crypto/internal/acompress.h 	kzfree(req);
req                63 include/crypto/internal/aead.h static inline void *aead_request_ctx(struct aead_request *req)
req                65 include/crypto/internal/aead.h 	return req->__ctx;
req                68 include/crypto/internal/aead.h static inline void aead_request_complete(struct aead_request *req, int err)
req                70 include/crypto/internal/aead.h 	req->base.complete(&req->base, err);
req                73 include/crypto/internal/aead.h static inline u32 aead_request_flags(struct aead_request *req)
req                75 include/crypto/internal/aead.h 	return req->base.flags;
req                79 include/crypto/internal/aead.h 	struct crypto_async_request *req)
req                81 include/crypto/internal/aead.h 	return container_of(req, struct aead_request, base);
req               141 include/crypto/internal/aead.h 	struct crypto_async_request *req;
req               143 include/crypto/internal/aead.h 	req = crypto_dequeue_request(&queue->base);
req               145 include/crypto/internal/aead.h 	return req ? container_of(req, struct aead_request, base) : NULL;
req               150 include/crypto/internal/aead.h 	struct crypto_async_request *req;
req               152 include/crypto/internal/aead.h 	req = crypto_get_backlog(&queue->base);
req               154 include/crypto/internal/aead.h 	return req ? container_of(req, struct aead_request, base) : NULL;
req                31 include/crypto/internal/akcipher.h static inline void *akcipher_request_ctx(struct akcipher_request *req)
req                33 include/crypto/internal/akcipher.h 	return req->__ctx;
req                47 include/crypto/internal/akcipher.h static inline void akcipher_request_complete(struct akcipher_request *req,
req                50 include/crypto/internal/akcipher.h 	req->base.complete(&req->base, err);
req                51 include/crypto/internal/hash.h int crypto_hash_walk_first(struct ahash_request *req,
req                53 include/crypto/internal/hash.h int crypto_ahash_walk_first(struct ahash_request *req,
req               120 include/crypto/internal/hash.h int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
req               121 include/crypto/internal/hash.h int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
req               122 include/crypto/internal/hash.h int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc);
req               171 include/crypto/internal/hash.h static inline void ahash_request_complete(struct ahash_request *req, int err)
req               173 include/crypto/internal/hash.h 	req->base.complete(&req->base, err);
req               176 include/crypto/internal/hash.h static inline u32 ahash_request_flags(struct ahash_request *req)
req               178 include/crypto/internal/hash.h 	return req->base.flags;
req                16 include/crypto/internal/kpp.h static inline void *kpp_request_ctx(struct kpp_request *req)
req                18 include/crypto/internal/kpp.h 	return req->__ctx;
req                26 include/crypto/internal/kpp.h static inline void kpp_request_complete(struct kpp_request *req, int err)
req                28 include/crypto/internal/kpp.h 	req->base.complete(&req->base, err);
req                93 include/crypto/internal/scompress.h struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
req                94 include/crypto/internal/scompress.h void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
req                86 include/crypto/internal/skcipher.h static inline void skcipher_request_complete(struct skcipher_request *req, int err)
req                88 include/crypto/internal/skcipher.h 	req->base.complete(&req->base, err);
req               138 include/crypto/internal/skcipher.h 		       struct skcipher_request *req,
req               142 include/crypto/internal/skcipher.h 			struct skcipher_request *req);
req               143 include/crypto/internal/skcipher.h int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
req               146 include/crypto/internal/skcipher.h 			       struct aead_request *req, bool atomic);
req               148 include/crypto/internal/skcipher.h 			       struct aead_request *req, bool atomic);
req               156 include/crypto/internal/skcipher.h static inline void ablkcipher_request_complete(struct ablkcipher_request *req,
req               159 include/crypto/internal/skcipher.h 	req->base.complete(&req->base, err);
req               162 include/crypto/internal/skcipher.h static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req)
req               164 include/crypto/internal/skcipher.h 	return req->base.flags;
req               172 include/crypto/internal/skcipher.h static inline void *skcipher_request_ctx(struct skcipher_request *req)
req               174 include/crypto/internal/skcipher.h 	return req->__ctx;
req               177 include/crypto/internal/skcipher.h static inline u32 skcipher_request_flags(struct skcipher_request *req)
req               179 include/crypto/internal/skcipher.h 	return req->base.flags;
req                74 include/crypto/kpp.h 	int (*generate_public_key)(struct kpp_request *req);
req                75 include/crypto/kpp.h 	int (*compute_shared_secret)(struct kpp_request *req);
req               132 include/crypto/kpp.h static inline void kpp_request_set_tfm(struct kpp_request *req,
req               135 include/crypto/kpp.h 	req->base.tfm = crypto_kpp_tfm(tfm);
req               138 include/crypto/kpp.h static inline struct crypto_kpp *crypto_kpp_reqtfm(struct kpp_request *req)
req               140 include/crypto/kpp.h 	return __crypto_kpp_tfm(req->base.tfm);
req               174 include/crypto/kpp.h 	struct kpp_request *req;
req               176 include/crypto/kpp.h 	req = kmalloc(sizeof(*req) + crypto_kpp_reqsize(tfm), gfp);
req               177 include/crypto/kpp.h 	if (likely(req))
req               178 include/crypto/kpp.h 		kpp_request_set_tfm(req, tfm);
req               180 include/crypto/kpp.h 	return req;
req               188 include/crypto/kpp.h static inline void kpp_request_free(struct kpp_request *req)
req               190 include/crypto/kpp.h 	kzfree(req);
req               204 include/crypto/kpp.h static inline void kpp_request_set_callback(struct kpp_request *req,
req               209 include/crypto/kpp.h 	req->base.complete = cmpl;
req               210 include/crypto/kpp.h 	req->base.data = data;
req               211 include/crypto/kpp.h 	req->base.flags = flgs;
req               223 include/crypto/kpp.h static inline void kpp_request_set_input(struct kpp_request *req,
req               227 include/crypto/kpp.h 	req->src = input;
req               228 include/crypto/kpp.h 	req->src_len = input_len;
req               240 include/crypto/kpp.h static inline void kpp_request_set_output(struct kpp_request *req,
req               244 include/crypto/kpp.h 	req->dst = output;
req               245 include/crypto/kpp.h 	req->dst_len = output_len;
req               307 include/crypto/kpp.h static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
req               309 include/crypto/kpp.h 	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
req               315 include/crypto/kpp.h 	ret = alg->generate_public_key(req);
req               330 include/crypto/kpp.h static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
req               332 include/crypto/kpp.h 	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
req               338 include/crypto/kpp.h 	ret = alg->compute_shared_secret(req);
req                22 include/crypto/pcrypt.h static inline void *pcrypt_request_ctx(struct pcrypt_request *req)
req                24 include/crypto/pcrypt.h 	return req->__ctx;
req                28 include/crypto/pcrypt.h struct padata_priv *pcrypt_request_padata(struct pcrypt_request *req)
req                30 include/crypto/pcrypt.h 	return &req->padata;
req                40 include/crypto/skcipher.h 	int (*encrypt)(struct skcipher_request *req);
req                41 include/crypto/skcipher.h 	int (*decrypt)(struct skcipher_request *req);
req               114 include/crypto/skcipher.h 	int (*encrypt)(struct skcipher_request *req);
req               115 include/crypto/skcipher.h 	int (*decrypt)(struct skcipher_request *req);
req               428 include/crypto/skcipher.h 	struct skcipher_request *req)
req               430 include/crypto/skcipher.h 	return __crypto_skcipher_cast(req->base.tfm);
req               434 include/crypto/skcipher.h 	struct skcipher_request *req)
req               436 include/crypto/skcipher.h 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               452 include/crypto/skcipher.h int crypto_skcipher_encrypt(struct skcipher_request *req);
req               465 include/crypto/skcipher.h int crypto_skcipher_decrypt(struct skcipher_request *req);
req               497 include/crypto/skcipher.h static inline void skcipher_request_set_tfm(struct skcipher_request *req,
req               500 include/crypto/skcipher.h 	req->base.tfm = crypto_skcipher_tfm(tfm);
req               503 include/crypto/skcipher.h static inline void skcipher_request_set_sync_tfm(struct skcipher_request *req,
req               506 include/crypto/skcipher.h 	skcipher_request_set_tfm(req, &tfm->base);
req               510 include/crypto/skcipher.h 	struct crypto_async_request *req)
req               512 include/crypto/skcipher.h 	return container_of(req, struct skcipher_request, base);
req               529 include/crypto/skcipher.h 	struct skcipher_request *req;
req               531 include/crypto/skcipher.h 	req = kmalloc(sizeof(struct skcipher_request) +
req               534 include/crypto/skcipher.h 	if (likely(req))
req               535 include/crypto/skcipher.h 		skcipher_request_set_tfm(req, tfm);
req               537 include/crypto/skcipher.h 	return req;
req               544 include/crypto/skcipher.h static inline void skcipher_request_free(struct skcipher_request *req)
req               546 include/crypto/skcipher.h 	kzfree(req);
req               549 include/crypto/skcipher.h static inline void skcipher_request_zero(struct skcipher_request *req)
req               551 include/crypto/skcipher.h 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
req               553 include/crypto/skcipher.h 	memzero_explicit(req, sizeof(*req) + crypto_skcipher_reqsize(tfm));
req               581 include/crypto/skcipher.h static inline void skcipher_request_set_callback(struct skcipher_request *req,
req               586 include/crypto/skcipher.h 	req->base.complete = compl;
req               587 include/crypto/skcipher.h 	req->base.data = data;
req               588 include/crypto/skcipher.h 	req->base.flags = flags;
req               608 include/crypto/skcipher.h 	struct skcipher_request *req,
req               612 include/crypto/skcipher.h 	req->src = src;
req               613 include/crypto/skcipher.h 	req->dst = dst;
req               614 include/crypto/skcipher.h 	req->cryptlen = cryptlen;
req               615 include/crypto/skcipher.h 	req->iv = iv;
req               168 include/drm/drm_legacy.h int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req);
req               169 include/drm/drm_legacy.h int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req);
req               547 include/linux/acpi.h 					     u32 *mask, u32 req);
req                35 include/linux/adb.h 	int (*send_request)(struct adb_request *req, int sync);
req                55 include/linux/adb.h int adb_request(struct adb_request *req, void (*done)(struct adb_request *),
req                16 include/linux/aio.h void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
req                19 include/linux/aio.h static inline void kiocb_set_cancel_fn(struct kiocb *req,
req               478 include/linux/bcma/bcma.h extern void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status,
req               380 include/linux/blk_types.h #define req_op(req) \
req               381 include/linux/blk_types.h 	((req)->cmd_flags & REQ_OP_MASK)
req               281 include/linux/blkdev.h static inline unsigned short req_get_ioprio(struct request *req)
req               283 include/linux/blkdev.h 	return req->ioprio;
req               124 include/linux/buffer_head.h BUFFER_FNS(Req, req)
req               125 include/linux/buffer_head.h TAS_BUFFER_FNS(Req, req)
req                55 include/linux/ceph/cls_lock_client.h int ceph_cls_assert_locked(struct ceph_osd_request *req, int which,
req               478 include/linux/ceph/osd_client.h int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp);
req               490 include/linux/ceph/osd_client.h extern void ceph_osdc_get_request(struct ceph_osd_request *req);
req               491 include/linux/ceph/osd_client.h extern void ceph_osdc_put_request(struct ceph_osd_request *req);
req               494 include/linux/ceph/osd_client.h 				   struct ceph_osd_request *req,
req               496 include/linux/ceph/osd_client.h extern void ceph_osdc_cancel_request(struct ceph_osd_request *req);
req               498 include/linux/ceph/osd_client.h 				  struct ceph_osd_request *req);
req               230 include/linux/clk-provider.h 					  struct clk_rate_request *req);
req               835 include/linux/clk-provider.h 			     struct clk_rate_request *req);
req               836 include/linux/clk-provider.h int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req);
req               838 include/linux/clk-provider.h 				     struct clk_rate_request *req);
req               840 include/linux/clk-provider.h 				 struct clk_rate_request *req,
req               148 include/linux/crypto.h typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
req               235 include/linux/crypto.h 	int (*encrypt)(struct ablkcipher_request *req);
req               236 include/linux/crypto.h 	int (*decrypt)(struct ablkcipher_request *req);
req               678 include/linux/crypto.h void crypto_req_done(struct crypto_async_request *req, int err);
req               721 include/linux/crypto.h 	int (*encrypt)(struct ablkcipher_request *req);
req               722 include/linux/crypto.h 	int (*decrypt)(struct ablkcipher_request *req);
req              1073 include/linux/crypto.h 	struct ablkcipher_request *req)
req              1075 include/linux/crypto.h 	return __crypto_ablkcipher_cast(req->base.tfm);
req              1089 include/linux/crypto.h static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
req              1092 include/linux/crypto.h 		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
req              1094 include/linux/crypto.h 	unsigned int nbytes = req->nbytes;
req              1098 include/linux/crypto.h 	ret = crt->encrypt(req);
req              1114 include/linux/crypto.h static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
req              1117 include/linux/crypto.h 		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
req              1119 include/linux/crypto.h 	unsigned int nbytes = req->nbytes;
req              1123 include/linux/crypto.h 	ret = crt->decrypt(req);
req              1160 include/linux/crypto.h 	struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
req              1162 include/linux/crypto.h 	req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
req              1166 include/linux/crypto.h 	struct crypto_async_request *req)
req              1168 include/linux/crypto.h 	return container_of(req, struct ablkcipher_request, base);
req              1185 include/linux/crypto.h 	struct ablkcipher_request *req;
req              1187 include/linux/crypto.h 	req = kmalloc(sizeof(struct ablkcipher_request) +
req              1190 include/linux/crypto.h 	if (likely(req))
req              1191 include/linux/crypto.h 		ablkcipher_request_set_tfm(req, tfm);
req              1193 include/linux/crypto.h 	return req;
req              1200 include/linux/crypto.h static inline void ablkcipher_request_free(struct ablkcipher_request *req)
req              1202 include/linux/crypto.h 	kzfree(req);
req              1231 include/linux/crypto.h 	struct ablkcipher_request *req,
req              1234 include/linux/crypto.h 	req->base.complete = compl;
req              1235 include/linux/crypto.h 	req->base.data = data;
req              1236 include/linux/crypto.h 	req->base.flags = flags;
req              1256 include/linux/crypto.h 	struct ablkcipher_request *req,
req              1260 include/linux/crypto.h 	req->src = src;
req              1261 include/linux/crypto.h 	req->dst = dst;
req              1262 include/linux/crypto.h 	req->nbytes = nbytes;
req              1263 include/linux/crypto.h 	req->info = iv;
req                16 include/linux/cuda.h extern int cuda_request(struct adb_request *req,
req               173 include/linux/dccp.h static inline struct dccp_request_sock *dccp_rsk(const struct request_sock *req)
req               175 include/linux/dccp.h 	return (struct dccp_request_sock *)req;
req               324 include/linux/dccp.h extern void dccp_syn_ack_timeout(const struct request_sock *req);
req                40 include/linux/greybus/hd.h 	int (*output)(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
req                79 include/linux/greybus/hd.h int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
req               449 include/linux/i3c/master.h 			   const struct i3c_ibi_setup *req);
req               643 include/linux/i3c/master.h 			   const struct i3c_ibi_setup *req);
req                18 include/linux/inet_diag.h 				    const struct inet_diag_req_v2 *req);
req                32 include/linux/inet_diag.h 				   const struct inet_diag_req_v2 *req);
req                40 include/linux/inet_diag.h 		      struct sk_buff *skb, const struct inet_diag_req_v2 *req,
req                50 include/linux/inet_diag.h 			    const struct inet_diag_req_v2 *req);
req                54 include/linux/inet_diag.h 				     const struct inet_diag_req_v2 *req);
req               796 include/linux/kvm_host.h bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
req               798 include/linux/kvm_host.h bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
req              1200 include/linux/kvm_host.h static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
req              1207 include/linux/kvm_host.h 	set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
req              1215 include/linux/kvm_host.h static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
req              1217 include/linux/kvm_host.h 	return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
req              1220 include/linux/kvm_host.h static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
req              1222 include/linux/kvm_host.h 	clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
req              1225 include/linux/kvm_host.h static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
req              1227 include/linux/kvm_host.h 	if (kvm_test_request(req, vcpu)) {
req              1228 include/linux/kvm_host.h 		kvm_clear_request(req, vcpu);
req               216 include/linux/lockd/lockd.h int		  nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout);
req              1738 include/linux/lsm_hooks.h 					struct request_sock *req);
req              1740 include/linux/lsm_hooks.h 				const struct request_sock *req);
req              1745 include/linux/lsm_hooks.h 	void (*req_classify_flow)(const struct request_sock *req,
req               135 include/linux/mlx5/vport.h 				       struct mlx5_hca_vport_context *req);
req                91 include/linux/mmc/host.h 	void	(*post_req)(struct mmc_host *host, struct mmc_request *req,
req                93 include/linux/mmc/host.h 	void	(*pre_req)(struct mmc_host *host, struct mmc_request *req);
req                94 include/linux/mmc/host.h 	void	(*request)(struct mmc_host *host, struct mmc_request *req);
req               489 include/linux/mtd/mtd.h 				       struct erase_info *req)
req               496 include/linux/mtd/mtd.h 	mod = mtd_mod_by_eb(req->addr, mtd);
req               498 include/linux/mtd/mtd.h 		req->addr -= mod;
req               499 include/linux/mtd/mtd.h 		req->len += mod;
req               502 include/linux/mtd/mtd.h 	mod = mtd_mod_by_eb(req->addr + req->len, mtd);
req               504 include/linux/mtd/mtd.h 		req->len += mtd->erasesize - mod;
req               200 include/linux/mtd/nand.h 	struct nand_page_io_req req;
req               627 include/linux/mtd/nand.h 					loff_t offs, struct mtd_oob_ops *req,
req               632 include/linux/mtd/nand.h 	iter->req.mode = req->mode;
req               633 include/linux/mtd/nand.h 	iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
req               634 include/linux/mtd/nand.h 	iter->req.ooboffs = req->ooboffs;
req               635 include/linux/mtd/nand.h 	iter->oobbytes_per_page = mtd_oobavail(mtd, req);
req               636 include/linux/mtd/nand.h 	iter->dataleft = req->len;
req               637 include/linux/mtd/nand.h 	iter->oobleft = req->ooblen;
req               638 include/linux/mtd/nand.h 	iter->req.databuf.in = req->datbuf;
req               639 include/linux/mtd/nand.h 	iter->req.datalen = min_t(unsigned int,
req               640 include/linux/mtd/nand.h 				  nand->memorg.pagesize - iter->req.dataoffs,
req               642 include/linux/mtd/nand.h 	iter->req.oobbuf.in = req->oobbuf;
req               643 include/linux/mtd/nand.h 	iter->req.ooblen = min_t(unsigned int,
req               644 include/linux/mtd/nand.h 				 iter->oobbytes_per_page - iter->req.ooboffs,
req               658 include/linux/mtd/nand.h 	nanddev_pos_next_page(nand, &iter->req.pos);
req               659 include/linux/mtd/nand.h 	iter->dataleft -= iter->req.datalen;
req               660 include/linux/mtd/nand.h 	iter->req.databuf.in += iter->req.datalen;
req               661 include/linux/mtd/nand.h 	iter->oobleft -= iter->req.ooblen;
req               662 include/linux/mtd/nand.h 	iter->req.oobbuf.in += iter->req.ooblen;
req               663 include/linux/mtd/nand.h 	iter->req.dataoffs = 0;
req               664 include/linux/mtd/nand.h 	iter->req.ooboffs = 0;
req               665 include/linux/mtd/nand.h 	iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
req               667 include/linux/mtd/nand.h 	iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
req               701 include/linux/mtd/nand.h #define nanddev_io_for_each_page(nand, start, req, iter)		\
req               702 include/linux/mtd/nand.h 	for (nanddev_io_iter_init(nand, start, req, iter);		\
req               114 include/linux/nfs_page.h #define NFS_WBACK_BUSY(req)	(test_bit(PG_BUSY,&(req)->wb_flags))
req               138 include/linux/nfs_page.h 				struct nfs_page *req);
req               140 include/linux/nfs_page.h extern	void nfs_unlock_request(struct nfs_page *req);
req               151 include/linux/nfs_page.h nfs_lock_request(struct nfs_page *req)
req               153 include/linux/nfs_page.h 	return !test_and_set_bit(PG_BUSY, &req->wb_flags);
req               162 include/linux/nfs_page.h nfs_list_add_request(struct nfs_page *req, struct list_head *head)
req               164 include/linux/nfs_page.h 	list_add_tail(&req->wb_list, head);
req               173 include/linux/nfs_page.h nfs_list_move_request(struct nfs_page *req, struct list_head *head)
req               175 include/linux/nfs_page.h 	list_move_tail(&req->wb_list, head);
req               183 include/linux/nfs_page.h nfs_list_remove_request(struct nfs_page *req)
req               185 include/linux/nfs_page.h 	if (list_empty(&req->wb_list))
req               187 include/linux/nfs_page.h 	list_del_init(&req->wb_list);
req               197 include/linux/nfs_page.h loff_t req_offset(struct nfs_page *req)
req               199 include/linux/nfs_page.h 	return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset;
req               203 include/linux/nfs_page.h nfs_req_openctx(struct nfs_page *req)
req               205 include/linux/nfs_page.h 	return req->wb_lock_context->open_context;
req              1511 include/linux/nfs_xdr.h 	struct nfs_page		*req;
req                84 include/linux/nvme-fc-driver.h 	void (*done)(struct nvmefc_ls_req *req, int status);
req               149 include/linux/nvme-fc-driver.h 	void (*done)(struct nvmefc_fcp_req *req);
req               518 include/linux/nvme-fc-driver.h 	void (*done)(struct nvmefc_tgt_ls_req *req);
req               109 include/linux/pm_qos.h static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
req               111 include/linux/pm_qos.h 	return req->dev != NULL;
req               117 include/linux/pm_qos.h 			 struct pm_qos_flags_request *req,
req               119 include/linux/pm_qos.h void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
req               121 include/linux/pm_qos.h void pm_qos_update_request(struct pm_qos_request *req,
req               123 include/linux/pm_qos.h void pm_qos_update_request_timeout(struct pm_qos_request *req,
req               125 include/linux/pm_qos.h void pm_qos_remove_request(struct pm_qos_request *req);
req               130 include/linux/pm_qos.h int pm_qos_request_active(struct pm_qos_request *req);
req               138 include/linux/pm_qos.h int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
req               140 include/linux/pm_qos.h int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
req               141 include/linux/pm_qos.h int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
req               151 include/linux/pm_qos.h 				    struct dev_pm_qos_request *req,
req               201 include/linux/pm_qos.h 					 struct dev_pm_qos_request *req,
req               205 include/linux/pm_qos.h static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
req               208 include/linux/pm_qos.h static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
req               227 include/linux/pm_qos.h 						  struct dev_pm_qos_request *req,
req               279 include/linux/pm_qos.h static inline int freq_qos_request_active(struct freq_qos_request *req)
req               281 include/linux/pm_qos.h 	return !IS_ERR_OR_NULL(req->qos);
req               290 include/linux/pm_qos.h 			 struct freq_qos_request *req,
req               292 include/linux/pm_qos.h int freq_qos_update_request(struct freq_qos_request *req, s32 new_value);
req               293 include/linux/pm_qos.h int freq_qos_remove_request(struct freq_qos_request *req);
req                18 include/linux/pmu.h extern int pmu_request(struct adb_request *req,
req                20 include/linux/pmu.h extern int pmu_queue_request(struct adb_request *req);
req                23 include/linux/pmu.h extern void pmu_wait_complete(struct adb_request *req);
req                42 include/linux/qcom_scm.h extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
req                79 include/linux/qcom_scm.h static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
req              1305 include/linux/security.h void security_req_classify_flow(const struct request_sock *req, struct flowi *fl);
req              1308 include/linux/security.h 			struct sk_buff *skb, struct request_sock *req);
req              1310 include/linux/security.h 			const struct request_sock *req);
req              1460 include/linux/security.h static inline void security_req_classify_flow(const struct request_sock *req, struct flowi *fl)
req              1469 include/linux/security.h 			struct sk_buff *skb, struct request_sock *req)
req              1475 include/linux/security.h 			const struct request_sock *req)
req                36 include/linux/sunrpc/bc_xprt.h void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied);
req                37 include/linux/sunrpc/bc_xprt.h void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task);
req                38 include/linux/sunrpc/bc_xprt.h void xprt_free_bc_request(struct rpc_rqst *req);
req                45 include/linux/sunrpc/bc_xprt.h void xprt_free_bc_rqst(struct rpc_rqst *req);
req                81 include/linux/sunrpc/bc_xprt.h static inline void xprt_free_bc_request(struct rpc_rqst *req)
req               128 include/linux/sunrpc/cache.h 	struct cache_deferred_req *(*defer)(struct cache_req *req);
req               142 include/linux/sunrpc/cache.h 	void			(*revisit)(struct cache_deferred_req *req,
req               179 include/linux/sunrpc/clnt.h void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages,
req               219 include/linux/sunrpc/sched.h struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req);
req               138 include/linux/sunrpc/xprt.h 				     struct rpc_rqst *req);
req               144 include/linux/sunrpc/xprt.h 	void		(*prepare_request)(struct rpc_rqst *req);
req               145 include/linux/sunrpc/xprt.h 	int		(*send_request)(struct rpc_rqst *req);
req               301 include/linux/sunrpc/xprt.h static inline int bc_prealloc(struct rpc_rqst *req)
req               303 include/linux/sunrpc/xprt.h 	return test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
req               306 include/linux/sunrpc/xprt.h static inline int bc_prealloc(struct rpc_rqst *req)
req               349 include/linux/sunrpc/xprt.h 				       struct rpc_rqst *req);
req               350 include/linux/sunrpc/xprt.h void			xprt_request_prepare(struct rpc_rqst *req);
req               359 include/linux/sunrpc/xprt.h int			xprt_adjust_timeout(struct rpc_rqst *req);
req               397 include/linux/sunrpc/xprt.h void			xprt_pin_rqst(struct rpc_rqst *req);
req               398 include/linux/sunrpc/xprt.h void			xprt_unpin_rqst(struct rpc_rqst *req);
req               400 include/linux/sunrpc/xprt.h bool			xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req);
req               118 include/linux/tcp.h 	struct inet_request_sock 	req;
req               133 include/linux/tcp.h static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
req               135 include/linux/tcp.h 	return (struct tcp_request_sock *)req;
req               463 include/linux/tcp.h 				struct request_sock *req)
req               465 include/linux/tcp.h 	tp->saved_syn = req->saved_syn;
req               466 include/linux/tcp.h 	req->saved_syn = NULL;
req               477 include/linux/usb/composite.h 	struct usb_request		*req;
req               113 include/linux/usb/gadget.h 					struct usb_request *req);
req               140 include/linux/usb/gadget.h 	void (*free_request) (struct usb_ep *ep, struct usb_request *req);
req               142 include/linux/usb/gadget.h 	int (*queue) (struct usb_ep *ep, struct usb_request *req,
req               144 include/linux/usb/gadget.h 	int (*dequeue) (struct usb_ep *ep, struct usb_request *req);
req               248 include/linux/usb/gadget.h void usb_ep_free_request(struct usb_ep *ep, struct usb_request *req);
req               249 include/linux/usb/gadget.h int usb_ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags);
req               250 include/linux/usb/gadget.h int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req);
req               268 include/linux/usb/gadget.h 		struct usb_request *req)
req               270 include/linux/usb/gadget.h static inline int usb_ep_queue(struct usb_ep *ep, struct usb_request *req,
req               273 include/linux/usb/gadget.h static inline int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
req               819 include/linux/usb/gadget.h 		struct usb_request *req, int is_in);
req               821 include/linux/usb/gadget.h 		struct usb_request *req, int is_in);
req               824 include/linux/usb/gadget.h 		struct usb_request *req, int is_in);
req               826 include/linux/usb/gadget.h 		struct usb_request *req, int is_in);
req               829 include/linux/usb/gadget.h 		struct usb_request *req, int is_in) { return -ENOSYS; }
req               831 include/linux/usb/gadget.h 		struct usb_request *req, int is_in) { return -ENOSYS; }
req               834 include/linux/usb/gadget.h 		struct usb_request *req, int is_in) { }
req               836 include/linux/usb/gadget.h 		struct usb_request *req, int is_in) { }
req               857 include/linux/usb/gadget.h 		struct usb_request *req);
req               220 include/media/dvb_vb2.h int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req);
req                69 include/media/media-device.h 	void (*req_free)(struct media_request *req);
req                70 include/media/media-device.h 	int (*req_validate)(struct media_request *req);
req                71 include/media/media-device.h 	void (*req_queue)(struct media_request *req);
req                87 include/media/media-request.h media_request_lock_for_access(struct media_request *req)
req                92 include/media/media-request.h 	spin_lock_irqsave(&req->lock, flags);
req                93 include/media/media-request.h 	if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
req                94 include/media/media-request.h 		req->access_count++;
req                97 include/media/media-request.h 	spin_unlock_irqrestore(&req->lock, flags);
req               111 include/media/media-request.h static inline void media_request_unlock_for_access(struct media_request *req)
req               115 include/media/media-request.h 	spin_lock_irqsave(&req->lock, flags);
req               116 include/media/media-request.h 	if (!WARN_ON(!req->access_count))
req               117 include/media/media-request.h 		req->access_count--;
req               118 include/media/media-request.h 	spin_unlock_irqrestore(&req->lock, flags);
req               132 include/media/media-request.h media_request_lock_for_update(struct media_request *req)
req               137 include/media/media-request.h 	spin_lock_irqsave(&req->lock, flags);
req               138 include/media/media-request.h 	if (req->state == MEDIA_REQUEST_STATE_IDLE ||
req               139 include/media/media-request.h 	    req->state == MEDIA_REQUEST_STATE_UPDATING) {
req               140 include/media/media-request.h 		req->state = MEDIA_REQUEST_STATE_UPDATING;
req               141 include/media/media-request.h 		req->updating_count++;
req               145 include/media/media-request.h 	spin_unlock_irqrestore(&req->lock, flags);
req               159 include/media/media-request.h static inline void media_request_unlock_for_update(struct media_request *req)
req               163 include/media/media-request.h 	spin_lock_irqsave(&req->lock, flags);
req               164 include/media/media-request.h 	WARN_ON(req->updating_count <= 0);
req               165 include/media/media-request.h 	if (!--req->updating_count)
req               166 include/media/media-request.h 		req->state = MEDIA_REQUEST_STATE_IDLE;
req               167 include/media/media-request.h 	spin_unlock_irqrestore(&req->lock, flags);
req               177 include/media/media-request.h static inline void media_request_get(struct media_request *req)
req               179 include/media/media-request.h 	kref_get(&req->kref);
req               190 include/media/media-request.h void media_request_put(struct media_request *req);
req               223 include/media/media-request.h static inline void media_request_get(struct media_request *req)
req               227 include/media/media-request.h static inline void media_request_put(struct media_request *req)
req               272 include/media/media-request.h 	struct media_request *req;
req               318 include/media/media-request.h media_request_object_find(struct media_request *req,
req               356 include/media/media-request.h int media_request_object_bind(struct media_request *req,
req               383 include/media/media-request.h media_request_lock_for_access(struct media_request *req)
req               388 include/media/media-request.h static inline void media_request_unlock_for_access(struct media_request *req)
req               393 include/media/media-request.h media_request_lock_for_update(struct media_request *req)
req               398 include/media/media-request.h static inline void media_request_unlock_for_update(struct media_request *req)
req               411 include/media/media-request.h media_request_object_find(struct media_request *req,
req               421 include/media/media-request.h 	obj->req = NULL;
req               424 include/media/media-request.h static inline int media_request_object_bind(struct media_request *req,
req               294 include/media/v4l2-ctrls.h 	struct v4l2_ctrl_ref *req;
req              1138 include/media/v4l2-ctrls.h int v4l2_ctrl_request_setup(struct media_request *req,
req              1155 include/media/v4l2-ctrls.h void v4l2_ctrl_request_complete(struct media_request *req,
req              1176 include/media/v4l2-ctrls.h struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req,
req               649 include/media/v4l2-mem2mem.h void v4l2_m2m_request_queue(struct media_request *req);
req               200 include/media/videobuf-core.h 		     struct v4l2_requestbuffers *req);
req               815 include/media/videobuf2-core.h 		  struct media_request *req);
req              1223 include/media/videobuf2-core.h unsigned int vb2_request_buffer_cnt(struct media_request *req);
req                84 include/media/videobuf2-v4l2.h int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req);
req               323 include/media/videobuf2-v4l2.h int vb2_request_validate(struct media_request *req);
req               324 include/media/videobuf2-v4l2.h void vb2_request_queue(struct media_request *req);
req               235 include/net/9p/client.h void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status);
req                45 include/net/9p/transport.h 	int (*request) (struct p9_client *, struct p9_req_t *req);
req                46 include/net/9p/transport.h 	int (*cancel) (struct p9_client *, struct p9_req_t *req);
req                47 include/net/9p/transport.h 	int (*cancelled)(struct p9_client *, struct p9_req_t *req);
req              3734 include/net/cfg80211.h 			struct cfg80211_auth_request *req);
req              3736 include/net/cfg80211.h 			 struct cfg80211_assoc_request *req);
req              3738 include/net/cfg80211.h 			  struct cfg80211_deauth_request *req);
req              3740 include/net/cfg80211.h 			    struct cfg80211_disassoc_request *req);
req              7422 include/net/cfg80211.h 			  struct cfg80211_pmsr_request *req,
req              7436 include/net/cfg80211.h 			    struct cfg80211_pmsr_request *req,
req               201 include/net/cipso_ipv4.h int cipso_v4_req_setattr(struct request_sock *req,
req               204 include/net/cipso_ipv4.h void cipso_v4_req_delattr(struct request_sock *req);
req               244 include/net/cipso_ipv4.h static inline int cipso_v4_req_setattr(struct request_sock *req,
req               251 include/net/cipso_ipv4.h static inline void cipso_v4_req_delattr(struct request_sock *req)
req               705 include/net/devlink.h 	int (*info_get)(struct devlink *devlink, struct devlink_info_req *req,
req               891 include/net/devlink.h int devlink_info_serial_number_put(struct devlink_info_req *req,
req               893 include/net/devlink.h int devlink_info_driver_name_put(struct devlink_info_req *req,
req               895 include/net/devlink.h int devlink_info_version_fixed_put(struct devlink_info_req *req,
req               898 include/net/devlink.h int devlink_info_version_stored_put(struct devlink_info_req *req,
req               901 include/net/devlink.h int devlink_info_version_running_put(struct devlink_info_req *req,
req                84 include/net/dn_fib.h 			struct netlink_skb_parms *req);
req                87 include/net/dn_fib.h 			struct netlink_skb_parms *req);
req                20 include/net/inet6_connection_sock.h 				      const struct request_sock *req, u8 proto);
req                40 include/net/inet_connection_sock.h 				      struct request_sock *req,
req               160 include/net/inet_connection_sock.h 				 const struct request_sock *req,
req               258 include/net/inet_connection_sock.h 				     const struct request_sock *req);
req               261 include/net/inet_connection_sock.h 					    const struct request_sock *req);
req               264 include/net/inet_connection_sock.h 				      struct request_sock *req,
req               266 include/net/inet_connection_sock.h void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
req               269 include/net/inet_connection_sock.h 					 struct request_sock *req,
req               287 include/net/inet_connection_sock.h void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
req               288 include/net/inet_connection_sock.h void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
req                69 include/net/inet_sock.h 	struct request_sock	req;
req                70 include/net/inet_sock.h #define ir_loc_addr		req.__req_common.skc_rcv_saddr
req                71 include/net/inet_sock.h #define ir_rmt_addr		req.__req_common.skc_daddr
req                72 include/net/inet_sock.h #define ir_num			req.__req_common.skc_num
req                73 include/net/inet_sock.h #define ir_rmt_port		req.__req_common.skc_dport
req                74 include/net/inet_sock.h #define ir_v6_rmt_addr		req.__req_common.skc_v6_daddr
req                75 include/net/inet_sock.h #define ir_v6_loc_addr		req.__req_common.skc_v6_rcv_saddr
req                76 include/net/inet_sock.h #define ir_iif			req.__req_common.skc_bound_dev_if
req                77 include/net/inet_sock.h #define ir_cookie		req.__req_common.skc_cookie
req                78 include/net/inet_sock.h #define ireq_net		req.__req_common.skc_net
req                79 include/net/inet_sock.h #define ireq_state		req.__req_common.skc_state
req                80 include/net/inet_sock.h #define ireq_family		req.__req_common.skc_family
req              2532 include/net/mac80211.h 	struct cfg80211_scan_request req;
req              3809 include/net/mac80211.h 		       struct ieee80211_scan_request *req);
req              3814 include/net/mac80211.h 				struct cfg80211_sched_scan_request *req,
req               249 include/net/netlabel.h 	int (*req_setattr)(struct request_sock *req,
req               252 include/net/netlabel.h 	void (*req_delattr)(struct request_sock *req);
req               481 include/net/netlabel.h int netlbl_req_setattr(struct request_sock *req,
req               483 include/net/netlabel.h void netlbl_req_delattr(struct request_sock *req);
req               636 include/net/netlabel.h static inline int netlbl_req_setattr(struct request_sock *req,
req               641 include/net/netlabel.h static inline void netlbl_req_delattr(struct request_sock *req)
req               278 include/net/nfc/nci_core.h 		void (*req)(struct nci_dev *ndev,
req                33 include/net/request_sock.h 				       struct request_sock *req);
req                35 include/net/request_sock.h 				    struct request_sock *req);
req                38 include/net/request_sock.h 	void		(*destructor)(struct request_sock *req);
req                39 include/net/request_sock.h 	void		(*syn_ack_timeout)(const struct request_sock *req);
req                42 include/net/request_sock.h int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
req                73 include/net/request_sock.h static inline struct sock *req_to_sk(struct request_sock *req)
req                75 include/net/request_sock.h 	return (struct sock *)req;
req                82 include/net/request_sock.h 	struct request_sock *req;
req                84 include/net/request_sock.h 	req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
req                85 include/net/request_sock.h 	if (!req)
req                87 include/net/request_sock.h 	req->rsk_listener = NULL;
req                90 include/net/request_sock.h 			kmem_cache_free(ops->slab, req);
req                93 include/net/request_sock.h 		req->rsk_listener = sk_listener;
req                95 include/net/request_sock.h 	req->rsk_ops = ops;
req                96 include/net/request_sock.h 	req_to_sk(req)->sk_prot = sk_listener->sk_prot;
req                97 include/net/request_sock.h 	sk_node_init(&req_to_sk(req)->sk_node);
req                98 include/net/request_sock.h 	sk_tx_queue_clear(req_to_sk(req));
req                99 include/net/request_sock.h 	req->saved_syn = NULL;
req               100 include/net/request_sock.h 	req->num_timeout = 0;
req               101 include/net/request_sock.h 	req->num_retrans = 0;
req               102 include/net/request_sock.h 	req->sk = NULL;
req               103 include/net/request_sock.h 	refcount_set(&req->rsk_refcnt, 0);
req               105 include/net/request_sock.h 	return req;
req               108 include/net/request_sock.h static inline void __reqsk_free(struct request_sock *req)
req               110 include/net/request_sock.h 	req->rsk_ops->destructor(req);
req               111 include/net/request_sock.h 	if (req->rsk_listener)
req               112 include/net/request_sock.h 		sock_put(req->rsk_listener);
req               113 include/net/request_sock.h 	kfree(req->saved_syn);
req               114 include/net/request_sock.h 	kmem_cache_free(req->rsk_ops->slab, req);
req               117 include/net/request_sock.h static inline void reqsk_free(struct request_sock *req)
req               119 include/net/request_sock.h 	WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
req               120 include/net/request_sock.h 	__reqsk_free(req);
req               123 include/net/request_sock.h static inline void reqsk_put(struct request_sock *req)
req               125 include/net/request_sock.h 	if (refcount_dec_and_test(&req->rsk_refcnt))
req               126 include/net/request_sock.h 		reqsk_free(req);
req               183 include/net/request_sock.h void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
req               194 include/net/request_sock.h 	struct request_sock *req;
req               197 include/net/request_sock.h 	req = queue->rskq_accept_head;
req               198 include/net/request_sock.h 	if (req) {
req               200 include/net/request_sock.h 		WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
req               205 include/net/request_sock.h 	return req;
req               209 include/net/request_sock.h 				       const struct request_sock *req)
req               211 include/net/request_sock.h 	if (req->num_timeout == 0)
req               378 include/net/tcp.h 			   struct request_sock *req, bool fastopen,
req               388 include/net/tcp.h bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
req               403 include/net/tcp.h void tcp_syn_ack_timeout(const struct request_sock *req);
req               436 include/net/tcp.h 				      struct request_sock *req,
req               440 include/net/tcp.h 				  struct request_sock *req,
req               453 include/net/tcp.h 				struct request_sock *req,
req               464 include/net/tcp.h 				 struct request_sock *req,
req               551 include/net/tcp.h u64 cookie_init_timestamp(struct request_sock *req);
req               602 include/net/tcp.h void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
req              1417 include/net/tcp.h extern void tcp_openreq_init_rwin(struct request_sock *req,
req              1655 include/net/tcp.h 			      struct request_sock *req,
req              1955 include/net/tcp.h int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
req              1986 include/net/tcp.h 	void (*init_req)(struct request_sock *req,
req              1994 include/net/tcp.h 				       const struct request_sock *req);
req              1998 include/net/tcp.h 			   struct flowi *fl, struct request_sock *req,
req                68 include/scsi/scsi_cmnd.h 	struct scsi_request req;
req               195 include/scsi/scsi_cmnd.h 	cmd->req.resid_len = resid;
req               200 include/scsi/scsi_cmnd.h 	return cmd->req.resid_len;
req                70 include/scsi/scsi_dh.h static inline int scsi_dh_activate(struct request_queue *req,
req                76 include/scsi/scsi_dh.h static inline int scsi_dh_attach(struct request_queue *req, const char *name)
req                85 include/scsi/scsi_dh.h static inline int scsi_dh_set_params(struct request_queue *req, const char *params)
req                25 include/scsi/scsi_request.h static inline void scsi_req_free_cmd(struct scsi_request *req)
req                27 include/scsi/scsi_request.h 	if (req->cmd != req->__cmd)
req                28 include/scsi/scsi_request.h 		kfree(req->cmd);
req                31 include/scsi/scsi_request.h void scsi_req_init(struct scsi_request *req);
req                25 include/scsi/scsi_tcq.h 	struct request *req = NULL;
req                33 include/scsi/scsi_tcq.h 		req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq],
req                37 include/scsi/scsi_tcq.h 	if (!req)
req                39 include/scsi/scsi_tcq.h 	return blk_mq_rq_to_pdu(req);
req               717 include/target/iscsi/iscsi_target_core.h 	char req[ISCSI_HDR_LEN];
req               246 include/trace/events/hswadsp.h 		struct sst_hsw_audio_data_format_ipc *req),
req               248 include/trace/events/hswadsp.h 	TP_ARGS(stream, req),
req               263 include/trace/events/hswadsp.h 		__entry->frequency = req->frequency;
req               264 include/trace/events/hswadsp.h 		__entry->bitdepth = req->bitdepth;
req               265 include/trace/events/hswadsp.h 		__entry->map = req->map;
req               266 include/trace/events/hswadsp.h 		__entry->config = req->config;
req               267 include/trace/events/hswadsp.h 		__entry->style = req->style;
req               268 include/trace/events/hswadsp.h 		__entry->ch_num = req->ch_num;
req               269 include/trace/events/hswadsp.h 		__entry->valid_bit = req->valid_bit;
req               282 include/trace/events/hswadsp.h 		struct sst_hsw_ipc_stream_alloc_req *req),
req               284 include/trace/events/hswadsp.h 	TP_ARGS(stream, req),
req               295 include/trace/events/hswadsp.h 		__entry->path_id = req->path_id;
req               296 include/trace/events/hswadsp.h 		__entry->stream_type = req->stream_type;
req               297 include/trace/events/hswadsp.h 		__entry->format_id = req->format_id;
req               308 include/trace/events/hswadsp.h 		struct sst_hsw_ipc_stream_free_req *req),
req               310 include/trace/events/hswadsp.h 	TP_ARGS(stream, req),
req               319 include/trace/events/hswadsp.h 		__entry->stream_id = req->stream_id;
req               329 include/trace/events/hswadsp.h 		struct sst_hsw_ipc_volume_req *req),
req               331 include/trace/events/hswadsp.h 	TP_ARGS(stream, req),
req               343 include/trace/events/hswadsp.h 		__entry->channel = req->channel;
req               344 include/trace/events/hswadsp.h 		__entry->target_volume = req->target_volume;
req               345 include/trace/events/hswadsp.h 		__entry->curve_duration = req->curve_duration;
req               346 include/trace/events/hswadsp.h 		__entry->curve_type = req->curve_type;
req               358 include/trace/events/hswadsp.h 	TP_PROTO(struct sst_hsw_ipc_device_config_req *req),
req               360 include/trace/events/hswadsp.h 	TP_ARGS(req),
req               370 include/trace/events/hswadsp.h 		__entry->ssp = req->ssp_interface;
req               371 include/trace/events/hswadsp.h 		__entry->clock_freq = req->clock_frequency;
req               372 include/trace/events/hswadsp.h 		__entry->mode = req->mode;
req               373 include/trace/events/hswadsp.h 		__entry->clock_divider = req->clock_divider;
req                12 include/trace/events/nbd.h 	TP_PROTO(struct request *req, u64 handle),
req                14 include/trace/events/nbd.h 	TP_ARGS(req, handle),
req                17 include/trace/events/nbd.h 		__field(struct request *, req)
req                22 include/trace/events/nbd.h 		__entry->req = req;
req                28 include/trace/events/nbd.h 		__entry->req,
req                35 include/trace/events/nbd.h 	TP_PROTO(struct request *req, u64 handle),
req                37 include/trace/events/nbd.h 	TP_ARGS(req, handle)
req                42 include/trace/events/nbd.h 	TP_PROTO(struct request *req, u64 handle),
req                44 include/trace/events/nbd.h 	TP_ARGS(req, handle)
req                49 include/trace/events/nbd.h 	TP_PROTO(struct request *req, u64 handle),
req                51 include/trace/events/nbd.h 	TP_ARGS(req, handle)
req                56 include/trace/events/nbd.h 	TP_PROTO(struct request *req, u64 handle),
req                58 include/trace/events/nbd.h 	TP_ARGS(req, handle)
req               278 include/trace/events/rpcrdma.h 		__field(const void *, req)
req               284 include/trace/events/rpcrdma.h 		__entry->req = rpcr_to_rdmar(rqst);
req               290 include/trace/events/rpcrdma.h 		__entry->xid, __entry->rqst, __entry->req, __entry->rep
req               474 include/trace/events/rpcrdma.h 		const struct rpcrdma_req *req
req               477 include/trace/events/rpcrdma.h 	TP_ARGS(req),
req               480 include/trace/events/rpcrdma.h 		__field(const void *, req)
req               487 include/trace/events/rpcrdma.h 		const struct rpc_rqst *rqst = &req->rl_slot;
req               489 include/trace/events/rpcrdma.h 		__entry->req = req;
req               497 include/trace/events/rpcrdma.h 		__entry->req
req               503 include/trace/events/rpcrdma.h 		const struct rpcrdma_req *req
req               506 include/trace/events/rpcrdma.h 	TP_ARGS(req),
req               509 include/trace/events/rpcrdma.h 		__field(const void *, req)
req               516 include/trace/events/rpcrdma.h 		const struct rpc_rqst *rqst = &req->rl_slot;
req               518 include/trace/events/rpcrdma.h 		__entry->req = req;
req               526 include/trace/events/rpcrdma.h 		__entry->req
req               550 include/trace/events/rpcrdma.h 		const struct rpcrdma_req *req,
req               555 include/trace/events/rpcrdma.h 	TP_ARGS(req, rtype, wtype),
req               570 include/trace/events/rpcrdma.h 		const struct rpc_rqst *rqst = &req->rl_slot;
req               575 include/trace/events/rpcrdma.h 		__entry->hdrlen = req->rl_hdrbuf.len;
req               648 include/trace/events/rpcrdma.h 		const struct rpcrdma_req *req,
req               652 include/trace/events/rpcrdma.h 	TP_ARGS(req, status),
req               655 include/trace/events/rpcrdma.h 		__field(const void *, req)
req               664 include/trace/events/rpcrdma.h 		const struct rpc_rqst *rqst = &req->rl_slot;
req               669 include/trace/events/rpcrdma.h 		__entry->req = req;
req               670 include/trace/events/rpcrdma.h 		__entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
req               671 include/trace/events/rpcrdma.h 		__entry->signaled = req->rl_sendctx->sc_wr.send_flags &
req               678 include/trace/events/rpcrdma.h 		__entry->req, __entry->num_sge,
req               740 include/trace/events/rpcrdma.h 		const struct rpcrdma_req *req,
req               744 include/trace/events/rpcrdma.h 	TP_ARGS(req, status),
req               747 include/trace/events/rpcrdma.h 		__field(const void *, req)
req               753 include/trace/events/rpcrdma.h 		__entry->req = req;
req               755 include/trace/events/rpcrdma.h 		__entry->xid = be32_to_cpu(req->rl_slot.rq_xid);
req               759 include/trace/events/rpcrdma.h 		__entry->req, __entry->xid, __entry->status
req               776 include/trace/events/rpcrdma.h 		__field(const void *, req)
req               783 include/trace/events/rpcrdma.h 		__entry->req = sc->sc_req;
req               790 include/trace/events/rpcrdma.h 		__entry->req, __entry->unmap_count,
req               984 include/trace/events/rpcrdma.h 		const struct rpcrdma_req *req,
req               988 include/trace/events/rpcrdma.h 	TP_ARGS(task, rep, req, credits),
req               994 include/trace/events/rpcrdma.h 		__field(const void *, req)
req              1003 include/trace/events/rpcrdma.h 		__entry->req = req;
req              1010 include/trace/events/rpcrdma.h 		__entry->credits, __entry->rep, __entry->req
req              1146 include/trace/events/rpcrdma.h 		const struct rpcrdma_req *req
req              1149 include/trace/events/rpcrdma.h 	TP_ARGS(task, req),
req              1154 include/trace/events/rpcrdma.h 		__field(const void *, req)
req              1162 include/trace/events/rpcrdma.h 		__entry->req = req;
req              1169 include/trace/events/rpcrdma.h 		__entry->req, __entry->callsize, __entry->rcvsize
req              1176 include/trace/events/rpcrdma.h 		const struct rpcrdma_req *req
req              1179 include/trace/events/rpcrdma.h 	TP_ARGS(task, req),
req              1184 include/trace/events/rpcrdma.h 		__field(const void *, req)
req              1191 include/trace/events/rpcrdma.h 		__entry->req = req;
req              1192 include/trace/events/rpcrdma.h 		__entry->rep = req->rl_reply;
req              1197 include/trace/events/rpcrdma.h 		__entry->req, __entry->rep
req               466 include/trace/events/sunrpc.h 		const struct rpc_rqst *req
req               469 include/trace/events/sunrpc.h 	TP_ARGS(req),
req               482 include/trace/events/sunrpc.h 		__entry->task_id = req->rq_task->tk_pid;
req               483 include/trace/events/sunrpc.h 		__entry->client_id = req->rq_task->tk_client->cl_clid;
req               485 include/trace/events/sunrpc.h 		__entry->head_base = req->rq_rcv_buf.head[0].iov_base;
req               486 include/trace/events/sunrpc.h 		__entry->head_len = req->rq_rcv_buf.head[0].iov_len;
req               487 include/trace/events/sunrpc.h 		__entry->page_len = req->rq_rcv_buf.page_len;
req               488 include/trace/events/sunrpc.h 		__entry->tail_base = req->rq_rcv_buf.tail[0].iov_base;
req               489 include/trace/events/sunrpc.h 		__entry->tail_len = req->rq_rcv_buf.tail[0].iov_len;
req               186 include/trace/events/tcp.h 	TP_PROTO(const struct sock *sk, const struct request_sock *req),
req               188 include/trace/events/tcp.h 	TP_ARGS(sk, req),
req               192 include/trace/events/tcp.h 		__field(const void *, req)
req               202 include/trace/events/tcp.h 		struct inet_request_sock *ireq = inet_rsk(req);
req               206 include/trace/events/tcp.h 		__entry->req = req;
req               244 include/uapi/drm/vmwgfx_drm.h 	struct drm_vmw_surface_create_req req;
req               272 include/uapi/drm/vmwgfx_drm.h 	struct drm_vmw_surface_arg req;
req               429 include/uapi/drm/vmwgfx_drm.h 	struct drm_vmw_alloc_bo_req req;
req               963 include/uapi/drm/vmwgfx_drm.h 	struct drm_vmw_gb_surface_create_req req;
req              1005 include/uapi/drm/vmwgfx_drm.h 	struct drm_vmw_surface_arg req;
req              1088 include/uapi/drm/vmwgfx_drm.h 	enum drm_vmw_extended_context req;
req              1168 include/uapi/drm/vmwgfx_drm.h 	struct drm_vmw_gb_surface_create_ext_req req;
req              1211 include/uapi/drm/vmwgfx_drm.h 	struct drm_vmw_surface_arg req;
req               288 include/uapi/linux/if_packet.h 	struct tpacket_req	req;
req               259 include/uapi/linux/ipmi.h 	struct ipmi_req req;
req               119 include/uapi/linux/target_core_user.h 		} req;
req                32 include/uapi/linux/usb/g_uvc.h 		struct usb_ctrlrequest req;
req                53 include/uapi/linux/usb/tmc.h 	struct usbtmc_request req;
req                19 include/video/sisfb.h extern void sis_malloc(struct sis_memreq *req);
req                20 include/video/sisfb.h extern void sis_malloc_new(struct pci_dev *pdev, struct sis_memreq *req);
req                85 include/xen/arm/hypercall.h MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
req                78 include/xen/interface/io/ring.h     __req_t req;							\
req               177 include/xen/interface/io/ring.h 	unsigned int req = (_r)->sring->req_prod - (_r)->req_cons;	\
req               180 include/xen/interface/io/ring.h 	req < rsp ? req : rsp;						\
req               185 include/xen/interface/io/ring.h     (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
req                86 include/xen/interface/io/xs_wire.h     char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */
req               144 kernel/power/qos.c 	struct pm_qos_request *req;
req               181 kernel/power/qos.c 	plist_for_each_entry(req, &c->list, node) {
req               184 kernel/power/qos.c 		if ((req->node).prio != c->default_value) {
req               190 kernel/power/qos.c 			   (req->node).prio, state);
req               273 kernel/power/qos.c 				    struct pm_qos_flags_request *req)
req               277 kernel/power/qos.c 	list_del(&req->node);
req               278 kernel/power/qos.c 	list_for_each_entry(req, &pqf->list, node)
req               279 kernel/power/qos.c 		val |= req->flags;
req               296 kernel/power/qos.c 			 struct pm_qos_flags_request *req,
req               308 kernel/power/qos.c 		pm_qos_flags_remove_req(pqf, req);
req               311 kernel/power/qos.c 		pm_qos_flags_remove_req(pqf, req);
req               314 kernel/power/qos.c 		req->flags = val;
req               315 kernel/power/qos.c 		INIT_LIST_HEAD(&req->node);
req               316 kernel/power/qos.c 		list_add_tail(&req->node, &pqf->list);
req               344 kernel/power/qos.c int pm_qos_request_active(struct pm_qos_request *req)
req               346 kernel/power/qos.c 	return req->pm_qos_class != 0;
req               350 kernel/power/qos.c static void __pm_qos_update_request(struct pm_qos_request *req,
req               353 kernel/power/qos.c 	trace_pm_qos_update_request(req->pm_qos_class, new_value);
req               355 kernel/power/qos.c 	if (new_value != req->node.prio)
req               357 kernel/power/qos.c 			pm_qos_array[req->pm_qos_class]->constraints,
req               358 kernel/power/qos.c 			&req->node, PM_QOS_UPDATE_REQ, new_value);
req               369 kernel/power/qos.c 	struct pm_qos_request *req = container_of(to_delayed_work(work),
req               373 kernel/power/qos.c 	__pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
req               389 kernel/power/qos.c void pm_qos_add_request(struct pm_qos_request *req,
req               392 kernel/power/qos.c 	if (!req) /*guard against callers passing in null */
req               395 kernel/power/qos.c 	if (pm_qos_request_active(req)) {
req               399 kernel/power/qos.c 	req->pm_qos_class = pm_qos_class;
req               400 kernel/power/qos.c 	INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
req               403 kernel/power/qos.c 			     &req->node, PM_QOS_ADD_REQ, value);
req               417 kernel/power/qos.c void pm_qos_update_request(struct pm_qos_request *req,
req               420 kernel/power/qos.c 	if (!req) /*guard against callers passing in null */
req               423 kernel/power/qos.c 	if (!pm_qos_request_active(req)) {
req               428 kernel/power/qos.c 	cancel_delayed_work_sync(&req->work);
req               429 kernel/power/qos.c 	__pm_qos_update_request(req, new_value);
req               441 kernel/power/qos.c void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
req               444 kernel/power/qos.c 	if (!req)
req               446 kernel/power/qos.c 	if (WARN(!pm_qos_request_active(req),
req               450 kernel/power/qos.c 	cancel_delayed_work_sync(&req->work);
req               452 kernel/power/qos.c 	trace_pm_qos_update_request_timeout(req->pm_qos_class,
req               454 kernel/power/qos.c 	if (new_value != req->node.prio)
req               456 kernel/power/qos.c 			pm_qos_array[req->pm_qos_class]->constraints,
req               457 kernel/power/qos.c 			&req->node, PM_QOS_UPDATE_REQ, new_value);
req               459 kernel/power/qos.c 	schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
req               470 kernel/power/qos.c void pm_qos_remove_request(struct pm_qos_request *req)
req               472 kernel/power/qos.c 	if (!req) /*guard against callers passing in null */
req               476 kernel/power/qos.c 	if (!pm_qos_request_active(req)) {
req               481 kernel/power/qos.c 	cancel_delayed_work_sync(&req->work);
req               483 kernel/power/qos.c 	trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
req               484 kernel/power/qos.c 	pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
req               485 kernel/power/qos.c 			     &req->node, PM_QOS_REMOVE_REQ,
req               487 kernel/power/qos.c 	memset(req, 0, sizeof(*req));
req               563 kernel/power/qos.c 		struct pm_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL);
req               564 kernel/power/qos.c 		if (!req)
req               567 kernel/power/qos.c 		pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
req               568 kernel/power/qos.c 		filp->private_data = req;
req               577 kernel/power/qos.c 	struct pm_qos_request *req;
req               579 kernel/power/qos.c 	req = filp->private_data;
req               580 kernel/power/qos.c 	pm_qos_remove_request(req);
req               581 kernel/power/qos.c 	kfree(req);
req               592 kernel/power/qos.c 	struct pm_qos_request *req = filp->private_data;
req               594 kernel/power/qos.c 	if (!req)
req               596 kernel/power/qos.c 	if (!pm_qos_request_active(req))
req               600 kernel/power/qos.c 	value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints);
req               610 kernel/power/qos.c 	struct pm_qos_request *req;
req               623 kernel/power/qos.c 	req = filp->private_data;
req               624 kernel/power/qos.c 	pm_qos_update_request(req, value);
req               718 kernel/power/qos.c static int freq_qos_apply(struct freq_qos_request *req,
req               723 kernel/power/qos.c 	switch(req->type) {
req               725 kernel/power/qos.c 		ret = pm_qos_update_target(&req->qos->min_freq, &req->pnode,
req               729 kernel/power/qos.c 		ret = pm_qos_update_target(&req->qos->max_freq, &req->pnode,
req               754 kernel/power/qos.c 			 struct freq_qos_request *req,
req               759 kernel/power/qos.c 	if (IS_ERR_OR_NULL(qos) || !req)
req               762 kernel/power/qos.c 	if (WARN(freq_qos_request_active(req),
req               766 kernel/power/qos.c 	req->qos = qos;
req               767 kernel/power/qos.c 	req->type = type;
req               768 kernel/power/qos.c 	ret = freq_qos_apply(req, PM_QOS_ADD_REQ, value);
req               770 kernel/power/qos.c 		req->qos = NULL;
req               771 kernel/power/qos.c 		req->type = 0;
req               789 kernel/power/qos.c int freq_qos_update_request(struct freq_qos_request *req, s32 new_value)
req               791 kernel/power/qos.c 	if (!req)
req               794 kernel/power/qos.c 	if (WARN(!freq_qos_request_active(req),
req               798 kernel/power/qos.c 	if (req->pnode.prio == new_value)
req               801 kernel/power/qos.c 	return freq_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
req               815 kernel/power/qos.c int freq_qos_remove_request(struct freq_qos_request *req)
req               819 kernel/power/qos.c 	if (!req)
req               822 kernel/power/qos.c 	if (WARN(!freq_qos_request_active(req),
req               826 kernel/power/qos.c 	ret = freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
req               827 kernel/power/qos.c 	req->qos = NULL;
req               828 kernel/power/qos.c 	req->type = 0;
req               879 kernel/ptrace.c static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
req               893 kernel/ptrace.c 	if (req == PTRACE_GETREGSET)
req              7247 kernel/sched/core.c 	struct uclamp_request req = {
req              7255 kernel/sched/core.c 		req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
req              7256 kernel/sched/core.c 					     &req.percent);
req              7257 kernel/sched/core.c 		if (req.ret)
req              7258 kernel/sched/core.c 			return req;
req              7259 kernel/sched/core.c 		if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
req              7260 kernel/sched/core.c 			req.ret = -ERANGE;
req              7261 kernel/sched/core.c 			return req;
req              7264 kernel/sched/core.c 		req.util = req.percent << SCHED_CAPACITY_SHIFT;
req              7265 kernel/sched/core.c 		req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
req              7268 kernel/sched/core.c 	return req;
req              7275 kernel/sched/core.c 	struct uclamp_request req;
req              7278 kernel/sched/core.c 	req = capacity_from_percent(buf);
req              7279 kernel/sched/core.c 	if (req.ret)
req              7280 kernel/sched/core.c 		return req.ret;
req              7286 kernel/sched/core.c 	if (tg->uclamp_req[clamp_id].value != req.util)
req              7287 kernel/sched/core.c 		uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
req              7293 kernel/sched/core.c 	tg->uclamp_pct[clamp_id] = req.percent;
req               129 lib/test_firmware.c 	struct test_batched_req *req;
req               136 lib/test_firmware.c 		req = &test_fw_config->reqs[i];
req               137 lib/test_firmware.c 		if (req->fw)
req               138 lib/test_firmware.c 			release_firmware(req->fw);
req               607 lib/test_firmware.c 	struct test_batched_req *req = data;
req               609 lib/test_firmware.c 	if (!req) {
req               621 lib/test_firmware.c 		req->rc = request_firmware_into_buf(&req->fw,
req               622 lib/test_firmware.c 						    req->name,
req               623 lib/test_firmware.c 						    req->dev,
req               626 lib/test_firmware.c 		if (!req->fw)
req               629 lib/test_firmware.c 		req->rc = test_fw_config->req_firmware(&req->fw,
req               630 lib/test_firmware.c 						       req->name,
req               631 lib/test_firmware.c 						       req->dev);
req               634 lib/test_firmware.c 	if (req->rc) {
req               636 lib/test_firmware.c 			req->idx, req->rc);
req               638 lib/test_firmware.c 			test_fw_config->test_result = req->rc;
req               639 lib/test_firmware.c 	} else if (req->fw) {
req               640 lib/test_firmware.c 		req->sent = true;
req               642 lib/test_firmware.c 			req->idx, req->fw->size);
req               644 lib/test_firmware.c 	complete(&req->completion);
req               646 lib/test_firmware.c 	req->task = NULL;
req               661 lib/test_firmware.c 	struct test_batched_req *req;
req               679 lib/test_firmware.c 		req = &test_fw_config->reqs[i];
req               680 lib/test_firmware.c 		req->fw = NULL;
req               681 lib/test_firmware.c 		req->idx = i;
req               682 lib/test_firmware.c 		req->name = test_fw_config->name;
req               683 lib/test_firmware.c 		req->dev = dev;
req               684 lib/test_firmware.c 		init_completion(&req->completion);
req               685 lib/test_firmware.c 		req->task = kthread_run(test_fw_run_batch_request, req,
req               686 lib/test_firmware.c 					     "%s-%u", KBUILD_MODNAME, req->idx);
req               687 lib/test_firmware.c 		if (!req->task || IS_ERR(req->task)) {
req               688 lib/test_firmware.c 			pr_err("Setting up thread %u failed\n", req->idx);
req               689 lib/test_firmware.c 			req->task = NULL;
req               707 lib/test_firmware.c 		req = &test_fw_config->reqs[i];
req               708 lib/test_firmware.c 		if (req->task || req->sent)
req               709 lib/test_firmware.c 			wait_for_completion(&req->completion);
req               728 lib/test_firmware.c 	struct test_batched_req *req = context;
req               730 lib/test_firmware.c 	if (!req) {
req               736 lib/test_firmware.c 	if (!req->idx)
req               739 lib/test_firmware.c 	req->fw = fw;
req               750 lib/test_firmware.c 	complete(&req->completion);
req               758 lib/test_firmware.c 	struct test_batched_req *req;
req               780 lib/test_firmware.c 		req = &test_fw_config->reqs[i];
req               781 lib/test_firmware.c 		req->name = test_fw_config->name;
req               782 lib/test_firmware.c 		req->fw = NULL;
req               783 lib/test_firmware.c 		req->idx = i;
req               784 lib/test_firmware.c 		init_completion(&req->completion);
req               786 lib/test_firmware.c 					     req->name,
req               787 lib/test_firmware.c 					     dev, GFP_KERNEL, req,
req               792 lib/test_firmware.c 			req->rc = rc;
req               795 lib/test_firmware.c 			req->sent = true;
req               811 lib/test_firmware.c 		req = &test_fw_config->reqs[i];
req               812 lib/test_firmware.c 		if (req->sent)
req               813 lib/test_firmware.c 			wait_for_completion(&req->completion);
req               848 lib/test_firmware.c 	struct test_batched_req *req;
req               865 lib/test_firmware.c 	req = &test_fw_config->reqs[idx];
req               866 lib/test_firmware.c 	if (!req->fw) {
req               872 lib/test_firmware.c 	pr_info("#%u: loaded %zu\n", idx, req->fw->size);
req               874 lib/test_firmware.c 	if (req->fw->size > PAGE_SIZE) {
req               879 lib/test_firmware.c 	memcpy(buf, req->fw->data, req->fw->size);
req               881 lib/test_firmware.c 	rc = req->fw->size;
req               270 net/9p/client.c 	struct p9_req_t *req = kmem_cache_alloc(p9_req_cache, GFP_NOFS);
req               274 net/9p/client.c 	if (!req)
req               277 net/9p/client.c 	if (p9_fcall_init(c, &req->tc, alloc_msize))
req               279 net/9p/client.c 	if (p9_fcall_init(c, &req->rc, alloc_msize))
req               282 net/9p/client.c 	p9pdu_reset(&req->tc);
req               283 net/9p/client.c 	p9pdu_reset(&req->rc);
req               284 net/9p/client.c 	req->t_err = 0;
req               285 net/9p/client.c 	req->status = REQ_STATUS_ALLOC;
req               286 net/9p/client.c 	init_waitqueue_head(&req->wq);
req               287 net/9p/client.c 	INIT_LIST_HEAD(&req->req_list);
req               292 net/9p/client.c 		tag = idr_alloc(&c->reqs, req, P9_NOTAG, P9_NOTAG + 1,
req               295 net/9p/client.c 		tag = idr_alloc(&c->reqs, req, 0, P9_NOTAG, GFP_NOWAIT);
req               296 net/9p/client.c 	req->tc.tag = tag;
req               312 net/9p/client.c 	refcount_set(&req->refcount.refcount, 2);
req               314 net/9p/client.c 	return req;
req               317 net/9p/client.c 	p9_fcall_fini(&req->tc);
req               318 net/9p/client.c 	p9_fcall_fini(&req->rc);
req               320 net/9p/client.c 	kmem_cache_free(p9_req_cache, req);
req               334 net/9p/client.c 	struct p9_req_t *req;
req               338 net/9p/client.c 	req = idr_find(&c->reqs, tag);
req               339 net/9p/client.c 	if (req) {
req               345 net/9p/client.c 		if (!p9_req_try_get(req))
req               347 net/9p/client.c 		if (req->tc.tag != tag) {
req               348 net/9p/client.c 			p9_req_put(req);
req               354 net/9p/client.c 	return req;
req               400 net/9p/client.c 	struct p9_req_t *req;
req               404 net/9p/client.c 	idr_for_each_entry(&c->reqs, req, id) {
req               406 net/9p/client.c 		if (p9_tag_remove(c, req) == 0)
req               408 net/9p/client.c 				req->tc.tag);
req               419 net/9p/client.c void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status)
req               421 net/9p/client.c 	p9_debug(P9_DEBUG_MUX, " tag %d\n", req->tc.tag);
req               428 net/9p/client.c 	req->status = status;
req               430 net/9p/client.c 	wake_up(&req->wq);
req               431 net/9p/client.c 	p9_debug(P9_DEBUG_MUX, "wakeup: %d\n", req->tc.tag);
req               432 net/9p/client.c 	p9_req_put(req);
req               497 net/9p/client.c static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
req               503 net/9p/client.c 	err = p9_parse_header(&req->rc, NULL, &type, NULL, 0);
req               504 net/9p/client.c 	if (req->rc.size >= c->msize) {
req               507 net/9p/client.c 			 req->rc.size);
req               514 net/9p/client.c 	trace_9p_protocol_dump(c, &req->rc);
req               524 net/9p/client.c 		err = p9pdu_readf(&req->rc, c->proto_version, "s?d",
req               540 net/9p/client.c 		err = p9pdu_readf(&req->rc, c->proto_version, "d", &ecode);
req               566 net/9p/client.c static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
req               574 net/9p/client.c 	err = p9_parse_header(&req->rc, NULL, &type, NULL, 0);
req               579 net/9p/client.c 	trace_9p_protocol_dump(c, &req->rc);
req               594 net/9p/client.c 		len = req->rc.size - req->rc.offset;
req               600 net/9p/client.c 		ename = &req->rc.sdata[req->rc.offset];
req               610 net/9p/client.c 		err = p9pdu_readf(&req->rc, c->proto_version, "s?d",
req               626 net/9p/client.c 		err = p9pdu_readf(&req->rc, c->proto_version, "d", &ecode);
req               655 net/9p/client.c 	struct p9_req_t *req;
req               665 net/9p/client.c 	req = p9_client_rpc(c, P9_TFLUSH, "w", oldtag);
req               666 net/9p/client.c 	if (IS_ERR(req))
req               667 net/9p/client.c 		return PTR_ERR(req);
req               678 net/9p/client.c 	p9_tag_remove(c, req);
req               687 net/9p/client.c 	struct p9_req_t *req;
req               699 net/9p/client.c 	req = p9_tag_alloc(c, type, req_size);
req               700 net/9p/client.c 	if (IS_ERR(req))
req               701 net/9p/client.c 		return req;
req               704 net/9p/client.c 	p9pdu_prepare(&req->tc, req->tc.tag, type);
req               705 net/9p/client.c 	err = p9pdu_vwritef(&req->tc, c->proto_version, fmt, ap);
req               708 net/9p/client.c 	p9pdu_finalize(c, &req->tc);
req               709 net/9p/client.c 	trace_9p_client_req(c, type, req->tc.tag);
req               710 net/9p/client.c 	return req;
req               712 net/9p/client.c 	p9_tag_remove(c, req);
req               714 net/9p/client.c 	p9_req_put(req);
req               733 net/9p/client.c 	struct p9_req_t *req;
req               736 net/9p/client.c 	req = p9_client_prepare_req(c, type, c->msize, fmt, ap);
req               738 net/9p/client.c 	if (IS_ERR(req))
req               739 net/9p/client.c 		return req;
req               747 net/9p/client.c 	err = c->trans_mod->request(c, req);
req               750 net/9p/client.c 		p9_req_put(req);
req               757 net/9p/client.c 	err = wait_event_killable(req->wq, req->status >= REQ_STATUS_RCVD);
req               772 net/9p/client.c 	if (req->status == REQ_STATUS_ERROR) {
req               773 net/9p/client.c 		p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
req               774 net/9p/client.c 		err = req->t_err;
req               781 net/9p/client.c 		if (c->trans_mod->cancel(c, req))
req               782 net/9p/client.c 			p9_client_flush(c, req);
req               785 net/9p/client.c 		if (req->status == REQ_STATUS_RCVD)
req               797 net/9p/client.c 	err = p9_check_errors(c, req);
req               798 net/9p/client.c 	trace_9p_client_res(c, type, req->rc.tag, err);
req               800 net/9p/client.c 		return req;
req               802 net/9p/client.c 	p9_tag_remove(c, req);
req               828 net/9p/client.c 	struct p9_req_t *req;
req               835 net/9p/client.c 	req = p9_client_prepare_req(c, type, P9_ZC_HDR_SZ, fmt, ap);
req               837 net/9p/client.c 	if (IS_ERR(req))
req               838 net/9p/client.c 		return req;
req               846 net/9p/client.c 	err = c->trans_mod->zc_request(c, req, uidata, uodata,
req               854 net/9p/client.c 	if (req->status == REQ_STATUS_ERROR) {
req               855 net/9p/client.c 		p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
req               856 net/9p/client.c 		err = req->t_err;
req               863 net/9p/client.c 		if (c->trans_mod->cancel(c, req))
req               864 net/9p/client.c 			p9_client_flush(c, req);
req               867 net/9p/client.c 		if (req->status == REQ_STATUS_RCVD)
req               879 net/9p/client.c 	err = p9_check_zc_errors(c, req, uidata, in_hdrlen);
req               880 net/9p/client.c 	trace_9p_client_res(c, type, req->rc.tag, err);
req               882 net/9p/client.c 		return req;
req               884 net/9p/client.c 	p9_tag_remove(c, req);
req               936 net/9p/client.c 	struct p9_req_t *req;
req               945 net/9p/client.c 		req = p9_client_rpc(c, P9_TVERSION, "ds",
req               949 net/9p/client.c 		req = p9_client_rpc(c, P9_TVERSION, "ds",
req               953 net/9p/client.c 		req = p9_client_rpc(c, P9_TVERSION, "ds",
req               960 net/9p/client.c 	if (IS_ERR(req))
req               961 net/9p/client.c 		return PTR_ERR(req);
req               963 net/9p/client.c 	err = p9pdu_readf(&req->rc, c->proto_version, "ds", &msize, &version);
req               966 net/9p/client.c 		trace_9p_protocol_dump(c, &req->rc);
req               995 net/9p/client.c 	p9_tag_remove(c, req);
req              1120 net/9p/client.c 	struct p9_req_t *req;
req              1134 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TATTACH, "ddss?u", fid->fid,
req              1136 net/9p/client.c 	if (IS_ERR(req)) {
req              1137 net/9p/client.c 		err = PTR_ERR(req);
req              1141 net/9p/client.c 	err = p9pdu_readf(&req->rc, clnt->proto_version, "Q", &qid);
req              1143 net/9p/client.c 		trace_9p_protocol_dump(clnt, &req->rc);
req              1144 net/9p/client.c 		p9_tag_remove(clnt, req);
req              1153 net/9p/client.c 	p9_tag_remove(clnt, req);
req              1170 net/9p/client.c 	struct p9_req_t *req;
req              1191 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TWALK, "ddT", oldfid->fid, fid->fid,
req              1193 net/9p/client.c 	if (IS_ERR(req)) {
req              1194 net/9p/client.c 		err = PTR_ERR(req);
req              1198 net/9p/client.c 	err = p9pdu_readf(&req->rc, clnt->proto_version, "R", &nwqids, &wqids);
req              1200 net/9p/client.c 		trace_9p_protocol_dump(clnt, &req->rc);
req              1201 net/9p/client.c 		p9_tag_remove(clnt, req);
req              1204 net/9p/client.c 	p9_tag_remove(clnt, req);
req              1244 net/9p/client.c 	struct p9_req_t *req;
req              1257 net/9p/client.c 		req = p9_client_rpc(clnt, P9_TLOPEN, "dd", fid->fid, mode);
req              1259 net/9p/client.c 		req = p9_client_rpc(clnt, P9_TOPEN, "db", fid->fid, mode);
req              1260 net/9p/client.c 	if (IS_ERR(req)) {
req              1261 net/9p/client.c 		err = PTR_ERR(req);
req              1265 net/9p/client.c 	err = p9pdu_readf(&req->rc, clnt->proto_version, "Qd", &qid, &iounit);
req              1267 net/9p/client.c 		trace_9p_protocol_dump(clnt, &req->rc);
req              1279 net/9p/client.c 	p9_tag_remove(clnt, req);
req              1290 net/9p/client.c 	struct p9_req_t *req;
req              1302 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TLCREATE, "dsddg", ofid->fid, name, flags,
req              1304 net/9p/client.c 	if (IS_ERR(req)) {
req              1305 net/9p/client.c 		err = PTR_ERR(req);
req              1309 net/9p/client.c 	err = p9pdu_readf(&req->rc, clnt->proto_version, "Qd", qid, &iounit);
req              1311 net/9p/client.c 		trace_9p_protocol_dump(clnt, &req->rc);
req              1324 net/9p/client.c 	p9_tag_remove(clnt, req);
req              1335 net/9p/client.c 	struct p9_req_t *req;
req              1347 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TCREATE, "dsdb?s", fid->fid, name, perm,
req              1349 net/9p/client.c 	if (IS_ERR(req)) {
req              1350 net/9p/client.c 		err = PTR_ERR(req);
req              1354 net/9p/client.c 	err = p9pdu_readf(&req->rc, clnt->proto_version, "Qd", &qid, &iounit);
req              1356 net/9p/client.c 		trace_9p_protocol_dump(clnt, &req->rc);
req              1369 net/9p/client.c 	p9_tag_remove(clnt, req);
req              1380 net/9p/client.c 	struct p9_req_t *req;
req              1386 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TSYMLINK, "dssg", dfid->fid, name, symtgt,
req              1388 net/9p/client.c 	if (IS_ERR(req)) {
req              1389 net/9p/client.c 		err = PTR_ERR(req);
req              1393 net/9p/client.c 	err = p9pdu_readf(&req->rc, clnt->proto_version, "Q", qid);
req              1395 net/9p/client.c 		trace_9p_protocol_dump(clnt, &req->rc);
req              1403 net/9p/client.c 	p9_tag_remove(clnt, req);
req              1412 net/9p/client.c 	struct p9_req_t *req;
req              1417 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TLINK, "dds", dfid->fid, oldfid->fid,
req              1419 net/9p/client.c 	if (IS_ERR(req))
req              1420 net/9p/client.c 		return PTR_ERR(req);
req              1423 net/9p/client.c 	p9_tag_remove(clnt, req);
req              1432 net/9p/client.c 	struct p9_req_t *req;
req              1439 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TFSYNC, "dd", fid->fid, datasync);
req              1440 net/9p/client.c 	if (IS_ERR(req)) {
req              1441 net/9p/client.c 		err = PTR_ERR(req);
req              1447 net/9p/client.c 	p9_tag_remove(clnt, req);
req              1458 net/9p/client.c 	struct p9_req_t *req;
req              1474 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TCLUNK, "d", fid->fid);
req              1475 net/9p/client.c 	if (IS_ERR(req)) {
req              1476 net/9p/client.c 		err = PTR_ERR(req);
req              1482 net/9p/client.c 	p9_tag_remove(clnt, req);
req              1502 net/9p/client.c 	struct p9_req_t *req;
req              1508 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TREMOVE, "d", fid->fid);
req              1509 net/9p/client.c 	if (IS_ERR(req)) {
req              1510 net/9p/client.c 		err = PTR_ERR(req);
req              1516 net/9p/client.c 	p9_tag_remove(clnt, req);
req              1529 net/9p/client.c 	struct p9_req_t *req;
req              1536 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TUNLINKAT, "dsd", dfid->fid, name, flags);
req              1537 net/9p/client.c 	if (IS_ERR(req)) {
req              1538 net/9p/client.c 		err = PTR_ERR(req);
req              1543 net/9p/client.c 	p9_tag_remove(clnt, req);
req              1553 net/9p/client.c 	struct p9_req_t *req;
req              1578 net/9p/client.c 			req = p9_client_zc_rpc(clnt, P9_TREAD, to, NULL, rsize,
req              1583 net/9p/client.c 			req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset,
req              1586 net/9p/client.c 		if (IS_ERR(req)) {
req              1587 net/9p/client.c 			*err = PTR_ERR(req);
req              1591 net/9p/client.c 		*err = p9pdu_readf(&req->rc, clnt->proto_version,
req              1594 net/9p/client.c 			trace_9p_protocol_dump(clnt, &req->rc);
req              1595 net/9p/client.c 			p9_tag_remove(clnt, req);
req              1605 net/9p/client.c 			p9_tag_remove(clnt, req);
req              1615 net/9p/client.c 				p9_tag_remove(clnt, req);
req              1623 net/9p/client.c 		p9_tag_remove(clnt, req);
req              1633 net/9p/client.c 	struct p9_req_t *req;
req              1652 net/9p/client.c 			req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, from, 0,
req              1656 net/9p/client.c 			req = p9_client_rpc(clnt, P9_TWRITE, "dqV", fid->fid,
req              1659 net/9p/client.c 		if (IS_ERR(req)) {
req              1660 net/9p/client.c 			*err = PTR_ERR(req);
req              1664 net/9p/client.c 		*err = p9pdu_readf(&req->rc, clnt->proto_version, "d", &count);
req              1666 net/9p/client.c 			trace_9p_protocol_dump(clnt, &req->rc);
req              1667 net/9p/client.c 			p9_tag_remove(clnt, req);
req              1677 net/9p/client.c 		p9_tag_remove(clnt, req);
req              1691 net/9p/client.c 	struct p9_req_t *req;
req              1702 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TSTAT, "d", fid->fid);
req              1703 net/9p/client.c 	if (IS_ERR(req)) {
req              1704 net/9p/client.c 		err = PTR_ERR(req);
req              1708 net/9p/client.c 	err = p9pdu_readf(&req->rc, clnt->proto_version, "wS", &ignored, ret);
req              1710 net/9p/client.c 		trace_9p_protocol_dump(clnt, &req->rc);
req              1711 net/9p/client.c 		p9_tag_remove(clnt, req);
req              1728 net/9p/client.c 	p9_tag_remove(clnt, req);
req              1744 net/9p/client.c 	struct p9_req_t *req;
req              1755 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TGETATTR, "dq", fid->fid, request_mask);
req              1756 net/9p/client.c 	if (IS_ERR(req)) {
req              1757 net/9p/client.c 		err = PTR_ERR(req);
req              1761 net/9p/client.c 	err = p9pdu_readf(&req->rc, clnt->proto_version, "A", ret);
req              1763 net/9p/client.c 		trace_9p_protocol_dump(clnt, &req->rc);
req              1764 net/9p/client.c 		p9_tag_remove(clnt, req);
req              1789 net/9p/client.c 	p9_tag_remove(clnt, req);
req              1830 net/9p/client.c 	struct p9_req_t *req;
req              1850 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size+2, wst);
req              1851 net/9p/client.c 	if (IS_ERR(req)) {
req              1852 net/9p/client.c 		err = PTR_ERR(req);
req              1858 net/9p/client.c 	p9_tag_remove(clnt, req);
req              1867 net/9p/client.c 	struct p9_req_t *req;
req              1883 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TSETATTR, "dI", fid->fid, p9attr);
req              1885 net/9p/client.c 	if (IS_ERR(req)) {
req              1886 net/9p/client.c 		err = PTR_ERR(req);
req              1890 net/9p/client.c 	p9_tag_remove(clnt, req);
req              1899 net/9p/client.c 	struct p9_req_t *req;
req              1907 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TSTATFS, "d", fid->fid);
req              1908 net/9p/client.c 	if (IS_ERR(req)) {
req              1909 net/9p/client.c 		err = PTR_ERR(req);
req              1913 net/9p/client.c 	err = p9pdu_readf(&req->rc, clnt->proto_version, "ddqqqqqqd", &sb->type,
req              1917 net/9p/client.c 		trace_9p_protocol_dump(clnt, &req->rc);
req              1918 net/9p/client.c 		p9_tag_remove(clnt, req);
req              1929 net/9p/client.c 	p9_tag_remove(clnt, req);
req              1939 net/9p/client.c 	struct p9_req_t *req;
req              1948 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TRENAME, "dds", fid->fid,
req              1950 net/9p/client.c 	if (IS_ERR(req)) {
req              1951 net/9p/client.c 		err = PTR_ERR(req);
req              1957 net/9p/client.c 	p9_tag_remove(clnt, req);
req              1967 net/9p/client.c 	struct p9_req_t *req;
req              1977 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TRENAMEAT, "dsds", olddirfid->fid,
req              1979 net/9p/client.c 	if (IS_ERR(req)) {
req              1980 net/9p/client.c 		err = PTR_ERR(req);
req              1987 net/9p/client.c 	p9_tag_remove(clnt, req);
req              2000 net/9p/client.c 	struct p9_req_t *req;
req              2015 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TXATTRWALK, "dds",
req              2017 net/9p/client.c 	if (IS_ERR(req)) {
req              2018 net/9p/client.c 		err = PTR_ERR(req);
req              2021 net/9p/client.c 	err = p9pdu_readf(&req->rc, clnt->proto_version, "q", attr_size);
req              2023 net/9p/client.c 		trace_9p_protocol_dump(clnt, &req->rc);
req              2024 net/9p/client.c 		p9_tag_remove(clnt, req);
req              2027 net/9p/client.c 	p9_tag_remove(clnt, req);
req              2046 net/9p/client.c 	struct p9_req_t *req;
req              2054 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TXATTRCREATE, "dsqd",
req              2056 net/9p/client.c 	if (IS_ERR(req)) {
req              2057 net/9p/client.c 		err = PTR_ERR(req);
req              2061 net/9p/client.c 	p9_tag_remove(clnt, req);
req              2071 net/9p/client.c 	struct p9_req_t *req;
req              2097 net/9p/client.c 		req = p9_client_zc_rpc(clnt, P9_TREADDIR, &to, NULL, rsize, 0,
req              2101 net/9p/client.c 		req = p9_client_rpc(clnt, P9_TREADDIR, "dqd", fid->fid,
req              2104 net/9p/client.c 	if (IS_ERR(req)) {
req              2105 net/9p/client.c 		err = PTR_ERR(req);
req              2109 net/9p/client.c 	err = p9pdu_readf(&req->rc, clnt->proto_version, "D", &count, &dataptr);
req              2111 net/9p/client.c 		trace_9p_protocol_dump(clnt, &req->rc);
req              2124 net/9p/client.c 	p9_tag_remove(clnt, req);
req              2128 net/9p/client.c 	p9_tag_remove(clnt, req);
req              2139 net/9p/client.c 	struct p9_req_t *req;
req              2145 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TMKNOD, "dsdddg", fid->fid, name, mode,
req              2147 net/9p/client.c 	if (IS_ERR(req))
req              2148 net/9p/client.c 		return PTR_ERR(req);
req              2150 net/9p/client.c 	err = p9pdu_readf(&req->rc, clnt->proto_version, "Q", qid);
req              2152 net/9p/client.c 		trace_9p_protocol_dump(clnt, &req->rc);
req              2159 net/9p/client.c 	p9_tag_remove(clnt, req);
req              2170 net/9p/client.c 	struct p9_req_t *req;
req              2176 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TMKDIR, "dsdg", fid->fid, name, mode,
req              2178 net/9p/client.c 	if (IS_ERR(req))
req              2179 net/9p/client.c 		return PTR_ERR(req);
req              2181 net/9p/client.c 	err = p9pdu_readf(&req->rc, clnt->proto_version, "Q", qid);
req              2183 net/9p/client.c 		trace_9p_protocol_dump(clnt, &req->rc);
req              2190 net/9p/client.c 	p9_tag_remove(clnt, req);
req              2200 net/9p/client.c 	struct p9_req_t *req;
req              2209 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TLOCK, "dbdqqds", fid->fid, flock->type,
req              2213 net/9p/client.c 	if (IS_ERR(req))
req              2214 net/9p/client.c 		return PTR_ERR(req);
req              2216 net/9p/client.c 	err = p9pdu_readf(&req->rc, clnt->proto_version, "b", status);
req              2218 net/9p/client.c 		trace_9p_protocol_dump(clnt, &req->rc);
req              2223 net/9p/client.c 	p9_tag_remove(clnt, req);
req              2233 net/9p/client.c 	struct p9_req_t *req;
req              2241 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TGETLOCK, "dbqqds", fid->fid,  glock->type,
req              2244 net/9p/client.c 	if (IS_ERR(req))
req              2245 net/9p/client.c 		return PTR_ERR(req);
req              2247 net/9p/client.c 	err = p9pdu_readf(&req->rc, clnt->proto_version, "bqqds", &glock->type,
req              2251 net/9p/client.c 		trace_9p_protocol_dump(clnt, &req->rc);
req              2258 net/9p/client.c 	p9_tag_remove(clnt, req);
req              2267 net/9p/client.c 	struct p9_req_t *req;
req              2273 net/9p/client.c 	req = p9_client_rpc(clnt, P9_TREADLINK, "d", fid->fid);
req              2274 net/9p/client.c 	if (IS_ERR(req))
req              2275 net/9p/client.c 		return PTR_ERR(req);
req              2277 net/9p/client.c 	err = p9pdu_readf(&req->rc, clnt->proto_version, "s", target);
req              2279 net/9p/client.c 		trace_9p_protocol_dump(clnt, &req->rc);
req              2284 net/9p/client.c 	p9_tag_remove(clnt, req);
req               187 net/9p/trans_fd.c 	struct p9_req_t *req, *rtmp;
req               201 net/9p/trans_fd.c 	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
req               202 net/9p/trans_fd.c 		list_move(&req->req_list, &cancel_list);
req               204 net/9p/trans_fd.c 	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
req               205 net/9p/trans_fd.c 		list_move(&req->req_list, &cancel_list);
req               208 net/9p/trans_fd.c 	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
req               209 net/9p/trans_fd.c 		p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
req               210 net/9p/trans_fd.c 		list_del(&req->req_list);
req               211 net/9p/trans_fd.c 		if (!req->t_err)
req               212 net/9p/trans_fd.c 			req->t_err = err;
req               213 net/9p/trans_fd.c 		p9_client_cb(m->client, req, REQ_STATUS_ERROR);
req               441 net/9p/trans_fd.c 	struct p9_req_t *req;
req               458 net/9p/trans_fd.c 		req = list_entry(m->unsent_req_list.next, struct p9_req_t,
req               460 net/9p/trans_fd.c 		req->status = REQ_STATUS_SENT;
req               461 net/9p/trans_fd.c 		p9_debug(P9_DEBUG_TRANS, "move req %p\n", req);
req               462 net/9p/trans_fd.c 		list_move_tail(&req->req_list, &m->req_list);
req               464 net/9p/trans_fd.c 		m->wbuf = req->tc.sdata;
req               465 net/9p/trans_fd.c 		m->wsize = req->tc.size;
req               467 net/9p/trans_fd.c 		p9_req_get(req);
req               468 net/9p/trans_fd.c 		m->wreq = req;
req               656 net/9p/trans_fd.c static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
req               663 net/9p/trans_fd.c 		 m, current, &req->tc, req->tc.id);
req               668 net/9p/trans_fd.c 	req->status = REQ_STATUS_UNSENT;
req               669 net/9p/trans_fd.c 	list_add_tail(&req->req_list, &m->unsent_req_list);
req               683 net/9p/trans_fd.c static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
req               687 net/9p/trans_fd.c 	p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
req               691 net/9p/trans_fd.c 	if (req->status == REQ_STATUS_UNSENT) {
req               692 net/9p/trans_fd.c 		list_del(&req->req_list);
req               693 net/9p/trans_fd.c 		req->status = REQ_STATUS_FLSHD;
req               694 net/9p/trans_fd.c 		p9_req_put(req);
req               702 net/9p/trans_fd.c static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
req               704 net/9p/trans_fd.c 	p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
req               710 net/9p/trans_fd.c 	list_del(&req->req_list);
req               712 net/9p/trans_fd.c 	p9_req_put(req);
req               109 net/9p/trans_rdma.c 		struct p9_req_t *req;
req               296 net/9p/trans_rdma.c 	struct p9_req_t *req;
req               300 net/9p/trans_rdma.c 	req = NULL;
req               312 net/9p/trans_rdma.c 	req = p9_tag_lookup(client, tag);
req               313 net/9p/trans_rdma.c 	if (!req)
req               318 net/9p/trans_rdma.c 	if (unlikely(req->rc.sdata)) {
req               323 net/9p/trans_rdma.c 	req->rc.size = c->rc.size;
req               324 net/9p/trans_rdma.c 	req->rc.sdata = c->rc.sdata;
req               325 net/9p/trans_rdma.c 	p9_client_cb(client, req, REQ_STATUS_RCVD);
req               334 net/9p/trans_rdma.c 			req, err, wc->status);
req               349 net/9p/trans_rdma.c 			    c->busa, c->req->tc.size,
req               352 net/9p/trans_rdma.c 	p9_req_put(c->req);
req               412 net/9p/trans_rdma.c static int rdma_request(struct p9_client *client, struct p9_req_t *req)
req               433 net/9p/trans_rdma.c 			p9_fcall_fini(&req->rc);
req               434 net/9p/trans_rdma.c 			req->rc.sdata = NULL;
req               448 net/9p/trans_rdma.c 	rpl_context->rc.sdata = req->rc.sdata;
req               468 net/9p/trans_rdma.c 	req->rc.sdata = NULL;
req               477 net/9p/trans_rdma.c 	c->req = req;
req               480 net/9p/trans_rdma.c 				    c->req->tc.sdata, c->req->tc.size,
req               490 net/9p/trans_rdma.c 	sge.length = c->req->tc.size;
req               509 net/9p/trans_rdma.c 	req->status = REQ_STATUS_SENT;
req               519 net/9p/trans_rdma.c 	req->status = REQ_STATUS_ERROR;
req               584 net/9p/trans_rdma.c static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
req               595 net/9p/trans_rdma.c static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req)
req               129 net/9p/trans_virtio.c 	struct p9_req_t *req;
req               136 net/9p/trans_virtio.c 	while ((req = virtqueue_get_buf(chan->vq, &len)) != NULL) {
req               143 net/9p/trans_virtio.c 			req->rc.size = len;
req               144 net/9p/trans_virtio.c 			p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
req               190 net/9p/trans_virtio.c static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
req               196 net/9p/trans_virtio.c static int p9_virtio_cancelled(struct p9_client *client, struct p9_req_t *req)
req               198 net/9p/trans_virtio.c 	p9_req_put(req);
req               251 net/9p/trans_virtio.c p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
req               261 net/9p/trans_virtio.c 	req->status = REQ_STATUS_SENT;
req               268 net/9p/trans_virtio.c 			   VIRTQUEUE_NUM, req->tc.sdata, req->tc.size);
req               273 net/9p/trans_virtio.c 			  VIRTQUEUE_NUM, req->rc.sdata, req->rc.capacity);
req               277 net/9p/trans_virtio.c 	err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req,
req               387 net/9p/trans_virtio.c p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
req               414 net/9p/trans_virtio.c 			memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4);
req               421 net/9p/trans_virtio.c 		sz = cpu_to_le32(req->tc.size + outlen);
req               422 net/9p/trans_virtio.c 		memcpy(&req->tc.sdata[0], &sz, sizeof(sz));
req               433 net/9p/trans_virtio.c 			memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4);
req               437 net/9p/trans_virtio.c 	req->status = REQ_STATUS_SENT;
req               445 net/9p/trans_virtio.c 			   VIRTQUEUE_NUM, req->tc.sdata, req->tc.size);
req               464 net/9p/trans_virtio.c 			  VIRTQUEUE_NUM, req->rc.sdata, in_hdr_len);
req               475 net/9p/trans_virtio.c 	err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req,
req               500 net/9p/trans_virtio.c 	err = wait_event_killable(req->wq, req->status >= REQ_STATUS_RCVD);
req               521 net/9p/trans_virtio.c 		p9_req_put(req);
req                88 net/9p/trans_xen.c static int p9_xen_cancel(struct p9_client *client, struct p9_req_t *req)
req               199 net/9p/trans_xen.c 	struct p9_req_t *req;
req               224 net/9p/trans_xen.c 		req = p9_tag_lookup(priv->client, h.tag);
req               225 net/9p/trans_xen.c 		if (!req || req->status != REQ_STATUS_SENT) {
req               233 net/9p/trans_xen.c 		memcpy(&req->rc, &h, sizeof(h));
req               234 net/9p/trans_xen.c 		req->rc.offset = 0;
req               238 net/9p/trans_xen.c 		xen_9pfs_read_packet(req->rc.sdata, ring->data.in, h.size,
req               246 net/9p/trans_xen.c 		status = (req->status != REQ_STATUS_ERROR) ?
req               249 net/9p/trans_xen.c 		p9_client_cb(priv->client, req, status);
req               133 net/bluetooth/a2mp.c 	struct a2mp_discov_req *req = (void *) skb->data;
req               140 net/bluetooth/a2mp.c 	if (len < sizeof(*req))
req               143 net/bluetooth/a2mp.c 	skb_pull(skb, sizeof(*req));
req               145 net/bluetooth/a2mp.c 	ext_feat = le16_to_cpu(req->ext_feat);
req               147 net/bluetooth/a2mp.c 	BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat);
req               226 net/bluetooth/a2mp.c 			struct a2mp_info_req req;
req               229 net/bluetooth/a2mp.c 			req.id = cl->id;
req               231 net/bluetooth/a2mp.c 				  sizeof(req), &req);
req               294 net/bluetooth/a2mp.c 	struct a2mp_info_req *req  = (void *) skb->data;
req               299 net/bluetooth/a2mp.c 	if (le16_to_cpu(hdr->len) < sizeof(*req))
req               302 net/bluetooth/a2mp.c 	BT_DBG("id %d", req->id);
req               304 net/bluetooth/a2mp.c 	hdev = hci_dev_get(req->id);
req               308 net/bluetooth/a2mp.c 		rsp.id = req->id;
req               328 net/bluetooth/a2mp.c 	skb_pull(skb, sizeof(*req));
req               336 net/bluetooth/a2mp.c 	struct a2mp_amp_assoc_req req;
req               351 net/bluetooth/a2mp.c 	req.id = rsp->id;
req               352 net/bluetooth/a2mp.c 	a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req),
req               353 net/bluetooth/a2mp.c 		  &req);
req               362 net/bluetooth/a2mp.c 	struct a2mp_amp_assoc_req *req = (void *) skb->data;
req               366 net/bluetooth/a2mp.c 	if (le16_to_cpu(hdr->len) < sizeof(*req))
req               369 net/bluetooth/a2mp.c 	BT_DBG("id %d", req->id);
req               374 net/bluetooth/a2mp.c 	hdev = hci_dev_get(req->id);
req               377 net/bluetooth/a2mp.c 		rsp.id = req->id;
req               398 net/bluetooth/a2mp.c 	skb_pull(skb, sizeof(*req));
req               466 net/bluetooth/a2mp.c 	struct a2mp_physlink_req *req = (void *) skb->data;
req               473 net/bluetooth/a2mp.c 	if (le16_to_cpu(hdr->len) < sizeof(*req))
req               476 net/bluetooth/a2mp.c 	BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
req               478 net/bluetooth/a2mp.c 	rsp.local_id = req->remote_id;
req               479 net/bluetooth/a2mp.c 	rsp.remote_id = req->local_id;
req               481 net/bluetooth/a2mp.c 	hdev = hci_dev_get(req->remote_id);
req               499 net/bluetooth/a2mp.c 		size_t assoc_len = le16_to_cpu(hdr->len) - sizeof(*req);
req               502 net/bluetooth/a2mp.c 		assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL);
req               516 net/bluetooth/a2mp.c 	hcon = phylink_add(hdev, mgr, req->local_id, false);
req               546 net/bluetooth/a2mp.c 	struct a2mp_physlink_req *req = (void *) skb->data;
req               551 net/bluetooth/a2mp.c 	if (le16_to_cpu(hdr->len) < sizeof(*req))
req               554 net/bluetooth/a2mp.c 	BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id);
req               556 net/bluetooth/a2mp.c 	rsp.local_id = req->remote_id;
req               557 net/bluetooth/a2mp.c 	rsp.remote_id = req->local_id;
req               560 net/bluetooth/a2mp.c 	hdev = hci_dev_get(req->remote_id);
req               582 net/bluetooth/a2mp.c 	skb_pull(skb, sizeof(*req));
req               955 net/bluetooth/a2mp.c 	struct a2mp_physlink_req *req;
req               963 net/bluetooth/a2mp.c 	len = sizeof(*req) + loc_assoc->len;
req               967 net/bluetooth/a2mp.c 	req = kzalloc(len, GFP_KERNEL);
req               968 net/bluetooth/a2mp.c 	if (!req) {
req               977 net/bluetooth/a2mp.c 	req->local_id = hdev->id;
req               978 net/bluetooth/a2mp.c 	req->remote_id = bredr_chan->remote_amp_id;
req               979 net/bluetooth/a2mp.c 	memcpy(req->amp_assoc, loc_assoc->data, loc_assoc->len);
req               981 net/bluetooth/a2mp.c 	a2mp_send(mgr, A2MP_CREATEPHYSLINK_REQ, __next_ident(mgr), len, req);
req               985 net/bluetooth/a2mp.c 	kfree(req);
req              1018 net/bluetooth/a2mp.c 	struct a2mp_discov_req req;
req              1030 net/bluetooth/a2mp.c 	req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
req              1031 net/bluetooth/a2mp.c 	req.ext_feat = 0;
req              1032 net/bluetooth/a2mp.c 	a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req);
req               257 net/bluetooth/amp.c 	struct hci_request req;
req               266 net/bluetooth/amp.c 	hci_req_init(&req, hdev);
req               267 net/bluetooth/amp.c 	hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
req               268 net/bluetooth/amp.c 	err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
req               276 net/bluetooth/amp.c 	struct hci_request req;
req               285 net/bluetooth/amp.c 	hci_req_init(&req, hdev);
req               286 net/bluetooth/amp.c 	hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
req               287 net/bluetooth/amp.c 	err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
req               297 net/bluetooth/amp.c 	struct hci_request req;
req               307 net/bluetooth/amp.c 	hci_req_init(&req, hdev);
req               308 net/bluetooth/amp.c 	hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
req               309 net/bluetooth/amp.c 	err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
req               335 net/bluetooth/amp.c 	struct hci_request req;
req               373 net/bluetooth/amp.c 	hci_req_init(&req, hdev);
req               374 net/bluetooth/amp.c 	hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
req               375 net/bluetooth/amp.c 	hci_req_run_skb(&req, write_remote_amp_assoc_complete);
req               442 net/bluetooth/amp.c 	struct hci_request req;
req               455 net/bluetooth/amp.c 	hci_req_init(&req, hdev);
req               456 net/bluetooth/amp.c 	hci_req_add(&req, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
req               457 net/bluetooth/amp.c 	hci_req_run(&req, create_phylink_complete);
req               481 net/bluetooth/amp.c 	struct hci_request req;
req               494 net/bluetooth/amp.c 	hci_req_init(&req, hdev);
req               495 net/bluetooth/amp.c 	hci_req_add(&req, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
req               496 net/bluetooth/amp.c 	hci_req_run(&req, accept_phylink_complete);
req               139 net/bluetooth/bnep/bnep.h int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock);
req               140 net/bluetooth/bnep/bnep.h int bnep_del_connection(struct bnep_conndel_req *req);
req               141 net/bluetooth/bnep/bnep.h int bnep_get_connlist(struct bnep_connlist_req *req);
req               556 net/bluetooth/bnep/core.c int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
req               569 net/bluetooth/bnep/core.c 	if (req->flags & ~valid_flags)
req               577 net/bluetooth/bnep/core.c 			   (*req->device) ? req->device : "bnep%d",
req               601 net/bluetooth/bnep/core.c 	s->role  = req->role;
req               603 net/bluetooth/bnep/core.c 	s->flags = req->flags;
req               641 net/bluetooth/bnep/core.c 	strcpy(req->device, dev->name);
req               650 net/bluetooth/bnep/core.c int bnep_del_connection(struct bnep_conndel_req *req)
req               658 net/bluetooth/bnep/core.c 	if (req->flags & ~valid_flags)
req               663 net/bluetooth/bnep/core.c 	s = __bnep_get_session(req->dst);
req               686 net/bluetooth/bnep/core.c int bnep_get_connlist(struct bnep_connlist_req *req)
req               698 net/bluetooth/bnep/core.c 		if (copy_to_user(req->ci, &ci, sizeof(ci))) {
req               703 net/bluetooth/bnep/core.c 		if (++n >= req->cnum)
req               706 net/bluetooth/bnep/core.c 		req->ci++;
req               708 net/bluetooth/bnep/core.c 	req->cnum = n;
req                61 net/bluetooth/cmtp/cmtp.h int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock);
req                62 net/bluetooth/cmtp/cmtp.h int cmtp_del_connection(struct cmtp_conndel_req *req);
req                63 net/bluetooth/cmtp/cmtp.h int cmtp_get_connlist(struct cmtp_connlist_req *req);
req               330 net/bluetooth/cmtp/core.c int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
req               341 net/bluetooth/cmtp/core.c 	if (req->flags & ~valid_flags)
req               379 net/bluetooth/cmtp/core.c 	session->flags = req->flags;
req               414 net/bluetooth/cmtp/core.c int cmtp_del_connection(struct cmtp_conndel_req *req)
req               422 net/bluetooth/cmtp/core.c 	if (req->flags & ~valid_flags)
req               427 net/bluetooth/cmtp/core.c 	session = __cmtp_get_session(&req->bdaddr);
req               447 net/bluetooth/cmtp/core.c int cmtp_get_connlist(struct cmtp_connlist_req *req)
req               461 net/bluetooth/cmtp/core.c 		if (copy_to_user(req->ci, &ci, sizeof(ci))) {
req               466 net/bluetooth/cmtp/core.c 		if (++n >= req->cnum)
req               469 net/bluetooth/cmtp/core.c 		req->ci++;
req               471 net/bluetooth/cmtp/core.c 	req->cnum = n;
req                33 net/bluetooth/ecdh_helper.c static void ecdh_complete(struct crypto_async_request *req, int err)
req                35 net/bluetooth/ecdh_helper.c 	struct ecdh_completion *res = req->data;
req                63 net/bluetooth/ecdh_helper.c 	struct kpp_request *req;
req                73 net/bluetooth/ecdh_helper.c 	req = kpp_request_alloc(tfm, GFP_KERNEL);
req                74 net/bluetooth/ecdh_helper.c 	if (!req) {
req                86 net/bluetooth/ecdh_helper.c 	kpp_request_set_input(req, &src, 64);
req                87 net/bluetooth/ecdh_helper.c 	kpp_request_set_output(req, &dst, 32);
req                88 net/bluetooth/ecdh_helper.c 	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req                90 net/bluetooth/ecdh_helper.c 	err = crypto_kpp_compute_shared_secret(req);
req               105 net/bluetooth/ecdh_helper.c 	kpp_request_free(req);
req               170 net/bluetooth/ecdh_helper.c 	struct kpp_request *req;
req               180 net/bluetooth/ecdh_helper.c 	req = kpp_request_alloc(tfm, GFP_KERNEL);
req               181 net/bluetooth/ecdh_helper.c 	if (!req) {
req               188 net/bluetooth/ecdh_helper.c 	kpp_request_set_input(req, NULL, 0);
req               189 net/bluetooth/ecdh_helper.c 	kpp_request_set_output(req, &dst, 64);
req               190 net/bluetooth/ecdh_helper.c 	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req               193 net/bluetooth/ecdh_helper.c 	err = crypto_kpp_generate_public_key(req);
req               208 net/bluetooth/ecdh_helper.c 	kpp_request_free(req);
req               774 net/bluetooth/hci_conn.c static void hci_req_add_le_create_conn(struct hci_request *req,
req               785 net/bluetooth/hci_conn.c 		if (bacmp(&req->hdev->random_addr, direct_rpa))
req               786 net/bluetooth/hci_conn.c 			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
req               795 net/bluetooth/hci_conn.c 		if (hci_update_random_address(req, false, conn_use_rpa(conn),
req               840 net/bluetooth/hci_conn.c 		hci_req_add(req, HCI_OP_LE_EXT_CREATE_CONN, plen, data);
req               863 net/bluetooth/hci_conn.c 		hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
req               870 net/bluetooth/hci_conn.c static void hci_req_directed_advertising(struct hci_request *req,
req               873 net/bluetooth/hci_conn.c 	struct hci_dev *hdev = req->hdev;
req               901 net/bluetooth/hci_conn.c 		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
req               913 net/bluetooth/hci_conn.c 			hci_req_add(req,
req               918 net/bluetooth/hci_conn.c 		__hci_req_enable_ext_advertising(req, 0x00);
req               932 net/bluetooth/hci_conn.c 		if (hci_update_random_address(req, false, conn_use_rpa(conn),
req               951 net/bluetooth/hci_conn.c 		hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
req               954 net/bluetooth/hci_conn.c 		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
req               968 net/bluetooth/hci_conn.c 	struct hci_request req;
req              1024 net/bluetooth/hci_conn.c 	hci_req_init(&req, hdev);
req              1034 net/bluetooth/hci_conn.c 		hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
req              1045 net/bluetooth/hci_conn.c 			hci_req_purge(&req);
req              1050 net/bluetooth/hci_conn.c 		hci_req_directed_advertising(&req, conn);
req              1074 net/bluetooth/hci_conn.c 		hci_req_add_le_scan_disable(&req);
req              1078 net/bluetooth/hci_conn.c 	hci_req_add_le_create_conn(&req, conn, direct_rpa);
req              1081 net/bluetooth/hci_conn.c 	err = hci_req_run(&req, create_le_conn_complete);
req              1544 net/bluetooth/hci_conn.c 	struct hci_conn_list_req req, *cl;
req              1549 net/bluetooth/hci_conn.c 	if (copy_from_user(&req, arg, sizeof(req)))
req              1552 net/bluetooth/hci_conn.c 	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
req              1555 net/bluetooth/hci_conn.c 	size = sizeof(req) + req.conn_num * sizeof(*ci);
req              1561 net/bluetooth/hci_conn.c 	hdev = hci_dev_get(req.dev_id);
req              1577 net/bluetooth/hci_conn.c 		if (++n >= req.conn_num)
req              1584 net/bluetooth/hci_conn.c 	size = sizeof(req) + n * sizeof(*ci);
req              1596 net/bluetooth/hci_conn.c 	struct hci_conn_info_req req;
req              1599 net/bluetooth/hci_conn.c 	char __user *ptr = arg + sizeof(req);
req              1601 net/bluetooth/hci_conn.c 	if (copy_from_user(&req, arg, sizeof(req)))
req              1605 net/bluetooth/hci_conn.c 	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
req              1624 net/bluetooth/hci_conn.c 	struct hci_auth_info_req req;
req              1627 net/bluetooth/hci_conn.c 	if (copy_from_user(&req, arg, sizeof(req)))
req              1631 net/bluetooth/hci_conn.c 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
req              1633 net/bluetooth/hci_conn.c 		req.type = conn->auth_type;
req              1639 net/bluetooth/hci_conn.c 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
req               185 net/bluetooth/hci_core.c static int hci_reset_req(struct hci_request *req, unsigned long opt)
req               187 net/bluetooth/hci_core.c 	BT_DBG("%s %ld", req->hdev->name, opt);
req               190 net/bluetooth/hci_core.c 	set_bit(HCI_RESET, &req->hdev->flags);
req               191 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_RESET, 0, NULL);
req               195 net/bluetooth/hci_core.c static void bredr_init(struct hci_request *req)
req               197 net/bluetooth/hci_core.c 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
req               200 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
req               203 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
req               206 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
req               209 net/bluetooth/hci_core.c static void amp_init1(struct hci_request *req)
req               211 net/bluetooth/hci_core.c 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
req               214 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
req               217 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
req               220 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
req               223 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
req               226 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
req               229 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
req               232 net/bluetooth/hci_core.c static int amp_init2(struct hci_request *req)
req               238 net/bluetooth/hci_core.c 	if (req->hdev->commands[14] & 0x20)
req               239 net/bluetooth/hci_core.c 		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
req               244 net/bluetooth/hci_core.c static int hci_init1_req(struct hci_request *req, unsigned long opt)
req               246 net/bluetooth/hci_core.c 	struct hci_dev *hdev = req->hdev;
req               252 net/bluetooth/hci_core.c 		hci_reset_req(req, 0);
req               256 net/bluetooth/hci_core.c 		bredr_init(req);
req               259 net/bluetooth/hci_core.c 		amp_init1(req);
req               269 net/bluetooth/hci_core.c static void bredr_setup(struct hci_request *req)
req               275 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
req               278 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
req               281 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
req               284 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
req               287 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
req               290 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
req               294 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
req               298 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
req               301 net/bluetooth/hci_core.c static void le_setup(struct hci_request *req)
req               303 net/bluetooth/hci_core.c 	struct hci_dev *hdev = req->hdev;
req               306 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
req               309 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
req               312 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
req               319 net/bluetooth/hci_core.c static void hci_setup_event_mask(struct hci_request *req)
req               321 net/bluetooth/hci_core.c 	struct hci_dev *hdev = req->hdev;
req               412 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
req               415 net/bluetooth/hci_core.c static int hci_init2_req(struct hci_request *req, unsigned long opt)
req               417 net/bluetooth/hci_core.c 	struct hci_dev *hdev = req->hdev;
req               420 net/bluetooth/hci_core.c 		return amp_init2(req);
req               423 net/bluetooth/hci_core.c 		bredr_setup(req);
req               428 net/bluetooth/hci_core.c 		le_setup(req);
req               440 net/bluetooth/hci_core.c 		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
req               454 net/bluetooth/hci_core.c 			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
req               462 net/bluetooth/hci_core.c 			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
req               476 net/bluetooth/hci_core.c 		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
req               480 net/bluetooth/hci_core.c 		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
req               486 net/bluetooth/hci_core.c 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
req               492 net/bluetooth/hci_core.c 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
req               499 net/bluetooth/hci_core.c static void hci_setup_link_policy(struct hci_request *req)
req               501 net/bluetooth/hci_core.c 	struct hci_dev *hdev = req->hdev;
req               515 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
req               518 net/bluetooth/hci_core.c static void hci_set_le_support(struct hci_request *req)
req               520 net/bluetooth/hci_core.c 	struct hci_dev *hdev = req->hdev;
req               535 net/bluetooth/hci_core.c 		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
req               539 net/bluetooth/hci_core.c static void hci_set_event_mask_page_2(struct hci_request *req)
req               541 net/bluetooth/hci_core.c 	struct hci_dev *hdev = req->hdev;
req               580 net/bluetooth/hci_core.c 		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
req               584 net/bluetooth/hci_core.c static int hci_init3_req(struct hci_request *req, unsigned long opt)
req               586 net/bluetooth/hci_core.c 	struct hci_dev *hdev = req->hdev;
req               589 net/bluetooth/hci_core.c 	hci_setup_event_mask(req);
req               597 net/bluetooth/hci_core.c 		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
req               601 net/bluetooth/hci_core.c 		hci_setup_link_policy(req);
req               604 net/bluetooth/hci_core.c 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
req               611 net/bluetooth/hci_core.c 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
req               724 net/bluetooth/hci_core.c 		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
req               735 net/bluetooth/hci_core.c 			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
req               740 net/bluetooth/hci_core.c 			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
req               746 net/bluetooth/hci_core.c 			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
req               751 net/bluetooth/hci_core.c 			hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
req               757 net/bluetooth/hci_core.c 			hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
req               762 net/bluetooth/hci_core.c 			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
req               765 net/bluetooth/hci_core.c 			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
req               770 net/bluetooth/hci_core.c 			hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
req               774 net/bluetooth/hci_core.c 		hci_set_le_support(req);
req               782 net/bluetooth/hci_core.c 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
req               789 net/bluetooth/hci_core.c static int hci_init4_req(struct hci_request *req, unsigned long opt)
req               791 net/bluetooth/hci_core.c 	struct hci_dev *hdev = req->hdev;
req               812 net/bluetooth/hci_core.c 		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
req               818 net/bluetooth/hci_core.c 		hci_set_event_mask_page_2(req);
req               822 net/bluetooth/hci_core.c 		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
req               826 net/bluetooth/hci_core.c 		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
req               830 net/bluetooth/hci_core.c 		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
req               837 net/bluetooth/hci_core.c 		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
req               847 net/bluetooth/hci_core.c 		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
req               858 net/bluetooth/hci_core.c 		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
req               921 net/bluetooth/hci_core.c static int hci_init0_req(struct hci_request *req, unsigned long opt)
req               923 net/bluetooth/hci_core.c 	struct hci_dev *hdev = req->hdev;
req               929 net/bluetooth/hci_core.c 		hci_reset_req(req, 0);
req               932 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
req               936 net/bluetooth/hci_core.c 		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
req               958 net/bluetooth/hci_core.c static int hci_scan_req(struct hci_request *req, unsigned long opt)
req               962 net/bluetooth/hci_core.c 	BT_DBG("%s %x", req->hdev->name, scan);
req               965 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
req               969 net/bluetooth/hci_core.c static int hci_auth_req(struct hci_request *req, unsigned long opt)
req               973 net/bluetooth/hci_core.c 	BT_DBG("%s %x", req->hdev->name, auth);
req               976 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
req               980 net/bluetooth/hci_core.c static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
req               984 net/bluetooth/hci_core.c 	BT_DBG("%s %x", req->hdev->name, encrypt);
req               987 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
req               991 net/bluetooth/hci_core.c static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
req               995 net/bluetooth/hci_core.c 	BT_DBG("%s %x", req->hdev->name, policy);
req               998 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
req              1244 net/bluetooth/hci_core.c static int hci_inq_req(struct hci_request *req, unsigned long opt)
req              1247 net/bluetooth/hci_core.c 	struct hci_dev *hdev = req->hdev;
req              1259 net/bluetooth/hci_core.c 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
req              2998 net/bluetooth/hci_event.c 		struct hci_request req;
req              3009 net/bluetooth/hci_event.c 		hci_req_init(&req, hdev);
req              3012 net/bluetooth/hci_event.c 		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
req              3014 net/bluetooth/hci_event.c 		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
req                37 net/bluetooth/hci_request.c void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
req                39 net/bluetooth/hci_request.c 	skb_queue_head_init(&req->cmd_q);
req                40 net/bluetooth/hci_request.c 	req->hdev = hdev;
req                41 net/bluetooth/hci_request.c 	req->err = 0;
req                44 net/bluetooth/hci_request.c void hci_req_purge(struct hci_request *req)
req                46 net/bluetooth/hci_request.c 	skb_queue_purge(&req->cmd_q);
req                54 net/bluetooth/hci_request.c static int req_run(struct hci_request *req, hci_req_complete_t complete,
req                57 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req                61 net/bluetooth/hci_request.c 	BT_DBG("length %u", skb_queue_len(&req->cmd_q));
req                66 net/bluetooth/hci_request.c 	if (req->err) {
req                67 net/bluetooth/hci_request.c 		skb_queue_purge(&req->cmd_q);
req                68 net/bluetooth/hci_request.c 		return req->err;
req                72 net/bluetooth/hci_request.c 	if (skb_queue_empty(&req->cmd_q))
req                75 net/bluetooth/hci_request.c 	skb = skb_peek_tail(&req->cmd_q);
req                84 net/bluetooth/hci_request.c 	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
req                92 net/bluetooth/hci_request.c int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
req                94 net/bluetooth/hci_request.c 	return req_run(req, complete, NULL);
req                97 net/bluetooth/hci_request.c int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
req                99 net/bluetooth/hci_request.c 	return req_run(req, NULL, complete);
req               130 net/bluetooth/hci_request.c 	struct hci_request req;
req               136 net/bluetooth/hci_request.c 	hci_req_init(&req, hdev);
req               138 net/bluetooth/hci_request.c 	hci_req_add_ev(&req, opcode, plen, param, event);
req               142 net/bluetooth/hci_request.c 	err = hci_req_run_skb(&req, hci_req_sync_complete);
req               192 net/bluetooth/hci_request.c int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
req               196 net/bluetooth/hci_request.c 	struct hci_request req;
req               201 net/bluetooth/hci_request.c 	hci_req_init(&req, hdev);
req               205 net/bluetooth/hci_request.c 	err = func(&req, opt);
req               212 net/bluetooth/hci_request.c 	err = hci_req_run_skb(&req, hci_req_sync_complete);
req               268 net/bluetooth/hci_request.c int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
req               279 net/bluetooth/hci_request.c 	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
req               312 net/bluetooth/hci_request.c void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
req               315 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req               323 net/bluetooth/hci_request.c 	if (req->err)
req               330 net/bluetooth/hci_request.c 		req->err = -ENOMEM;
req               334 net/bluetooth/hci_request.c 	if (skb_queue_empty(&req->cmd_q))
req               339 net/bluetooth/hci_request.c 	skb_queue_tail(&req->cmd_q, skb);
req               342 net/bluetooth/hci_request.c void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
req               345 net/bluetooth/hci_request.c 	hci_req_add_ev(req, opcode, plen, param, 0);
req               348 net/bluetooth/hci_request.c void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
req               350 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req               376 net/bluetooth/hci_request.c 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
req               380 net/bluetooth/hci_request.c 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
req               389 net/bluetooth/hci_request.c static void __hci_update_background_scan(struct hci_request *req)
req               391 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req               429 net/bluetooth/hci_request.c 		hci_req_add_le_scan_disable(req);
req               448 net/bluetooth/hci_request.c 			hci_req_add_le_scan_disable(req);
req               450 net/bluetooth/hci_request.c 		hci_req_add_le_passive_scan(req);
req               456 net/bluetooth/hci_request.c void __hci_req_update_name(struct hci_request *req)
req               458 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req               463 net/bluetooth/hci_request.c 	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
req               624 net/bluetooth/hci_request.c void __hci_req_update_eir(struct hci_request *req)
req               626 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req               650 net/bluetooth/hci_request.c 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
req               653 net/bluetooth/hci_request.c void hci_req_add_le_scan_disable(struct hci_request *req)
req               655 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req               662 net/bluetooth/hci_request.c 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
req               669 net/bluetooth/hci_request.c 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
req               673 net/bluetooth/hci_request.c static void add_to_white_list(struct hci_request *req,
req               681 net/bluetooth/hci_request.c 	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
req               684 net/bluetooth/hci_request.c static u8 update_white_list(struct hci_request *req)
req               686 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req               710 net/bluetooth/hci_request.c 			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
req               750 net/bluetooth/hci_request.c 		add_to_white_list(req, params);
req               774 net/bluetooth/hci_request.c 		add_to_white_list(req, params);
req               786 net/bluetooth/hci_request.c static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
req               789 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req               834 net/bluetooth/hci_request.c 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
req               841 net/bluetooth/hci_request.c 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
req               853 net/bluetooth/hci_request.c 		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
req               859 net/bluetooth/hci_request.c 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
req               864 net/bluetooth/hci_request.c void hci_req_add_le_passive_scan(struct hci_request *req)
req               866 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req               876 net/bluetooth/hci_request.c 	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
req               884 net/bluetooth/hci_request.c 	filter_policy = update_white_list(req);
req               899 net/bluetooth/hci_request.c 	hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
req               940 net/bluetooth/hci_request.c void __hci_req_disable_advertising(struct hci_request *req)
req               942 net/bluetooth/hci_request.c 	if (ext_adv_capable(req->hdev)) {
req               949 net/bluetooth/hci_request.c 		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
req               953 net/bluetooth/hci_request.c 		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
req              1051 net/bluetooth/hci_request.c void __hci_req_enable_advertising(struct hci_request *req)
req              1053 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req              1072 net/bluetooth/hci_request.c 		__hci_req_disable_advertising(req);
req              1085 net/bluetooth/hci_request.c 	if (hci_update_random_address(req, !connectable,
req              1118 net/bluetooth/hci_request.c 	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
req              1120 net/bluetooth/hci_request.c 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
req              1204 net/bluetooth/hci_request.c void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
req              1206 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req              1235 net/bluetooth/hci_request.c 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
req              1257 net/bluetooth/hci_request.c 		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
req              1350 net/bluetooth/hci_request.c void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
req              1352 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req              1378 net/bluetooth/hci_request.c 		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
req              1396 net/bluetooth/hci_request.c 		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
req              1402 net/bluetooth/hci_request.c 	struct hci_request req;
req              1404 net/bluetooth/hci_request.c 	hci_req_init(&req, hdev);
req              1405 net/bluetooth/hci_request.c 	__hci_req_update_adv_data(&req, instance);
req              1407 net/bluetooth/hci_request.c 	return hci_req_run(&req, NULL);
req              1417 net/bluetooth/hci_request.c 	struct hci_request req;
req              1423 net/bluetooth/hci_request.c 	hci_req_init(&req, hdev);
req              1426 net/bluetooth/hci_request.c 		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
req              1430 net/bluetooth/hci_request.c 			__hci_req_start_ext_adv(&req, 0x00);
req              1432 net/bluetooth/hci_request.c 			__hci_req_update_adv_data(&req, 0x00);
req              1433 net/bluetooth/hci_request.c 			__hci_req_update_scan_rsp_data(&req, 0x00);
req              1434 net/bluetooth/hci_request.c 			__hci_req_enable_advertising(&req);
req              1438 net/bluetooth/hci_request.c 	hci_req_run(&req, adv_enable_complete);
req              1446 net/bluetooth/hci_request.c 	struct hci_request req;
req              1459 net/bluetooth/hci_request.c 	hci_req_init(&req, hdev);
req              1461 net/bluetooth/hci_request.c 	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
req              1464 net/bluetooth/hci_request.c 		__hci_req_disable_advertising(&req);
req              1466 net/bluetooth/hci_request.c 	hci_req_run(&req, NULL);
req              1553 net/bluetooth/hci_request.c void __hci_req_clear_ext_adv_sets(struct hci_request *req)
req              1555 net/bluetooth/hci_request.c 	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
req              1558 net/bluetooth/hci_request.c int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
req              1561 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req              1642 net/bluetooth/hci_request.c 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
req              1662 net/bluetooth/hci_request.c 		hci_req_add(req,
req              1670 net/bluetooth/hci_request.c int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
req              1672 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req              1708 net/bluetooth/hci_request.c 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
req              1715 net/bluetooth/hci_request.c int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
req              1717 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req              1721 net/bluetooth/hci_request.c 		__hci_req_disable_advertising(req);
req              1723 net/bluetooth/hci_request.c 	err = __hci_req_setup_ext_adv_instance(req, instance);
req              1727 net/bluetooth/hci_request.c 	__hci_req_update_scan_rsp_data(req, instance);
req              1728 net/bluetooth/hci_request.c 	__hci_req_enable_ext_advertising(req, instance);
req              1733 net/bluetooth/hci_request.c int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
req              1736 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req              1790 net/bluetooth/hci_request.c 		__hci_req_start_ext_adv(req, instance);
req              1792 net/bluetooth/hci_request.c 		__hci_req_update_adv_data(req, instance);
req              1793 net/bluetooth/hci_request.c 		__hci_req_update_scan_rsp_data(req, instance);
req              1794 net/bluetooth/hci_request.c 		__hci_req_enable_advertising(req);
req              1820 net/bluetooth/hci_request.c 				struct hci_request *req, u8 instance,
req              1865 net/bluetooth/hci_request.c 	if (!req || !hdev_is_powered(hdev) ||
req              1870 net/bluetooth/hci_request.c 		__hci_req_schedule_adv_instance(req, next_instance->instance,
req              1874 net/bluetooth/hci_request.c static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
req              1876 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req              1895 net/bluetooth/hci_request.c 	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
req              1898 net/bluetooth/hci_request.c int hci_update_random_address(struct hci_request *req, bool require_privacy,
req              1901 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req              1923 net/bluetooth/hci_request.c 		set_random_addr(req, &hdev->rpa);
req              1954 net/bluetooth/hci_request.c 		set_random_addr(req, &nrpa);
req              1973 net/bluetooth/hci_request.c 			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
req              2004 net/bluetooth/hci_request.c void __hci_req_update_scan(struct hci_request *req)
req              2006 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req              2031 net/bluetooth/hci_request.c 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
req              2034 net/bluetooth/hci_request.c static int update_scan(struct hci_request *req, unsigned long opt)
req              2036 net/bluetooth/hci_request.c 	hci_dev_lock(req->hdev);
req              2037 net/bluetooth/hci_request.c 	__hci_req_update_scan(req);
req              2038 net/bluetooth/hci_request.c 	hci_dev_unlock(req->hdev);
req              2049 net/bluetooth/hci_request.c static int connectable_update(struct hci_request *req, unsigned long opt)
req              2051 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req              2055 net/bluetooth/hci_request.c 	__hci_req_update_scan(req);
req              2062 net/bluetooth/hci_request.c 		__hci_req_update_adv_data(req, hdev->cur_adv_instance);
req              2068 net/bluetooth/hci_request.c 			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
req              2070 net/bluetooth/hci_request.c 			__hci_req_enable_advertising(req);
req              2073 net/bluetooth/hci_request.c 	__hci_update_background_scan(req);
req              2101 net/bluetooth/hci_request.c void __hci_req_update_class(struct hci_request *req)
req              2103 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req              2127 net/bluetooth/hci_request.c 	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
req              2130 net/bluetooth/hci_request.c static void write_iac(struct hci_request *req)
req              2132 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req              2155 net/bluetooth/hci_request.c 	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
req              2159 net/bluetooth/hci_request.c static int discoverable_update(struct hci_request *req, unsigned long opt)
req              2161 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req              2166 net/bluetooth/hci_request.c 		write_iac(req);
req              2167 net/bluetooth/hci_request.c 		__hci_req_update_scan(req);
req              2168 net/bluetooth/hci_request.c 		__hci_req_update_class(req);
req              2175 net/bluetooth/hci_request.c 		__hci_req_update_adv_data(req, 0x00);
req              2182 net/bluetooth/hci_request.c 				__hci_req_start_ext_adv(req, 0x00);
req              2184 net/bluetooth/hci_request.c 				__hci_req_enable_advertising(req);
req              2203 net/bluetooth/hci_request.c void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
req              2214 net/bluetooth/hci_request.c 			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
req              2221 net/bluetooth/hci_request.c 			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
req              2231 net/bluetooth/hci_request.c 			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
req              2234 net/bluetooth/hci_request.c 			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
req              2236 net/bluetooth/hci_request.c 			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
req              2247 net/bluetooth/hci_request.c 			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
req              2262 net/bluetooth/hci_request.c 			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
req              2280 net/bluetooth/hci_request.c 	struct hci_request req;
req              2283 net/bluetooth/hci_request.c 	hci_req_init(&req, conn->hdev);
req              2285 net/bluetooth/hci_request.c 	__hci_abort_conn(&req, conn, reason);
req              2287 net/bluetooth/hci_request.c 	err = hci_req_run(&req, abort_conn_complete);
req              2296 net/bluetooth/hci_request.c static int update_bg_scan(struct hci_request *req, unsigned long opt)
req              2298 net/bluetooth/hci_request.c 	hci_dev_lock(req->hdev);
req              2299 net/bluetooth/hci_request.c 	__hci_update_background_scan(req);
req              2300 net/bluetooth/hci_request.c 	hci_dev_unlock(req->hdev);
req              2325 net/bluetooth/hci_request.c static int le_scan_disable(struct hci_request *req, unsigned long opt)
req              2327 net/bluetooth/hci_request.c 	hci_req_add_le_scan_disable(req);
req              2331 net/bluetooth/hci_request.c static int bredr_inquiry(struct hci_request *req, unsigned long opt)
req              2338 net/bluetooth/hci_request.c 	BT_DBG("%s", req->hdev->name);
req              2340 net/bluetooth/hci_request.c 	hci_dev_lock(req->hdev);
req              2341 net/bluetooth/hci_request.c 	hci_inquiry_cache_flush(req->hdev);
req              2342 net/bluetooth/hci_request.c 	hci_dev_unlock(req->hdev);
req              2346 net/bluetooth/hci_request.c 	if (req->hdev->discovery.limited)
req              2353 net/bluetooth/hci_request.c 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
req              2417 net/bluetooth/hci_request.c static int le_scan_restart(struct hci_request *req, unsigned long opt)
req              2419 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req              2425 net/bluetooth/hci_request.c 	hci_req_add_le_scan_disable(req);
req              2434 net/bluetooth/hci_request.c 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
req              2442 net/bluetooth/hci_request.c 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
req              2498 net/bluetooth/hci_request.c static int active_scan(struct hci_request *req, unsigned long opt)
req              2501 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req              2521 net/bluetooth/hci_request.c 		__hci_req_disable_advertising(req);
req              2529 net/bluetooth/hci_request.c 		hci_req_add_le_scan_disable(req);
req              2535 net/bluetooth/hci_request.c 	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
req              2540 net/bluetooth/hci_request.c 	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
req              2545 net/bluetooth/hci_request.c static int interleaved_discov(struct hci_request *req, unsigned long opt)
req              2549 net/bluetooth/hci_request.c 	BT_DBG("%s", req->hdev->name);
req              2551 net/bluetooth/hci_request.c 	err = active_scan(req, opt);
req              2555 net/bluetooth/hci_request.c 	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
req              2627 net/bluetooth/hci_request.c bool hci_req_stop_discovery(struct hci_request *req)
req              2629 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req              2639 net/bluetooth/hci_request.c 			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
req              2643 net/bluetooth/hci_request.c 			hci_req_add_le_scan_disable(req);
req              2650 net/bluetooth/hci_request.c 			hci_req_add_le_scan_disable(req);
req              2666 net/bluetooth/hci_request.c 		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
req              2674 net/bluetooth/hci_request.c static int stop_discovery(struct hci_request *req, unsigned long opt)
req              2676 net/bluetooth/hci_request.c 	hci_dev_lock(req->hdev);
req              2677 net/bluetooth/hci_request.c 	hci_req_stop_discovery(req);
req              2678 net/bluetooth/hci_request.c 	hci_dev_unlock(req->hdev);
req              2734 net/bluetooth/hci_request.c static int powered_update_hci(struct hci_request *req, unsigned long opt)
req              2736 net/bluetooth/hci_request.c 	struct hci_dev *hdev = req->hdev;
req              2745 net/bluetooth/hci_request.c 		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
req              2750 net/bluetooth/hci_request.c 			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
req              2767 net/bluetooth/hci_request.c 			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
req              2781 net/bluetooth/hci_request.c 				err = __hci_req_setup_ext_adv_instance(req,
req              2784 net/bluetooth/hci_request.c 					__hci_req_update_scan_rsp_data(req,
req              2788 net/bluetooth/hci_request.c 				__hci_req_update_adv_data(req, 0x00);
req              2789 net/bluetooth/hci_request.c 				__hci_req_update_scan_rsp_data(req, 0x00);
req              2794 net/bluetooth/hci_request.c 					__hci_req_enable_advertising(req);
req              2796 net/bluetooth/hci_request.c 					__hci_req_enable_ext_advertising(req,
req              2804 net/bluetooth/hci_request.c 			__hci_req_schedule_adv_instance(req,
req              2812 net/bluetooth/hci_request.c 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
req              2817 net/bluetooth/hci_request.c 			__hci_req_write_fast_connectable(req, true);
req              2819 net/bluetooth/hci_request.c 			__hci_req_write_fast_connectable(req, false);
req              2820 net/bluetooth/hci_request.c 		__hci_req_update_scan(req);
req              2821 net/bluetooth/hci_request.c 		__hci_req_update_class(req);
req              2822 net/bluetooth/hci_request.c 		__hci_req_update_name(req);
req              2823 net/bluetooth/hci_request.c 		__hci_req_update_eir(req);
req                38 net/bluetooth/hci_request.h void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
req                39 net/bluetooth/hci_request.h void hci_req_purge(struct hci_request *req);
req                41 net/bluetooth/hci_request.h int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
req                42 net/bluetooth/hci_request.h int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
req                43 net/bluetooth/hci_request.h void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
req                45 net/bluetooth/hci_request.h void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
req                51 net/bluetooth/hci_request.h int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
req                54 net/bluetooth/hci_request.h int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
req                64 net/bluetooth/hci_request.h void __hci_req_write_fast_connectable(struct hci_request *req, bool enable);
req                65 net/bluetooth/hci_request.h void __hci_req_update_name(struct hci_request *req);
req                66 net/bluetooth/hci_request.h void __hci_req_update_eir(struct hci_request *req);
req                68 net/bluetooth/hci_request.h void hci_req_add_le_scan_disable(struct hci_request *req);
req                69 net/bluetooth/hci_request.h void hci_req_add_le_passive_scan(struct hci_request *req);
req                72 net/bluetooth/hci_request.h void __hci_req_enable_advertising(struct hci_request *req);
req                73 net/bluetooth/hci_request.h void __hci_req_disable_advertising(struct hci_request *req);
req                74 net/bluetooth/hci_request.h void __hci_req_update_adv_data(struct hci_request *req, u8 instance);
req                76 net/bluetooth/hci_request.h void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance);
req                78 net/bluetooth/hci_request.h int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
req                81 net/bluetooth/hci_request.h 				struct hci_request *req, u8 instance,
req                84 net/bluetooth/hci_request.h int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance);
req                85 net/bluetooth/hci_request.h int __hci_req_start_ext_adv(struct hci_request *req, u8 instance);
req                86 net/bluetooth/hci_request.h int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance);
req                87 net/bluetooth/hci_request.h void __hci_req_clear_ext_adv_sets(struct hci_request *req);
req                92 net/bluetooth/hci_request.h void __hci_req_update_class(struct hci_request *req);
req                95 net/bluetooth/hci_request.h bool hci_req_stop_discovery(struct hci_request *req);
req               102 net/bluetooth/hci_request.h void __hci_req_update_scan(struct hci_request *req);
req               104 net/bluetooth/hci_request.h int hci_update_random_address(struct hci_request *req, bool require_privacy,
req               108 net/bluetooth/hci_request.h void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
req               657 net/bluetooth/hidp/core.c 				const struct hidp_connadd_req *req)
req               673 net/bluetooth/hidp/core.c 	input->id.vendor  = req->vendor;
req               674 net/bluetooth/hidp/core.c 	input->id.product = req->product;
req               675 net/bluetooth/hidp/core.c 	input->id.version = req->version;
req               677 net/bluetooth/hidp/core.c 	if (req->subclass & 0x40) {
req               693 net/bluetooth/hidp/core.c 	if (req->subclass & 0x80) {
req               756 net/bluetooth/hidp/core.c 				const struct hidp_connadd_req *req)
req               761 net/bluetooth/hidp/core.c 	session->rd_data = memdup_user(req->rd_data, req->rd_size);
req               765 net/bluetooth/hidp/core.c 	session->rd_size = req->rd_size;
req               778 net/bluetooth/hidp/core.c 	hid->vendor  = req->vendor;
req               779 net/bluetooth/hidp/core.c 	hid->product = req->product;
req               780 net/bluetooth/hidp/core.c 	hid->version = req->version;
req               781 net/bluetooth/hidp/core.c 	hid->country = req->country;
req               783 net/bluetooth/hidp/core.c 	strscpy(hid->name, req->name, sizeof(hid->name));
req               815 net/bluetooth/hidp/core.c 				 const struct hidp_connadd_req *req)
req               819 net/bluetooth/hidp/core.c 	if (req->rd_size > 0) {
req               820 net/bluetooth/hidp/core.c 		ret = hidp_setup_hid(session, req);
req               826 net/bluetooth/hidp/core.c 		ret = hidp_setup_input(session, req);
req               914 net/bluetooth/hidp/core.c 			    const struct hidp_connadd_req *req,
req               932 net/bluetooth/hidp/core.c 	session->flags = req->flags & BIT(HIDP_BLUETOOTH_VENDOR_ID);
req               948 net/bluetooth/hidp/core.c 	session->idle_to = req->idle_to;
req               958 net/bluetooth/hidp/core.c 	ret = hidp_session_dev_init(session, req);
req              1346 net/bluetooth/hidp/core.c int hidp_connection_add(const struct hidp_connadd_req *req,
req              1361 net/bluetooth/hidp/core.c 	if (req->flags & ~valid_flags)
req              1375 net/bluetooth/hidp/core.c 			       intr_sock, req, conn);
req              1392 net/bluetooth/hidp/core.c int hidp_connection_del(struct hidp_conndel_req *req)
req              1397 net/bluetooth/hidp/core.c 	if (req->flags & ~valid_flags)
req              1400 net/bluetooth/hidp/core.c 	session = hidp_session_find(&req->bdaddr);
req              1404 net/bluetooth/hidp/core.c 	if (req->flags & BIT(HIDP_VIRTUAL_CABLE_UNPLUG))
req              1417 net/bluetooth/hidp/core.c int hidp_get_connlist(struct hidp_connlist_req *req)
req              1431 net/bluetooth/hidp/core.c 		if (copy_to_user(req->ci, &ci, sizeof(ci))) {
req              1436 net/bluetooth/hidp/core.c 		if (++n >= req->cnum)
req              1439 net/bluetooth/hidp/core.c 		req->ci++;
req              1441 net/bluetooth/hidp/core.c 	req->cnum = n;
req               125 net/bluetooth/hidp/hidp.h int hidp_connection_add(const struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock);
req               126 net/bluetooth/hidp/hidp.h int hidp_connection_del(struct hidp_conndel_req *req);
req               127 net/bluetooth/hidp/hidp.h int hidp_get_connlist(struct hidp_connlist_req *req);
req              1181 net/bluetooth/l2cap_core.c 	struct l2cap_conn_req req;
req              1183 net/bluetooth/l2cap_core.c 	req.scid = cpu_to_le16(chan->scid);
req              1184 net/bluetooth/l2cap_core.c 	req.psm  = chan->psm;
req              1190 net/bluetooth/l2cap_core.c 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
req              1195 net/bluetooth/l2cap_core.c 	struct l2cap_create_chan_req req;
req              1196 net/bluetooth/l2cap_core.c 	req.scid = cpu_to_le16(chan->scid);
req              1197 net/bluetooth/l2cap_core.c 	req.psm  = chan->psm;
req              1198 net/bluetooth/l2cap_core.c 	req.amp_id = amp_id;
req              1203 net/bluetooth/l2cap_core.c 		       sizeof(req), &req);
req              1287 net/bluetooth/l2cap_core.c 	struct l2cap_le_conn_req req;
req              1294 net/bluetooth/l2cap_core.c 	req.psm     = chan->psm;
req              1295 net/bluetooth/l2cap_core.c 	req.scid    = cpu_to_le16(chan->scid);
req              1296 net/bluetooth/l2cap_core.c 	req.mtu     = cpu_to_le16(chan->imtu);
req              1297 net/bluetooth/l2cap_core.c 	req.mps     = cpu_to_le16(chan->mps);
req              1298 net/bluetooth/l2cap_core.c 	req.credits = cpu_to_le16(chan->rx_credits);
req              1303 net/bluetooth/l2cap_core.c 		       sizeof(req), &req);
req              1336 net/bluetooth/l2cap_core.c 	struct l2cap_info_req req;
req              1341 net/bluetooth/l2cap_core.c 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
req              1349 net/bluetooth/l2cap_core.c 		       sizeof(req), &req);
req              1413 net/bluetooth/l2cap_core.c 	struct l2cap_disconn_req req;
req              1429 net/bluetooth/l2cap_core.c 	req.dcid = cpu_to_le16(chan->dcid);
req              1430 net/bluetooth/l2cap_core.c 	req.scid = cpu_to_le16(chan->scid);
req              1432 net/bluetooth/l2cap_core.c 		       sizeof(req), &req);
req              1539 net/bluetooth/l2cap_core.c 		struct l2cap_conn_param_update_req req;
req              1541 net/bluetooth/l2cap_core.c 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
req              1542 net/bluetooth/l2cap_core.c 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
req              1543 net/bluetooth/l2cap_core.c 		req.latency = cpu_to_le16(hcon->le_conn_latency);
req              1544 net/bluetooth/l2cap_core.c 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
req              1547 net/bluetooth/l2cap_core.c 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
req              3231 net/bluetooth/l2cap_core.c 	struct l2cap_conf_req *req = data;
req              3233 net/bluetooth/l2cap_core.c 	void *ptr = req->data;
req              3345 net/bluetooth/l2cap_core.c 	req->dcid  = cpu_to_le16(chan->dcid);
req              3346 net/bluetooth/l2cap_core.c 	req->flags = cpu_to_le16(0);
req              3356 net/bluetooth/l2cap_core.c 	void *req = chan->conf_req;
req              3370 net/bluetooth/l2cap_core.c 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
req              3579 net/bluetooth/l2cap_core.c 	struct l2cap_conf_req *req = data;
req              3580 net/bluetooth/l2cap_core.c 	void *ptr = req->data;
req              3688 net/bluetooth/l2cap_core.c 	req->dcid   = cpu_to_le16(chan->dcid);
req              3689 net/bluetooth/l2cap_core.c 	req->flags  = cpu_to_le16(0);
req              3840 net/bluetooth/l2cap_core.c 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
req              3845 net/bluetooth/l2cap_core.c 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
req              3846 net/bluetooth/l2cap_core.c 	__le16 psm = req->psm;
req              4005 net/bluetooth/l2cap_core.c 	u8 req[128];
req              4050 net/bluetooth/l2cap_core.c 			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
req              4114 net/bluetooth/l2cap_core.c 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
req              4120 net/bluetooth/l2cap_core.c 	if (cmd_len < sizeof(*req))
req              4123 net/bluetooth/l2cap_core.c 	dcid  = __le16_to_cpu(req->dcid);
req              4124 net/bluetooth/l2cap_core.c 	flags = __le16_to_cpu(req->flags);
req              4141 net/bluetooth/l2cap_core.c 	len = cmd_len - sizeof(*req);
req              4150 net/bluetooth/l2cap_core.c 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
req              4276 net/bluetooth/l2cap_core.c 			char req[64];
req              4278 net/bluetooth/l2cap_core.c 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
req              4286 net/bluetooth/l2cap_core.c 						   req, sizeof(req), &result);
req              4293 net/bluetooth/l2cap_core.c 				       L2CAP_CONF_REQ, len, req);
req              4336 net/bluetooth/l2cap_core.c 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
req              4341 net/bluetooth/l2cap_core.c 	if (cmd_len != sizeof(*req))
req              4344 net/bluetooth/l2cap_core.c 	scid = __le16_to_cpu(req->scid);
req              4345 net/bluetooth/l2cap_core.c 	dcid = __le16_to_cpu(req->dcid);
req              4428 net/bluetooth/l2cap_core.c 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
req              4431 net/bluetooth/l2cap_core.c 	if (cmd_len != sizeof(*req))
req              4434 net/bluetooth/l2cap_core.c 	type = __le16_to_cpu(req->type);
req              4511 net/bluetooth/l2cap_core.c 			struct l2cap_info_req req;
req              4512 net/bluetooth/l2cap_core.c 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
req              4517 net/bluetooth/l2cap_core.c 				       L2CAP_INFO_REQ, sizeof(req), &req);
req              4542 net/bluetooth/l2cap_core.c 	struct l2cap_create_chan_req *req = data;
req              4548 net/bluetooth/l2cap_core.c 	if (cmd_len != sizeof(*req))
req              4554 net/bluetooth/l2cap_core.c 	psm = le16_to_cpu(req->psm);
req              4555 net/bluetooth/l2cap_core.c 	scid = le16_to_cpu(req->scid);
req              4557 net/bluetooth/l2cap_core.c 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
req              4560 net/bluetooth/l2cap_core.c 	if (req->amp_id == AMP_ID_BREDR) {
req              4562 net/bluetooth/l2cap_core.c 			      req->amp_id);
req              4567 net/bluetooth/l2cap_core.c 	hdev = hci_dev_get(req->amp_id);
req              4577 net/bluetooth/l2cap_core.c 			     req->amp_id);
req              4617 net/bluetooth/l2cap_core.c 	struct l2cap_move_chan_req req;
req              4625 net/bluetooth/l2cap_core.c 	req.icid = cpu_to_le16(chan->scid);
req              4626 net/bluetooth/l2cap_core.c 	req.dest_amp_id = dest_amp_id;
req              4628 net/bluetooth/l2cap_core.c 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
req              4629 net/bluetooth/l2cap_core.c 		       &req);
req              4966 net/bluetooth/l2cap_core.c 	struct l2cap_move_chan_req *req = data;
req              4972 net/bluetooth/l2cap_core.c 	if (cmd_len != sizeof(*req))
req              4975 net/bluetooth/l2cap_core.c 	icid = le16_to_cpu(req->icid);
req              4977 net/bluetooth/l2cap_core.c 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
req              5001 net/bluetooth/l2cap_core.c 	if (chan->local_amp_id == req->dest_amp_id) {
req              5006 net/bluetooth/l2cap_core.c 	if (req->dest_amp_id != AMP_ID_BREDR) {
req              5008 net/bluetooth/l2cap_core.c 		hdev = hci_dev_get(req->dest_amp_id);
req              5033 net/bluetooth/l2cap_core.c 	chan->move_id = req->dest_amp_id;
req              5036 net/bluetooth/l2cap_core.c 	if (req->dest_amp_id == AMP_ID_BREDR) {
req              5284 net/bluetooth/l2cap_core.c 	struct l2cap_conn_param_update_req *req;
req              5295 net/bluetooth/l2cap_core.c 	req = (struct l2cap_conn_param_update_req *) data;
req              5296 net/bluetooth/l2cap_core.c 	min		= __le16_to_cpu(req->min);
req              5297 net/bluetooth/l2cap_core.c 	max		= __le16_to_cpu(req->max);
req              5298 net/bluetooth/l2cap_core.c 	latency		= __le16_to_cpu(req->latency);
req              5299 net/bluetooth/l2cap_core.c 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
req              5500 net/bluetooth/l2cap_core.c 	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
req              5507 net/bluetooth/l2cap_core.c 	if (cmd_len != sizeof(*req))
req              5510 net/bluetooth/l2cap_core.c 	scid = __le16_to_cpu(req->scid);
req              5511 net/bluetooth/l2cap_core.c 	mtu  = __le16_to_cpu(req->mtu);
req              5512 net/bluetooth/l2cap_core.c 	mps  = __le16_to_cpu(req->mps);
req              5513 net/bluetooth/l2cap_core.c 	psm  = req->psm;
req              5573 net/bluetooth/l2cap_core.c 	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
req               908 net/bluetooth/mgmt.c 	struct hci_request req;
req               913 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req               917 net/bluetooth/mgmt.c 	__hci_req_update_eir(&req);
req               918 net/bluetooth/mgmt.c 	__hci_req_update_class(&req);
req               922 net/bluetooth/mgmt.c 	hci_req_run(&req, NULL);
req               929 net/bluetooth/mgmt.c 	struct hci_request req;
req               942 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req               944 net/bluetooth/mgmt.c 		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
req               946 net/bluetooth/mgmt.c 		__hci_req_enable_advertising(&req);
req               947 net/bluetooth/mgmt.c 	hci_req_run(&req, NULL);
req              1123 net/bluetooth/mgmt.c 	struct hci_request req;
req              1128 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req              1133 net/bluetooth/mgmt.c 		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
req              1139 net/bluetooth/mgmt.c 		__hci_req_disable_advertising(&req);
req              1141 net/bluetooth/mgmt.c 	discov_stopped = hci_req_stop_discovery(&req);
req              1145 net/bluetooth/mgmt.c 		__hci_abort_conn(&req, conn, 0x15);
req              1148 net/bluetooth/mgmt.c 	err = hci_req_run(&req, clean_up_hci_complete);
req              1849 net/bluetooth/mgmt.c 		struct hci_request req;
req              1850 net/bluetooth/mgmt.c 		hci_req_init(&req, hdev);
req              1854 net/bluetooth/mgmt.c 			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
req              1856 net/bluetooth/mgmt.c 				__hci_req_update_scan_rsp_data(&req, 0x00);
req              1858 net/bluetooth/mgmt.c 			__hci_req_update_adv_data(&req, 0x00);
req              1859 net/bluetooth/mgmt.c 			__hci_req_update_scan_rsp_data(&req, 0x00);
req              1861 net/bluetooth/mgmt.c 		hci_req_run(&req, NULL);
req              1874 net/bluetooth/mgmt.c 	struct hci_request req;
req              1949 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req              1958 net/bluetooth/mgmt.c 			__hci_req_disable_advertising(&req);
req              1961 net/bluetooth/mgmt.c 			__hci_req_clear_ext_adv_sets(&req);
req              1964 net/bluetooth/mgmt.c 	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
req              1967 net/bluetooth/mgmt.c 	err = hci_req_run(&req, le_enable_complete);
req              2048 net/bluetooth/mgmt.c 	struct hci_request req;
req              2074 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req              2076 net/bluetooth/mgmt.c 	__hci_req_update_class(&req);
req              2077 net/bluetooth/mgmt.c 	__hci_req_update_eir(&req);
req              2079 net/bluetooth/mgmt.c 	err = hci_req_run(&req, add_uuid_complete);
req              2130 net/bluetooth/mgmt.c 	struct hci_request req;
req              2174 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req              2176 net/bluetooth/mgmt.c 	__hci_req_update_class(&req);
req              2177 net/bluetooth/mgmt.c 	__hci_req_update_eir(&req);
req              2179 net/bluetooth/mgmt.c 	err = hci_req_run(&req, remove_uuid_complete);
req              2214 net/bluetooth/mgmt.c 	struct hci_request req;
req              2246 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req              2252 net/bluetooth/mgmt.c 		__hci_req_update_eir(&req);
req              2255 net/bluetooth/mgmt.c 	__hci_req_update_class(&req);
req              2257 net/bluetooth/mgmt.c 	err = hci_req_run(&req, set_class_complete);
req              3153 net/bluetooth/mgmt.c 	struct hci_request req;
req              3170 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req              3171 net/bluetooth/mgmt.c 	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
req              3176 net/bluetooth/mgmt.c 	hci_req_run(&req, NULL);
req              3216 net/bluetooth/mgmt.c 	struct hci_request req;
req              3259 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req              3262 net/bluetooth/mgmt.c 		__hci_req_update_name(&req);
req              3263 net/bluetooth/mgmt.c 		__hci_req_update_eir(&req);
req              3270 net/bluetooth/mgmt.c 		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
req              3272 net/bluetooth/mgmt.c 	err = hci_req_run(&req, set_name_complete);
req              3385 net/bluetooth/mgmt.c 	struct hci_request req;
req              3494 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req              3522 net/bluetooth/mgmt.c 	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
req              3524 net/bluetooth/mgmt.c 	err = hci_req_run_skb(&req, set_default_phy_complete);
req              3597 net/bluetooth/mgmt.c 	struct hci_request req;
req              3628 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req              3631 net/bluetooth/mgmt.c 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
req              3633 net/bluetooth/mgmt.c 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
req              3635 net/bluetooth/mgmt.c 	err = hci_req_run_skb(&req, read_local_oob_data_complete);
req              4200 net/bluetooth/mgmt.c 	struct hci_request req;
req              4222 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req              4223 net/bluetooth/mgmt.c 	__hci_req_update_eir(&req);
req              4224 net/bluetooth/mgmt.c 	hci_req_run(&req, NULL);
req              4241 net/bluetooth/mgmt.c 	struct hci_request req;
req              4286 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req              4288 net/bluetooth/mgmt.c 	err = __hci_req_schedule_adv_instance(&req, instance, true);
req              4291 net/bluetooth/mgmt.c 		err = hci_req_run(&req, enable_advertising_instance);
req              4305 net/bluetooth/mgmt.c 	struct hci_request req;
req              4372 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req              4389 net/bluetooth/mgmt.c 			__hci_req_start_ext_adv(&req, 0x00);
req              4391 net/bluetooth/mgmt.c 			__hci_req_update_adv_data(&req, 0x00);
req              4392 net/bluetooth/mgmt.c 			__hci_req_update_scan_rsp_data(&req, 0x00);
req              4393 net/bluetooth/mgmt.c 			__hci_req_enable_advertising(&req);
req              4396 net/bluetooth/mgmt.c 		__hci_req_disable_advertising(&req);
req              4399 net/bluetooth/mgmt.c 	err = hci_req_run(&req, set_advertising_complete);
req              4494 net/bluetooth/mgmt.c 		struct hci_request req;
req              4496 net/bluetooth/mgmt.c 		hci_req_init(&req, hdev);
req              4498 net/bluetooth/mgmt.c 		hci_req_add_le_scan_disable(&req);
req              4499 net/bluetooth/mgmt.c 		hci_req_add_le_passive_scan(&req);
req              4501 net/bluetooth/mgmt.c 		hci_req_run(&req, NULL);
req              4548 net/bluetooth/mgmt.c 	struct hci_request req;
req              4591 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req              4593 net/bluetooth/mgmt.c 	__hci_req_write_fast_connectable(&req, cp->val);
req              4595 net/bluetooth/mgmt.c 	err = hci_req_run(&req, fast_connectable_complete);
req              4644 net/bluetooth/mgmt.c 	struct hci_request req;
req              4733 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req              4735 net/bluetooth/mgmt.c 	__hci_req_write_fast_connectable(&req, false);
req              4736 net/bluetooth/mgmt.c 	__hci_req_update_scan(&req);
req              4741 net/bluetooth/mgmt.c 	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);
req              4743 net/bluetooth/mgmt.c 	err = hci_req_run(&req, set_bredr_complete);
req              4802 net/bluetooth/mgmt.c 	struct hci_request req;
req              4872 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req              4873 net/bluetooth/mgmt.c 	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
req              4874 net/bluetooth/mgmt.c 	err = hci_req_run(&req, sc_enable_complete);
req              5317 net/bluetooth/mgmt.c 		struct hci_request req;
req              5322 net/bluetooth/mgmt.c 		hci_req_init(&req, hdev);
req              5324 net/bluetooth/mgmt.c 		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
req              5334 net/bluetooth/mgmt.c 			hci_req_add(&req, HCI_OP_READ_TX_POWER,
req              5342 net/bluetooth/mgmt.c 			hci_req_add(&req, HCI_OP_READ_TX_POWER,
req              5346 net/bluetooth/mgmt.c 		err = hci_req_run(&req, conn_info_refresh_complete);
req              5452 net/bluetooth/mgmt.c 	struct hci_request req;
req              5498 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req              5501 net/bluetooth/mgmt.c 	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
req              5509 net/bluetooth/mgmt.c 		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
req              5512 net/bluetooth/mgmt.c 	err = hci_req_run(&req, get_clock_info_complete);
req              6131 net/bluetooth/mgmt.c 	struct hci_request req;
req              6139 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req              6142 net/bluetooth/mgmt.c 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
req              6144 net/bluetooth/mgmt.c 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
req              6146 net/bluetooth/mgmt.c 	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
req              6552 net/bluetooth/mgmt.c 	struct hci_request req;
req              6665 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req              6667 net/bluetooth/mgmt.c 	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
req              6670 net/bluetooth/mgmt.c 		err = hci_req_run(&req, add_advertising_complete);
req              6717 net/bluetooth/mgmt.c 	struct hci_request req;
req              6745 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req              6747 net/bluetooth/mgmt.c 	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
req              6750 net/bluetooth/mgmt.c 		__hci_req_disable_advertising(&req);
req              6756 net/bluetooth/mgmt.c 	if (skb_queue_empty(&req.cmd_q) ||
req              6759 net/bluetooth/mgmt.c 		hci_req_purge(&req);
req              6774 net/bluetooth/mgmt.c 	err = hci_req_run(&req, remove_advertising_complete);
req              7569 net/bluetooth/mgmt.c static void clear_eir(struct hci_request *req)
req              7571 net/bluetooth/mgmt.c 	struct hci_dev *hdev = req->hdev;
req              7581 net/bluetooth/mgmt.c 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
req              7587 net/bluetooth/mgmt.c 	struct hci_request req;
req              7623 net/bluetooth/mgmt.c 	hci_req_init(&req, hdev);
req              7627 net/bluetooth/mgmt.c 			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
req              7629 net/bluetooth/mgmt.c 		__hci_req_update_eir(&req);
req              7631 net/bluetooth/mgmt.c 		clear_eir(&req);
req              7634 net/bluetooth/mgmt.c 	hci_req_run(&req, NULL);
req               216 net/bluetooth/rfcomm/tty.c static struct rfcomm_dev *__rfcomm_dev_add(struct rfcomm_dev_req *req,
req               229 net/bluetooth/rfcomm/tty.c 	if (req->dev_id < 0) {
req               240 net/bluetooth/rfcomm/tty.c 		dev->id = req->dev_id;
req               264 net/bluetooth/rfcomm/tty.c 	bacpy(&dev->src, &req->src);
req               265 net/bluetooth/rfcomm/tty.c 	bacpy(&dev->dst, &req->dst);
req               266 net/bluetooth/rfcomm/tty.c 	dev->channel = req->channel;
req               268 net/bluetooth/rfcomm/tty.c 	dev->flags = req->flags &
req               278 net/bluetooth/rfcomm/tty.c 	if (req->flags & (1 << RFCOMM_REUSE_DLC)) {
req               317 net/bluetooth/rfcomm/tty.c static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
req               322 net/bluetooth/rfcomm/tty.c 	BT_DBG("id %d channel %d", req->dev_id, req->channel);
req               324 net/bluetooth/rfcomm/tty.c 	dev = __rfcomm_dev_add(req, dlc);
req               392 net/bluetooth/rfcomm/tty.c 	struct rfcomm_dev_req req;
req               396 net/bluetooth/rfcomm/tty.c 	if (copy_from_user(&req, arg, sizeof(req)))
req               399 net/bluetooth/rfcomm/tty.c 	BT_DBG("sk %p dev_id %d flags 0x%x", sk, req.dev_id, req.flags);
req               401 net/bluetooth/rfcomm/tty.c 	if (req.flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN))
req               404 net/bluetooth/rfcomm/tty.c 	if (req.flags & (1 << RFCOMM_REUSE_DLC)) {
req               413 net/bluetooth/rfcomm/tty.c 		dlc = rfcomm_dlc_exists(&req.src, &req.dst, req.channel);
req               423 net/bluetooth/rfcomm/tty.c 	id = rfcomm_dev_add(&req, dlc);
req               427 net/bluetooth/rfcomm/tty.c 	if (req.flags & (1 << RFCOMM_REUSE_DLC)) {
req               438 net/bluetooth/rfcomm/tty.c 	struct rfcomm_dev_req req;
req               442 net/bluetooth/rfcomm/tty.c 	if (copy_from_user(&req, arg, sizeof(req)))
req               445 net/bluetooth/rfcomm/tty.c 	BT_DBG("dev_id %d flags 0x%x", req.dev_id, req.flags);
req               447 net/bluetooth/rfcomm/tty.c 	dev = rfcomm_dev_get(req.dev_id);
req               462 net/bluetooth/rfcomm/tty.c 	if (req.flags & (1 << RFCOMM_HANGUP_NOW))
req               658 net/bluetooth/smp.c 			      struct smp_cmd_pairing *req,
req               712 net/bluetooth/smp.c 		req->io_capability = conn->hcon->io_capability;
req               713 net/bluetooth/smp.c 		req->oob_flag = oob_flag;
req               714 net/bluetooth/smp.c 		req->max_key_size = hdev->le_max_key_size;
req               715 net/bluetooth/smp.c 		req->init_key_dist = local_dist;
req               716 net/bluetooth/smp.c 		req->resp_key_dist = remote_dist;
req               717 net/bluetooth/smp.c 		req->auth_req = (authreq & AUTH_REQ_MASK(hdev));
req               726 net/bluetooth/smp.c 	rsp->init_key_dist = req->init_key_dist & remote_dist;
req               727 net/bluetooth/smp.c 	rsp->resp_key_dist = req->resp_key_dist & local_dist;
req              1039 net/bluetooth/smp.c 	struct smp_cmd_pairing *req = (void *) &smp->preq[1];
req              1054 net/bluetooth/smp.c 		persistent = !!((req->auth_req & rsp->auth_req) &
req              1233 net/bluetooth/smp.c 	struct smp_cmd_pairing *req, *rsp;
req              1249 net/bluetooth/smp.c 	req = (void *) &smp->preq[1];
req              1253 net/bluetooth/smp.c 		*keydist &= req->init_key_dist;
req              1256 net/bluetooth/smp.c 		*keydist &= req->resp_key_dist;
req              1686 net/bluetooth/smp.c 				    struct smp_cmd_pairing *req,
req              1705 net/bluetooth/smp.c 		memset(req, 0, sizeof(*req));
req              1707 net/bluetooth/smp.c 		req->auth_req        = SMP_AUTH_CT2;
req              1708 net/bluetooth/smp.c 		req->init_key_dist   = local_dist;
req              1709 net/bluetooth/smp.c 		req->resp_key_dist   = remote_dist;
req              1710 net/bluetooth/smp.c 		req->max_key_size    = conn->hcon->enc_key_size;
req              1721 net/bluetooth/smp.c 	rsp->init_key_dist   = req->init_key_dist & remote_dist;
req              1722 net/bluetooth/smp.c 	rsp->resp_key_dist   = req->resp_key_dist & local_dist;
req              1729 net/bluetooth/smp.c 	struct smp_cmd_pairing rsp, *req = (void *) skb->data;
req              1738 net/bluetooth/smp.c 	if (skb->len < sizeof(*req))
req              1753 net/bluetooth/smp.c 	auth = req->auth_req & AUTH_REQ_MASK(hdev);
req              1763 net/bluetooth/smp.c 	memcpy(&smp->preq[1], req, sizeof(*req));
req              1764 net/bluetooth/smp.c 	skb_pull(skb, sizeof(*req));
req              1770 net/bluetooth/smp.c 	if (req->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob)
req              1782 net/bluetooth/smp.c 		build_bredr_pairing_cmd(smp, req, &rsp);
req              1784 net/bluetooth/smp.c 		if (req->auth_req & SMP_AUTH_CT2)
req              1787 net/bluetooth/smp.c 		key_size = min(req->max_key_size, rsp.max_key_size);
req              1802 net/bluetooth/smp.c 	build_pairing_cmd(conn, req, &rsp, auth);
req              1824 net/bluetooth/smp.c 					 req->io_capability);
req              1829 net/bluetooth/smp.c 	key_size = min(req->max_key_size, rsp.max_key_size);
req              1858 net/bluetooth/smp.c 	ret = tk_request(conn, 0, auth, rsp.io_capability, req->io_capability);
req              1920 net/bluetooth/smp.c 	struct smp_cmd_pairing *req, *rsp = (void *) skb->data;
req              1937 net/bluetooth/smp.c 	req = (void *) &smp->preq[1];
req              1939 net/bluetooth/smp.c 	key_size = min(req->max_key_size, rsp->max_key_size);
req              1963 net/bluetooth/smp.c 	if ((req->auth_req & SMP_AUTH_CT2) && (auth & SMP_AUTH_CT2))
req              1974 net/bluetooth/smp.c 	if ((req->auth_req & SMP_AUTH_SC) && (auth & SMP_AUTH_SC))
req              1983 net/bluetooth/smp.c 		method = get_auth_method(smp, req->io_capability,
req              2003 net/bluetooth/smp.c 	auth |= req->auth_req;
req              2005 net/bluetooth/smp.c 	ret = tk_request(conn, 0, auth, req->io_capability, rsp->io_capability);
req              2045 net/bluetooth/smp.c 	struct smp_cmd_pairing *req, *rsp;
req              2059 net/bluetooth/smp.c 	req = (void *) &smp->preq[1];
req              2063 net/bluetooth/smp.c 	smp->remote_key_dist = (req->init_key_dist & rsp->resp_key_dist);
req              2065 net/bluetooth/smp.c 	auth = req->auth_req & AUTH_REQ_MASK(hdev);
req              2067 net/bluetooth/smp.c 	if (tk_request(conn, 0, auth, rsp->io_capability, req->io_capability)) {
req              3009 net/bluetooth/smp.c 	struct smp_cmd_pairing req;
req              3062 net/bluetooth/smp.c 	build_bredr_pairing_cmd(smp, &req, NULL);
req              3065 net/bluetooth/smp.c 	memcpy(&smp->preq[1], &req, sizeof(req));
req              3067 net/bluetooth/smp.c 	smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(req), &req);
req                40 net/bpfilter/bpfilter_kern.c 	struct mbox_request req;
req                46 net/bpfilter/bpfilter_kern.c 	req.is_set = is_set;
req                47 net/bpfilter/bpfilter_kern.c 	req.pid = current->pid;
req                48 net/bpfilter/bpfilter_kern.c 	req.cmd = optname;
req                49 net/bpfilter/bpfilter_kern.c 	req.addr = (long __force __user)optval;
req                50 net/bpfilter/bpfilter_kern.c 	req.len = optlen;
req                53 net/bpfilter/bpfilter_kern.c 	n = __kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req),
req                55 net/bpfilter/bpfilter_kern.c 	if (n != sizeof(req)) {
req                34 net/bpfilter/main.c 		struct mbox_request req;
req                38 net/bpfilter/main.c 		n = read(0, &req, sizeof(req));
req                39 net/bpfilter/main.c 		if (n != sizeof(req)) {
req                44 net/bpfilter/main.c 		reply.status = req.is_set ?
req                45 net/bpfilter/main.c 			handle_set_cmd(&req) :
req                46 net/bpfilter/main.c 			handle_get_cmd(&req);
req               130 net/caif/cfctrl.c 			      struct cfctrl_request_info *req)
req               134 net/caif/cfctrl.c 	req->sequence_no = atomic_read(&ctrl->req_seq_no);
req               135 net/caif/cfctrl.c 	list_add_tail(&req->list, &ctrl->list);
req               141 net/caif/cfctrl.c 						struct cfctrl_request_info *req)
req               148 net/caif/cfctrl.c 		if (cfctrl_req_eq(req, p)) {
req               207 net/caif/cfctrl.c 	struct cfctrl_request_info *req;
req               274 net/caif/cfctrl.c 	req = kzalloc(sizeof(*req), GFP_KERNEL);
req               275 net/caif/cfctrl.c 	if (!req)
req               277 net/caif/cfctrl.c 	req->client_layer = user_layer;
req               278 net/caif/cfctrl.c 	req->cmd = CFCTRL_CMD_LINK_SETUP;
req               279 net/caif/cfctrl.c 	req->param = *param;
req               280 net/caif/cfctrl.c 	cfctrl_insert_req(cfctrl, req);
req               359 net/caif/cfctrl.c 	struct cfctrl_request_info rsp, *req;
req               495 net/caif/cfctrl.c 			req = cfctrl_remove_req(cfctrl, &rsp);
req               503 net/caif/cfctrl.c 						       req ? req->client_layer
req               509 net/caif/cfctrl.c 							  req ? req->
req               513 net/caif/cfctrl.c 			kfree(req);
req               542 net/ceph/auth_x.c 		struct ceph_x_service_ticket_request *req;
req               554 net/ceph/auth_x.c 		req = p;
req               555 net/ceph/auth_x.c 		req->keys = cpu_to_le32(need);
req               556 net/ceph/auth_x.c 		p += sizeof(*req);
req               383 net/ceph/cls_lock_client.c int ceph_cls_assert_locked(struct ceph_osd_request *req, int which,
req               401 net/ceph/cls_lock_client.c 	ret = osd_req_op_cls_init(req, which, "lock", "assert_locked");
req               421 net/ceph/cls_lock_client.c 	osd_req_op_cls_request_data_pages(req, which, pages, assert_op_buf_size,
req               221 net/ceph/crypto.c 	SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
req               237 net/ceph/crypto.c 	skcipher_request_set_sync_tfm(req, key->tfm);
req               238 net/ceph/crypto.c 	skcipher_request_set_callback(req, 0, NULL, NULL);
req               239 net/ceph/crypto.c 	skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);
req               248 net/ceph/crypto.c 		ret = crypto_skcipher_encrypt(req);
req               250 net/ceph/crypto.c 		ret = crypto_skcipher_decrypt(req);
req               251 net/ceph/crypto.c 	skcipher_request_zero(req);
req               139 net/ceph/debugfs.c 	struct ceph_mon_generic_request *req;
req               160 net/ceph/debugfs.c 		req = rb_entry(rp, struct ceph_mon_generic_request, node);
req               161 net/ceph/debugfs.c 		op = le16_to_cpu(req->request->hdr.type);
req               163 net/ceph/debugfs.c 			seq_printf(s, "%llu statfs\n", req->tid);
req               165 net/ceph/debugfs.c 			seq_printf(s, "%llu mon_get_version", req->tid);
req               167 net/ceph/debugfs.c 			seq_printf(s, "%llu unknown\n", req->tid);
req               207 net/ceph/debugfs.c static void dump_request(struct seq_file *s, struct ceph_osd_request *req)
req               211 net/ceph/debugfs.c 	seq_printf(s, "%llu\t", req->r_tid);
req               212 net/ceph/debugfs.c 	dump_target(s, &req->r_t);
req               214 net/ceph/debugfs.c 	seq_printf(s, "\t%d", req->r_attempts);
req               216 net/ceph/debugfs.c 	for (i = 0; i < req->r_num_ops; i++) {
req               217 net/ceph/debugfs.c 		struct ceph_osd_req_op *op = &req->r_ops[i];
req               235 net/ceph/debugfs.c 		struct ceph_osd_request *req =
req               238 net/ceph/debugfs.c 		dump_request(s, req);
req               510 net/ceph/mon_client.c 	struct ceph_mon_generic_request *req =
req               513 net/ceph/mon_client.c 	dout("%s greq %p request %p reply %p\n", __func__, req, req->request,
req               514 net/ceph/mon_client.c 	     req->reply);
req               515 net/ceph/mon_client.c 	WARN_ON(!RB_EMPTY_NODE(&req->node));
req               517 net/ceph/mon_client.c 	if (req->reply)
req               518 net/ceph/mon_client.c 		ceph_msg_put(req->reply);
req               519 net/ceph/mon_client.c 	if (req->request)
req               520 net/ceph/mon_client.c 		ceph_msg_put(req->request);
req               522 net/ceph/mon_client.c 	kfree(req);
req               525 net/ceph/mon_client.c static void put_generic_request(struct ceph_mon_generic_request *req)
req               527 net/ceph/mon_client.c 	if (req)
req               528 net/ceph/mon_client.c 		kref_put(&req->kref, release_generic_request);
req               531 net/ceph/mon_client.c static void get_generic_request(struct ceph_mon_generic_request *req)
req               533 net/ceph/mon_client.c 	kref_get(&req->kref);
req               539 net/ceph/mon_client.c 	struct ceph_mon_generic_request *req;
req               541 net/ceph/mon_client.c 	req = kzalloc(sizeof(*req), gfp);
req               542 net/ceph/mon_client.c 	if (!req)
req               545 net/ceph/mon_client.c 	req->monc = monc;
req               546 net/ceph/mon_client.c 	kref_init(&req->kref);
req               547 net/ceph/mon_client.c 	RB_CLEAR_NODE(&req->node);
req               548 net/ceph/mon_client.c 	init_completion(&req->completion);
req               550 net/ceph/mon_client.c 	dout("%s greq %p\n", __func__, req);
req               551 net/ceph/mon_client.c 	return req;
req               554 net/ceph/mon_client.c static void register_generic_request(struct ceph_mon_generic_request *req)
req               556 net/ceph/mon_client.c 	struct ceph_mon_client *monc = req->monc;
req               558 net/ceph/mon_client.c 	WARN_ON(req->tid);
req               560 net/ceph/mon_client.c 	get_generic_request(req);
req               561 net/ceph/mon_client.c 	req->tid = ++monc->last_tid;
req               562 net/ceph/mon_client.c 	insert_generic_request(&monc->generic_request_tree, req);
req               566 net/ceph/mon_client.c 				 struct ceph_mon_generic_request *req)
req               568 net/ceph/mon_client.c 	WARN_ON(!req->tid);
req               570 net/ceph/mon_client.c 	dout("%s greq %p tid %llu\n", __func__, req, req->tid);
req               571 net/ceph/mon_client.c 	req->request->hdr.tid = cpu_to_le64(req->tid);
req               572 net/ceph/mon_client.c 	ceph_con_send(&monc->con, ceph_msg_get(req->request));
req               575 net/ceph/mon_client.c static void __finish_generic_request(struct ceph_mon_generic_request *req)
req               577 net/ceph/mon_client.c 	struct ceph_mon_client *monc = req->monc;
req               579 net/ceph/mon_client.c 	dout("%s greq %p tid %llu\n", __func__, req, req->tid);
req               580 net/ceph/mon_client.c 	erase_generic_request(&monc->generic_request_tree, req);
req               582 net/ceph/mon_client.c 	ceph_msg_revoke(req->request);
req               583 net/ceph/mon_client.c 	ceph_msg_revoke_incoming(req->reply);
req               586 net/ceph/mon_client.c static void finish_generic_request(struct ceph_mon_generic_request *req)
req               588 net/ceph/mon_client.c 	__finish_generic_request(req);
req               589 net/ceph/mon_client.c 	put_generic_request(req);
req               592 net/ceph/mon_client.c static void complete_generic_request(struct ceph_mon_generic_request *req)
req               594 net/ceph/mon_client.c 	if (req->complete_cb)
req               595 net/ceph/mon_client.c 		req->complete_cb(req);
req               597 net/ceph/mon_client.c 		complete_all(&req->completion);
req               598 net/ceph/mon_client.c 	put_generic_request(req);
req               601 net/ceph/mon_client.c static void cancel_generic_request(struct ceph_mon_generic_request *req)
req               603 net/ceph/mon_client.c 	struct ceph_mon_client *monc = req->monc;
req               606 net/ceph/mon_client.c 	dout("%s greq %p tid %llu\n", __func__, req, req->tid);
req               610 net/ceph/mon_client.c 					    req->tid);
req               612 net/ceph/mon_client.c 		WARN_ON(lookup_req != req);
req               613 net/ceph/mon_client.c 		finish_generic_request(req);
req               619 net/ceph/mon_client.c static int wait_generic_request(struct ceph_mon_generic_request *req)
req               623 net/ceph/mon_client.c 	dout("%s greq %p tid %llu\n", __func__, req, req->tid);
req               624 net/ceph/mon_client.c 	ret = wait_for_completion_interruptible(&req->completion);
req               626 net/ceph/mon_client.c 		cancel_generic_request(req);
req               628 net/ceph/mon_client.c 		ret = req->result; /* completed */
req               638 net/ceph/mon_client.c 	struct ceph_mon_generic_request *req;
req               643 net/ceph/mon_client.c 	req = lookup_generic_request(&monc->generic_request_tree, tid);
req               644 net/ceph/mon_client.c 	if (!req) {
req               649 net/ceph/mon_client.c 		dout("get_generic_reply %lld got %p\n", tid, req->reply);
req               651 net/ceph/mon_client.c 		m = ceph_msg_get(req->reply);
req               668 net/ceph/mon_client.c 	struct ceph_mon_generic_request *req;
req               678 net/ceph/mon_client.c 	req = lookup_generic_request(&monc->generic_request_tree, tid);
req               679 net/ceph/mon_client.c 	if (!req) {
req               684 net/ceph/mon_client.c 	req->result = 0;
req               685 net/ceph/mon_client.c 	*req->u.st = reply->st; /* struct */
req               686 net/ceph/mon_client.c 	__finish_generic_request(req);
req               689 net/ceph/mon_client.c 	complete_generic_request(req);
req               703 net/ceph/mon_client.c 	struct ceph_mon_generic_request *req;
req               707 net/ceph/mon_client.c 	req = alloc_generic_request(monc, GFP_NOFS);
req               708 net/ceph/mon_client.c 	if (!req)
req               711 net/ceph/mon_client.c 	req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS,
req               713 net/ceph/mon_client.c 	if (!req->request)
req               716 net/ceph/mon_client.c 	req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 64, GFP_NOFS, true);
req               717 net/ceph/mon_client.c 	if (!req->reply)
req               720 net/ceph/mon_client.c 	req->u.st = buf;
req               721 net/ceph/mon_client.c 	req->request->hdr.version = cpu_to_le16(2);
req               724 net/ceph/mon_client.c 	register_generic_request(req);
req               726 net/ceph/mon_client.c 	h = req->request->front.iov_base;
req               733 net/ceph/mon_client.c 	send_generic_request(monc, req);
req               736 net/ceph/mon_client.c 	ret = wait_generic_request(req);
req               738 net/ceph/mon_client.c 	put_generic_request(req);
req               746 net/ceph/mon_client.c 	struct ceph_mon_generic_request *req;
req               760 net/ceph/mon_client.c 	req = lookup_generic_request(&monc->generic_request_tree, handle);
req               761 net/ceph/mon_client.c 	if (!req) {
req               766 net/ceph/mon_client.c 	req->result = 0;
req               767 net/ceph/mon_client.c 	req->u.newest = ceph_decode_64(&p);
req               768 net/ceph/mon_client.c 	__finish_generic_request(req);
req               771 net/ceph/mon_client.c 	complete_generic_request(req);
req               783 net/ceph/mon_client.c 	struct ceph_mon_generic_request *req;
req               785 net/ceph/mon_client.c 	req = alloc_generic_request(monc, GFP_NOIO);
req               786 net/ceph/mon_client.c 	if (!req)
req               789 net/ceph/mon_client.c 	req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION,
req               792 net/ceph/mon_client.c 	if (!req->request)
req               795 net/ceph/mon_client.c 	req->reply = ceph_msg_new(CEPH_MSG_MON_GET_VERSION_REPLY, 32, GFP_NOIO,
req               797 net/ceph/mon_client.c 	if (!req->reply)
req               800 net/ceph/mon_client.c 	req->complete_cb = cb;
req               801 net/ceph/mon_client.c 	req->private_data = private_data;
req               804 net/ceph/mon_client.c 	register_generic_request(req);
req               806 net/ceph/mon_client.c 		void *p = req->request->front.iov_base;
req               807 net/ceph/mon_client.c 		void *const end = p + req->request->front_alloc_len;
req               809 net/ceph/mon_client.c 		ceph_encode_64(&p, req->tid); /* handle */
req               813 net/ceph/mon_client.c 	send_generic_request(monc, req);
req               816 net/ceph/mon_client.c 	return req;
req               819 net/ceph/mon_client.c 	put_generic_request(req);
req               831 net/ceph/mon_client.c 	struct ceph_mon_generic_request *req;
req               834 net/ceph/mon_client.c 	req = __ceph_monc_get_version(monc, what, NULL, 0);
req               835 net/ceph/mon_client.c 	if (IS_ERR(req))
req               836 net/ceph/mon_client.c 		return PTR_ERR(req);
req               838 net/ceph/mon_client.c 	ret = wait_generic_request(req);
req               840 net/ceph/mon_client.c 		*newest = req->u.newest;
req               842 net/ceph/mon_client.c 	put_generic_request(req);
req               855 net/ceph/mon_client.c 	struct ceph_mon_generic_request *req;
req               857 net/ceph/mon_client.c 	req = __ceph_monc_get_version(monc, what, cb, private_data);
req               858 net/ceph/mon_client.c 	if (IS_ERR(req))
req               859 net/ceph/mon_client.c 		return PTR_ERR(req);
req               861 net/ceph/mon_client.c 	put_generic_request(req);
req               869 net/ceph/mon_client.c 	struct ceph_mon_generic_request *req;
req               881 net/ceph/mon_client.c 	req = lookup_generic_request(&monc->generic_request_tree, tid);
req               882 net/ceph/mon_client.c 	if (!req) {
req               887 net/ceph/mon_client.c 	req->result = ceph_decode_32(&p);
req               888 net/ceph/mon_client.c 	__finish_generic_request(req);
req               891 net/ceph/mon_client.c 	complete_generic_request(req);
req               902 net/ceph/mon_client.c 	struct ceph_mon_generic_request *req;
req               907 net/ceph/mon_client.c 	req = alloc_generic_request(monc, GFP_NOIO);
req               908 net/ceph/mon_client.c 	if (!req)
req               911 net/ceph/mon_client.c 	req->request = ceph_msg_new(CEPH_MSG_MON_COMMAND, 256, GFP_NOIO, true);
req               912 net/ceph/mon_client.c 	if (!req->request)
req               915 net/ceph/mon_client.c 	req->reply = ceph_msg_new(CEPH_MSG_MON_COMMAND_ACK, 512, GFP_NOIO,
req               917 net/ceph/mon_client.c 	if (!req->reply)
req               921 net/ceph/mon_client.c 	register_generic_request(req);
req               922 net/ceph/mon_client.c 	h = req->request->front.iov_base;
req               933 net/ceph/mon_client.c 	send_generic_request(monc, req);
req               936 net/ceph/mon_client.c 	ret = wait_generic_request(req);
req               947 net/ceph/mon_client.c 	put_generic_request(req);
req               957 net/ceph/mon_client.c 	struct ceph_mon_generic_request *req;
req               961 net/ceph/mon_client.c 		req = rb_entry(p, struct ceph_mon_generic_request, node);
req               962 net/ceph/mon_client.c 		ceph_msg_revoke(req->request);
req               963 net/ceph/mon_client.c 		ceph_msg_revoke_incoming(req->reply);
req               964 net/ceph/mon_client.c 		ceph_con_send(&monc->con, ceph_msg_get(req->request));
req                49 net/ceph/osd_client.c static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
req                50 net/ceph/osd_client.c static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
req               469 net/ceph/osd_client.c static void request_release_checks(struct ceph_osd_request *req)
req               471 net/ceph/osd_client.c 	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
req               472 net/ceph/osd_client.c 	WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
req               473 net/ceph/osd_client.c 	WARN_ON(!list_empty(&req->r_private_item));
req               474 net/ceph/osd_client.c 	WARN_ON(req->r_osd);
req               479 net/ceph/osd_client.c 	struct ceph_osd_request *req = container_of(kref,
req               483 net/ceph/osd_client.c 	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
req               484 net/ceph/osd_client.c 	     req->r_request, req->r_reply);
req               485 net/ceph/osd_client.c 	request_release_checks(req);
req               487 net/ceph/osd_client.c 	if (req->r_request)
req               488 net/ceph/osd_client.c 		ceph_msg_put(req->r_request);
req               489 net/ceph/osd_client.c 	if (req->r_reply)
req               490 net/ceph/osd_client.c 		ceph_msg_put(req->r_reply);
req               492 net/ceph/osd_client.c 	for (which = 0; which < req->r_num_ops; which++)
req               493 net/ceph/osd_client.c 		osd_req_op_data_release(req, which);
req               495 net/ceph/osd_client.c 	target_destroy(&req->r_t);
req               496 net/ceph/osd_client.c 	ceph_put_snap_context(req->r_snapc);
req               498 net/ceph/osd_client.c 	if (req->r_mempool)
req               499 net/ceph/osd_client.c 		mempool_free(req, req->r_osdc->req_mempool);
req               500 net/ceph/osd_client.c 	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
req               501 net/ceph/osd_client.c 		kmem_cache_free(ceph_osd_request_cache, req);
req               503 net/ceph/osd_client.c 		kfree(req);
req               506 net/ceph/osd_client.c void ceph_osdc_get_request(struct ceph_osd_request *req)
req               508 net/ceph/osd_client.c 	dout("%s %p (was %d)\n", __func__, req,
req               509 net/ceph/osd_client.c 	     kref_read(&req->r_kref));
req               510 net/ceph/osd_client.c 	kref_get(&req->r_kref);
req               514 net/ceph/osd_client.c void ceph_osdc_put_request(struct ceph_osd_request *req)
req               516 net/ceph/osd_client.c 	if (req) {
req               517 net/ceph/osd_client.c 		dout("%s %p (was %d)\n", __func__, req,
req               518 net/ceph/osd_client.c 		     kref_read(&req->r_kref));
req               519 net/ceph/osd_client.c 		kref_put(&req->r_kref, ceph_osdc_release_request);
req               524 net/ceph/osd_client.c static void request_init(struct ceph_osd_request *req)
req               527 net/ceph/osd_client.c 	memset(req, 0, sizeof(*req));
req               529 net/ceph/osd_client.c 	kref_init(&req->r_kref);
req               530 net/ceph/osd_client.c 	init_completion(&req->r_completion);
req               531 net/ceph/osd_client.c 	RB_CLEAR_NODE(&req->r_node);
req               532 net/ceph/osd_client.c 	RB_CLEAR_NODE(&req->r_mc_node);
req               533 net/ceph/osd_client.c 	INIT_LIST_HEAD(&req->r_private_item);
req               535 net/ceph/osd_client.c 	target_init(&req->r_t);
req               545 net/ceph/osd_client.c static void request_reinit(struct ceph_osd_request *req)
req               547 net/ceph/osd_client.c 	struct ceph_osd_client *osdc = req->r_osdc;
req               548 net/ceph/osd_client.c 	bool mempool = req->r_mempool;
req               549 net/ceph/osd_client.c 	unsigned int num_ops = req->r_num_ops;
req               550 net/ceph/osd_client.c 	u64 snapid = req->r_snapid;
req               551 net/ceph/osd_client.c 	struct ceph_snap_context *snapc = req->r_snapc;
req               552 net/ceph/osd_client.c 	bool linger = req->r_linger;
req               553 net/ceph/osd_client.c 	struct ceph_msg *request_msg = req->r_request;
req               554 net/ceph/osd_client.c 	struct ceph_msg *reply_msg = req->r_reply;
req               556 net/ceph/osd_client.c 	dout("%s req %p\n", __func__, req);
req               557 net/ceph/osd_client.c 	WARN_ON(kref_read(&req->r_kref) != 1);
req               558 net/ceph/osd_client.c 	request_release_checks(req);
req               562 net/ceph/osd_client.c 	target_destroy(&req->r_t);
req               564 net/ceph/osd_client.c 	request_init(req);
req               565 net/ceph/osd_client.c 	req->r_osdc = osdc;
req               566 net/ceph/osd_client.c 	req->r_mempool = mempool;
req               567 net/ceph/osd_client.c 	req->r_num_ops = num_ops;
req               568 net/ceph/osd_client.c 	req->r_snapid = snapid;
req               569 net/ceph/osd_client.c 	req->r_snapc = snapc;
req               570 net/ceph/osd_client.c 	req->r_linger = linger;
req               571 net/ceph/osd_client.c 	req->r_request = request_msg;
req               572 net/ceph/osd_client.c 	req->r_reply = reply_msg;
req               581 net/ceph/osd_client.c 	struct ceph_osd_request *req;
req               585 net/ceph/osd_client.c 		req = mempool_alloc(osdc->req_mempool, gfp_flags);
req               587 net/ceph/osd_client.c 		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
req               590 net/ceph/osd_client.c 		req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags);
req               592 net/ceph/osd_client.c 	if (unlikely(!req))
req               595 net/ceph/osd_client.c 	request_init(req);
req               596 net/ceph/osd_client.c 	req->r_osdc = osdc;
req               597 net/ceph/osd_client.c 	req->r_mempool = use_mempool;
req               598 net/ceph/osd_client.c 	req->r_num_ops = num_ops;
req               599 net/ceph/osd_client.c 	req->r_snapid = CEPH_NOSNAP;
req               600 net/ceph/osd_client.c 	req->r_snapc = ceph_get_snap_context(snapc);
req               602 net/ceph/osd_client.c 	dout("%s req %p\n", __func__, req);
req               603 net/ceph/osd_client.c 	return req;
req               612 net/ceph/osd_client.c static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp,
req               616 net/ceph/osd_client.c 	struct ceph_osd_client *osdc = req->r_osdc;
req               620 net/ceph/osd_client.c 	WARN_ON(req->r_request || req->r_reply);
req               621 net/ceph/osd_client.c 	WARN_ON(ceph_oid_empty(&req->r_base_oid));
req               622 net/ceph/osd_client.c 	WARN_ON(ceph_oloc_empty(&req->r_base_oloc));
req               633 net/ceph/osd_client.c 			ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
req               634 net/ceph/osd_client.c 	msg_size += 4 + req->r_base_oid.name_len; /* oid */
req               635 net/ceph/osd_client.c 	msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
req               638 net/ceph/osd_client.c 	msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
req               641 net/ceph/osd_client.c 	if (req->r_mempool)
req               651 net/ceph/osd_client.c 	req->r_request = msg;
req               655 net/ceph/osd_client.c 	msg_size += req->r_base_oid.name_len;
req               656 net/ceph/osd_client.c 	msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);
req               658 net/ceph/osd_client.c 	if (req->r_mempool)
req               667 net/ceph/osd_client.c 	req->r_reply = msg;
req               683 net/ceph/osd_client.c static void get_num_data_items(struct ceph_osd_request *req,
req               692 net/ceph/osd_client.c 	for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
req               732 net/ceph/osd_client.c int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
req               736 net/ceph/osd_client.c 	get_num_data_items(req, &num_request_data_items, &num_reply_data_items);
req               737 net/ceph/osd_client.c 	return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items,
req               924 net/ceph/osd_client.c static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
req               929 net/ceph/osd_client.c 	op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
req              1074 net/ceph/osd_client.c 	struct ceph_osd_request *req;
req              1084 net/ceph/osd_client.c 	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
req              1086 net/ceph/osd_client.c 	if (!req) {
req              1097 net/ceph/osd_client.c 		osd_req_op_init(req, which, opcode, 0);
req              1110 net/ceph/osd_client.c 		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
req              1114 net/ceph/osd_client.c 	req->r_flags = flags;
req              1115 net/ceph/osd_client.c 	req->r_base_oloc.pool = layout->pool_id;
req              1116 net/ceph/osd_client.c 	req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
req              1117 net/ceph/osd_client.c 	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);
req              1119 net/ceph/osd_client.c 	req->r_snapid = vino.snap;
req              1121 net/ceph/osd_client.c 		req->r_data_offset = off;
req              1129 net/ceph/osd_client.c 		r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_ops, 0);
req              1131 net/ceph/osd_client.c 		r = ceph_osdc_alloc_messages(req, GFP_NOFS);
req              1135 net/ceph/osd_client.c 	return req;
req              1138 net/ceph/osd_client.c 	ceph_osdc_put_request(req);
req              1153 net/ceph/osd_client.c 			int (*fn)(struct ceph_osd_request *req, void *arg),
req              1162 net/ceph/osd_client.c 			struct ceph_osd_request *req =
req              1166 net/ceph/osd_client.c 			if (fn(req, arg))
req              1172 net/ceph/osd_client.c 		struct ceph_osd_request *req =
req              1176 net/ceph/osd_client.c 		if (fn(req, arg))
req              1317 net/ceph/osd_client.c 		struct ceph_osd_request *req =
req              1322 net/ceph/osd_client.c 		dout(" reassigning req %p tid %llu\n", req, req->r_tid);
req              1323 net/ceph/osd_client.c 		unlink_request(osd, req);
req              1324 net/ceph/osd_client.c 		link_request(&osdc->homeless_osd, req);
req              1368 net/ceph/osd_client.c 			struct ceph_osd_request *req =
req              1370 net/ceph/osd_client.c 			req->r_stamp = jiffies;
req              1416 net/ceph/osd_client.c static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
req              1419 net/ceph/osd_client.c 	WARN_ON(!req->r_tid || req->r_osd);
req              1421 net/ceph/osd_client.c 	     req, req->r_tid);
req              1429 net/ceph/osd_client.c 	insert_request(&osd->o_requests, req);
req              1430 net/ceph/osd_client.c 	req->r_osd = osd;
req              1433 net/ceph/osd_client.c static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
req              1436 net/ceph/osd_client.c 	WARN_ON(req->r_osd != osd);
req              1438 net/ceph/osd_client.c 	     req, req->r_tid);
req              1440 net/ceph/osd_client.c 	req->r_osd = NULL;
req              1441 net/ceph/osd_client.c 	erase_request(&osd->o_requests, req);
req              1914 net/ceph/osd_client.c static bool should_plug_request(struct ceph_osd_request *req)
req              1916 net/ceph/osd_client.c 	struct ceph_osd *osd = req->r_osd;
req              1921 net/ceph/osd_client.c 	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
req              1925 net/ceph/osd_client.c 	hoid_fill_from_target(&hoid, &req->r_t);
req              1931 net/ceph/osd_client.c 	     __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
req              1939 net/ceph/osd_client.c static void setup_request_data(struct ceph_osd_request *req)
req              1941 net/ceph/osd_client.c 	struct ceph_msg *request_msg = req->r_request;
req              1942 net/ceph/osd_client.c 	struct ceph_msg *reply_msg = req->r_reply;
req              1945 net/ceph/osd_client.c 	if (req->r_request->num_data_items || req->r_reply->num_data_items)
req              1949 net/ceph/osd_client.c 	for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
req              2041 net/ceph/osd_client.c static void encode_request_partial(struct ceph_osd_request *req,
req              2049 net/ceph/osd_client.c 	if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
req              2051 net/ceph/osd_client.c 		WARN_ON(req->r_snapid != CEPH_NOSNAP);
req              2053 net/ceph/osd_client.c 		WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
req              2054 net/ceph/osd_client.c 			req->r_data_offset || req->r_snapc);
req              2057 net/ceph/osd_client.c 	setup_request_data(req);
req              2059 net/ceph/osd_client.c 	encode_spgid(&p, &req->r_t.spgid); /* actual spg */
req              2060 net/ceph/osd_client.c 	ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
req              2061 net/ceph/osd_client.c 	ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
req              2062 net/ceph/osd_client.c 	ceph_encode_32(&p, req->r_flags);
req              2074 net/ceph/osd_client.c 	ceph_encode_timespec64(p, &req->r_mtime);
req              2077 net/ceph/osd_client.c 	encode_oloc(&p, end, &req->r_t.target_oloc);
req              2078 net/ceph/osd_client.c 	ceph_encode_string(&p, end, req->r_t.target_oid.name,
req              2079 net/ceph/osd_client.c 			   req->r_t.target_oid.name_len);
req              2082 net/ceph/osd_client.c 	ceph_encode_16(&p, req->r_num_ops);
req              2083 net/ceph/osd_client.c 	for (i = 0; i < req->r_num_ops; i++) {
req              2084 net/ceph/osd_client.c 		data_len += osd_req_encode_op(p, &req->r_ops[i]);
req              2088 net/ceph/osd_client.c 	ceph_encode_64(&p, req->r_snapid); /* snapid */
req              2089 net/ceph/osd_client.c 	if (req->r_snapc) {
req              2090 net/ceph/osd_client.c 		ceph_encode_64(&p, req->r_snapc->seq);
req              2091 net/ceph/osd_client.c 		ceph_encode_32(&p, req->r_snapc->num_snaps);
req              2092 net/ceph/osd_client.c 		for (i = 0; i < req->r_snapc->num_snaps; i++)
req              2093 net/ceph/osd_client.c 			ceph_encode_64(&p, req->r_snapc->snaps[i]);
req              2099 net/ceph/osd_client.c 	ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
req              2112 net/ceph/osd_client.c 	msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
req              2114 net/ceph/osd_client.c 	dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg,
req              2115 net/ceph/osd_client.c 	     req->r_t.target_oid.name, req->r_t.target_oid.name_len);
req              2213 net/ceph/osd_client.c static void send_request(struct ceph_osd_request *req)
req              2215 net/ceph/osd_client.c 	struct ceph_osd *osd = req->r_osd;
req              2218 net/ceph/osd_client.c 	WARN_ON(osd->o_osd != req->r_t.osd);
req              2221 net/ceph/osd_client.c 	if (should_plug_request(req))
req              2228 net/ceph/osd_client.c 	if (req->r_sent)
req              2229 net/ceph/osd_client.c 		ceph_msg_revoke(req->r_request);
req              2231 net/ceph/osd_client.c 	req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
req              2232 net/ceph/osd_client.c 	if (req->r_attempts)
req              2233 net/ceph/osd_client.c 		req->r_flags |= CEPH_OSD_FLAG_RETRY;
req              2235 net/ceph/osd_client.c 		WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
req              2237 net/ceph/osd_client.c 	encode_request_partial(req, req->r_request);
req              2240 net/ceph/osd_client.c 	     __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
req              2241 net/ceph/osd_client.c 	     req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
req              2242 net/ceph/osd_client.c 	     req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
req              2243 net/ceph/osd_client.c 	     req->r_attempts);
req              2245 net/ceph/osd_client.c 	req->r_t.paused = false;
req              2246 net/ceph/osd_client.c 	req->r_stamp = jiffies;
req              2247 net/ceph/osd_client.c 	req->r_attempts++;
req              2249 net/ceph/osd_client.c 	req->r_sent = osd->o_incarnation;
req              2250 net/ceph/osd_client.c 	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
req              2251 net/ceph/osd_client.c 	ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
req              2275 net/ceph/osd_client.c static void complete_request(struct ceph_osd_request *req, int err);
req              2276 net/ceph/osd_client.c static void send_map_check(struct ceph_osd_request *req);
req              2278 net/ceph/osd_client.c static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
req              2280 net/ceph/osd_client.c 	struct ceph_osd_client *osdc = req->r_osdc;
req              2287 net/ceph/osd_client.c 	WARN_ON(req->r_tid);
req              2288 net/ceph/osd_client.c 	dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
req              2291 net/ceph/osd_client.c 	ct_res = calc_target(osdc, &req->r_t, false);
req              2295 net/ceph/osd_client.c 	osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
req              2302 net/ceph/osd_client.c 		dout("req %p abort_err %d\n", req, osdc->abort_err);
req              2305 net/ceph/osd_client.c 		dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
req              2307 net/ceph/osd_client.c 		req->r_t.paused = true;
req              2309 net/ceph/osd_client.c 	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
req              2311 net/ceph/osd_client.c 		dout("req %p pausewr\n", req);
req              2312 net/ceph/osd_client.c 		req->r_t.paused = true;
req              2314 net/ceph/osd_client.c 	} else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
req              2316 net/ceph/osd_client.c 		dout("req %p pauserd\n", req);
req              2317 net/ceph/osd_client.c 		req->r_t.paused = true;
req              2319 net/ceph/osd_client.c 	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
req              2320 net/ceph/osd_client.c 		   !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
req              2323 net/ceph/osd_client.c 		    pool_full(osdc, req->r_t.base_oloc.pool))) {
req              2324 net/ceph/osd_client.c 		dout("req %p full/pool_full\n", req);
req              2329 net/ceph/osd_client.c 			req->r_t.paused = true;
req              2344 net/ceph/osd_client.c 	req->r_tid = atomic64_inc_return(&osdc->last_tid);
req              2345 net/ceph/osd_client.c 	link_request(osd, req);
req              2347 net/ceph/osd_client.c 		send_request(req);
req              2349 net/ceph/osd_client.c 		complete_request(req, err);
req              2353 net/ceph/osd_client.c 		send_map_check(req);
req              2367 net/ceph/osd_client.c static void account_request(struct ceph_osd_request *req)
req              2369 net/ceph/osd_client.c 	WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
req              2370 net/ceph/osd_client.c 	WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));
req              2372 net/ceph/osd_client.c 	req->r_flags |= CEPH_OSD_FLAG_ONDISK;
req              2373 net/ceph/osd_client.c 	atomic_inc(&req->r_osdc->num_requests);
req              2375 net/ceph/osd_client.c 	req->r_start_stamp = jiffies;
req              2378 net/ceph/osd_client.c static void submit_request(struct ceph_osd_request *req, bool wrlocked)
req              2380 net/ceph/osd_client.c 	ceph_osdc_get_request(req);
req              2381 net/ceph/osd_client.c 	account_request(req);
req              2382 net/ceph/osd_client.c 	__submit_request(req, wrlocked);
req              2385 net/ceph/osd_client.c static void finish_request(struct ceph_osd_request *req)
req              2387 net/ceph/osd_client.c 	struct ceph_osd_client *osdc = req->r_osdc;
req              2389 net/ceph/osd_client.c 	WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
req              2390 net/ceph/osd_client.c 	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
req              2392 net/ceph/osd_client.c 	if (req->r_osd)
req              2393 net/ceph/osd_client.c 		unlink_request(req->r_osd, req);
req              2402 net/ceph/osd_client.c 	ceph_msg_revoke(req->r_request);
req              2403 net/ceph/osd_client.c 	ceph_msg_revoke_incoming(req->r_reply);
req              2406 net/ceph/osd_client.c static void __complete_request(struct ceph_osd_request *req)
req              2408 net/ceph/osd_client.c 	dout("%s req %p tid %llu cb %ps result %d\n", __func__, req,
req              2409 net/ceph/osd_client.c 	     req->r_tid, req->r_callback, req->r_result);
req              2411 net/ceph/osd_client.c 	if (req->r_callback)
req              2412 net/ceph/osd_client.c 		req->r_callback(req);
req              2413 net/ceph/osd_client.c 	complete_all(&req->r_completion);
req              2414 net/ceph/osd_client.c 	ceph_osdc_put_request(req);
req              2419 net/ceph/osd_client.c 	struct ceph_osd_request *req =
req              2422 net/ceph/osd_client.c 	__complete_request(req);
req              2428 net/ceph/osd_client.c static void complete_request(struct ceph_osd_request *req, int err)
req              2430 net/ceph/osd_client.c 	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
req              2432 net/ceph/osd_client.c 	req->r_result = err;
req              2433 net/ceph/osd_client.c 	finish_request(req);
req              2435 net/ceph/osd_client.c 	INIT_WORK(&req->r_complete_work, complete_request_workfn);
req              2436 net/ceph/osd_client.c 	queue_work(req->r_osdc->completion_wq, &req->r_complete_work);
req              2439 net/ceph/osd_client.c static void cancel_map_check(struct ceph_osd_request *req)
req              2441 net/ceph/osd_client.c 	struct ceph_osd_client *osdc = req->r_osdc;
req              2446 net/ceph/osd_client.c 	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
req              2450 net/ceph/osd_client.c 	WARN_ON(lookup_req != req);
req              2451 net/ceph/osd_client.c 	erase_request_mc(&osdc->map_checks, req);
req              2452 net/ceph/osd_client.c 	ceph_osdc_put_request(req);
req              2455 net/ceph/osd_client.c static void cancel_request(struct ceph_osd_request *req)
req              2457 net/ceph/osd_client.c 	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
req              2459 net/ceph/osd_client.c 	cancel_map_check(req);
req              2460 net/ceph/osd_client.c 	finish_request(req);
req              2461 net/ceph/osd_client.c 	complete_all(&req->r_completion);
req              2462 net/ceph/osd_client.c 	ceph_osdc_put_request(req);
req              2465 net/ceph/osd_client.c static void abort_request(struct ceph_osd_request *req, int err)
req              2467 net/ceph/osd_client.c 	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
req              2469 net/ceph/osd_client.c 	cancel_map_check(req);
req              2470 net/ceph/osd_client.c 	complete_request(req, err);
req              2473 net/ceph/osd_client.c static int abort_fn(struct ceph_osd_request *req, void *arg)
req              2477 net/ceph/osd_client.c 	abort_request(req, err);
req              2535 net/ceph/osd_client.c static int abort_on_full_fn(struct ceph_osd_request *req, void *arg)
req              2537 net/ceph/osd_client.c 	struct ceph_osd_client *osdc = req->r_osdc;
req              2540 net/ceph/osd_client.c 	if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
req              2542 net/ceph/osd_client.c 	     pool_full(osdc, req->r_t.base_oloc.pool))) {
req              2547 net/ceph/osd_client.c 		abort_request(req, -ENOSPC);
req              2568 net/ceph/osd_client.c static void check_pool_dne(struct ceph_osd_request *req)
req              2570 net/ceph/osd_client.c 	struct ceph_osd_client *osdc = req->r_osdc;
req              2576 net/ceph/osd_client.c 	if (req->r_attempts) {
req              2582 net/ceph/osd_client.c 		req->r_map_dne_bound = map->epoch;
req              2583 net/ceph/osd_client.c 		dout("%s req %p tid %llu pool disappeared\n", __func__, req,
req              2584 net/ceph/osd_client.c 		     req->r_tid);
req              2587 net/ceph/osd_client.c 		     req, req->r_tid, req->r_map_dne_bound, map->epoch);
req              2590 net/ceph/osd_client.c 	if (req->r_map_dne_bound) {
req              2591 net/ceph/osd_client.c 		if (map->epoch >= req->r_map_dne_bound) {
req              2594 net/ceph/osd_client.c 					    req->r_tid);
req              2595 net/ceph/osd_client.c 			complete_request(req, -ENOENT);
req              2598 net/ceph/osd_client.c 		send_map_check(req);
req              2605 net/ceph/osd_client.c 	struct ceph_osd_request *req;
req              2611 net/ceph/osd_client.c 	req = lookup_request_mc(&osdc->map_checks, tid);
req              2612 net/ceph/osd_client.c 	if (!req) {
req              2618 net/ceph/osd_client.c 	     req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
req              2619 net/ceph/osd_client.c 	if (!req->r_map_dne_bound)
req              2620 net/ceph/osd_client.c 		req->r_map_dne_bound = greq->u.newest;
req              2621 net/ceph/osd_client.c 	erase_request_mc(&osdc->map_checks, req);
req              2622 net/ceph/osd_client.c 	check_pool_dne(req);
req              2624 net/ceph/osd_client.c 	ceph_osdc_put_request(req);
req              2629 net/ceph/osd_client.c static void send_map_check(struct ceph_osd_request *req)
req              2631 net/ceph/osd_client.c 	struct ceph_osd_client *osdc = req->r_osdc;
req              2637 net/ceph/osd_client.c 	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
req              2639 net/ceph/osd_client.c 		WARN_ON(lookup_req != req);
req              2643 net/ceph/osd_client.c 	ceph_osdc_get_request(req);
req              2644 net/ceph/osd_client.c 	insert_request_mc(&osdc->map_checks, req);
req              2646 net/ceph/osd_client.c 					  map_check_cb, req->r_tid);
req              2800 net/ceph/osd_client.c static void cancel_linger_request(struct ceph_osd_request *req)
req              2802 net/ceph/osd_client.c 	struct ceph_osd_linger_request *lreq = req->r_priv;
req              2804 net/ceph/osd_client.c 	WARN_ON(!req->r_linger);
req              2805 net/ceph/osd_client.c 	cancel_request(req);
req              2934 net/ceph/osd_client.c static void linger_commit_cb(struct ceph_osd_request *req)
req              2936 net/ceph/osd_client.c 	struct ceph_osd_linger_request *lreq = req->r_priv;
req              2940 net/ceph/osd_client.c 	     lreq->linger_id, req->r_result);
req              2941 net/ceph/osd_client.c 	linger_reg_commit_complete(lreq, req->r_result);
req              2946 net/ceph/osd_client.c 		    osd_req_op_data(req, 0, notify, response_data);
req              2949 net/ceph/osd_client.c 		WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
req              2953 net/ceph/osd_client.c 		if (req->r_ops[0].outdata_len >= sizeof(u64)) {
req              2979 net/ceph/osd_client.c static void linger_reconnect_cb(struct ceph_osd_request *req)
req              2981 net/ceph/osd_client.c 	struct ceph_osd_linger_request *lreq = req->r_priv;
req              2985 net/ceph/osd_client.c 	     lreq, lreq->linger_id, req->r_result, lreq->last_error);
req              2986 net/ceph/osd_client.c 	if (req->r_result < 0) {
req              2988 net/ceph/osd_client.c 			lreq->last_error = normalize_watch_error(req->r_result);
req              2999 net/ceph/osd_client.c 	struct ceph_osd_request *req = lreq->reg_req;
req              3000 net/ceph/osd_client.c 	struct ceph_osd_req_op *op = &req->r_ops[0];
req              3002 net/ceph/osd_client.c 	verify_osdc_wrlocked(req->r_osdc);
req              3005 net/ceph/osd_client.c 	if (req->r_osd)
req              3006 net/ceph/osd_client.c 		cancel_linger_request(req);
req              3008 net/ceph/osd_client.c 	request_reinit(req);
req              3009 net/ceph/osd_client.c 	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
req              3010 net/ceph/osd_client.c 	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
req              3011 net/ceph/osd_client.c 	req->r_flags = lreq->t.flags;
req              3012 net/ceph/osd_client.c 	req->r_mtime = lreq->mtime;
req              3022 net/ceph/osd_client.c 		req->r_callback = linger_reconnect_cb;
req              3029 net/ceph/osd_client.c 		req->r_callback = linger_commit_cb;
req              3033 net/ceph/osd_client.c 	req->r_priv = linger_get(lreq);
req              3034 net/ceph/osd_client.c 	req->r_linger = true;
req              3036 net/ceph/osd_client.c 	submit_request(req, true);
req              3039 net/ceph/osd_client.c static void linger_ping_cb(struct ceph_osd_request *req)
req              3041 net/ceph/osd_client.c 	struct ceph_osd_linger_request *lreq = req->r_priv;
req              3045 net/ceph/osd_client.c 	     __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
req              3047 net/ceph/osd_client.c 	if (lreq->register_gen == req->r_ops[0].watch.gen) {
req              3048 net/ceph/osd_client.c 		if (!req->r_result) {
req              3051 net/ceph/osd_client.c 			lreq->last_error = normalize_watch_error(req->r_result);
req              3056 net/ceph/osd_client.c 		     lreq->register_gen, req->r_ops[0].watch.gen);
req              3066 net/ceph/osd_client.c 	struct ceph_osd_request *req = lreq->ping_req;
req              3067 net/ceph/osd_client.c 	struct ceph_osd_req_op *op = &req->r_ops[0];
req              3079 net/ceph/osd_client.c 	if (req->r_osd)
req              3080 net/ceph/osd_client.c 		cancel_linger_request(req);
req              3082 net/ceph/osd_client.c 	request_reinit(req);
req              3083 net/ceph/osd_client.c 	target_copy(&req->r_t, &lreq->t);
req              3089 net/ceph/osd_client.c 	req->r_callback = linger_ping_cb;
req              3090 net/ceph/osd_client.c 	req->r_priv = linger_get(lreq);
req              3091 net/ceph/osd_client.c 	req->r_linger = true;
req              3093 net/ceph/osd_client.c 	ceph_osdc_get_request(req);
req              3094 net/ceph/osd_client.c 	account_request(req);
req              3095 net/ceph/osd_client.c 	req->r_tid = atomic64_inc_return(&osdc->last_tid);
req              3096 net/ceph/osd_client.c 	link_request(lreq->osd, req);
req              3097 net/ceph/osd_client.c 	send_request(req);
req              3293 net/ceph/osd_client.c 			struct ceph_osd_request *req =
req              3298 net/ceph/osd_client.c 			if (time_before(req->r_stamp, cutoff)) {
req              3300 net/ceph/osd_client.c 				     req, req->r_tid, osd->o_osd);
req              3304 net/ceph/osd_client.c 			    time_before(req->r_start_stamp, expiry_cutoff)) {
req              3306 net/ceph/osd_client.c 				       req->r_tid, osd->o_osd);
req              3307 net/ceph/osd_client.c 				abort_request(req, -ETIMEDOUT);
req              3330 net/ceph/osd_client.c 			struct ceph_osd_request *req =
req              3335 net/ceph/osd_client.c 			if (time_before(req->r_start_stamp, expiry_cutoff)) {
req              3337 net/ceph/osd_client.c 				       req->r_tid, osdc->homeless_osd.o_osd);
req              3338 net/ceph/osd_client.c 				abort_request(req, -ETIMEDOUT);
req              3596 net/ceph/osd_client.c 	struct ceph_osd_request *req;
req              3613 net/ceph/osd_client.c 	req = lookup_request(&osd->o_requests, tid);
req              3614 net/ceph/osd_client.c 	if (!req) {
req              3619 net/ceph/osd_client.c 	m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
req              3624 net/ceph/osd_client.c 		       req->r_tid, ret);
req              3629 net/ceph/osd_client.c 	     __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
req              3634 net/ceph/osd_client.c 		if (m.retry_attempt != req->r_attempts - 1) {
req              3636 net/ceph/osd_client.c 			     req, req->r_tid, m.retry_attempt,
req              3637 net/ceph/osd_client.c 			     req->r_attempts - 1);
req              3645 net/ceph/osd_client.c 		dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
req              3647 net/ceph/osd_client.c 		unlink_request(osd, req);
req              3654 net/ceph/osd_client.c 		req->r_t.target_oloc.pool = m.redirect.oloc.pool;
req              3655 net/ceph/osd_client.c 		req->r_flags |= CEPH_OSD_FLAG_REDIRECTED |
req              3658 net/ceph/osd_client.c 		req->r_tid = 0;
req              3659 net/ceph/osd_client.c 		__submit_request(req, false);
req              3663 net/ceph/osd_client.c 	if (m.num_ops != req->r_num_ops) {
req              3665 net/ceph/osd_client.c 		       req->r_num_ops, req->r_tid);
req              3668 net/ceph/osd_client.c 	for (i = 0; i < req->r_num_ops; i++) {
req              3669 net/ceph/osd_client.c 		dout(" req %p tid %llu op %d rval %d len %u\n", req,
req              3670 net/ceph/osd_client.c 		     req->r_tid, i, m.rval[i], m.outdata_len[i]);
req              3671 net/ceph/osd_client.c 		req->r_ops[i].rval = m.rval[i];
req              3672 net/ceph/osd_client.c 		req->r_ops[i].outdata_len = m.outdata_len[i];
req              3677 net/ceph/osd_client.c 		       le32_to_cpu(msg->hdr.data_len), req->r_tid);
req              3681 net/ceph/osd_client.c 	     req, req->r_tid, m.result, data_len);
req              3688 net/ceph/osd_client.c 	req->r_result = m.result ?: data_len;
req              3689 net/ceph/osd_client.c 	finish_request(req);
req              3693 net/ceph/osd_client.c 	__complete_request(req);
req              3697 net/ceph/osd_client.c 	complete_request(req, -EIO);
req              3798 net/ceph/osd_client.c 		struct ceph_osd_request *req =
req              3804 net/ceph/osd_client.c 		dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
req              3805 net/ceph/osd_client.c 		ct_res = calc_target(osdc, &req->r_t, false);
req              3810 net/ceph/osd_client.c 			     pool_cleared_full(osdc, req->r_t.base_oloc.pool));
req              3812 net/ceph/osd_client.c 			    (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
req              3818 net/ceph/osd_client.c 			cancel_map_check(req);
req              3819 net/ceph/osd_client.c 			unlink_request(osd, req);
req              3820 net/ceph/osd_client.c 			insert_request(need_resend, req);
req              3823 net/ceph/osd_client.c 			check_pool_dne(req);
req              3908 net/ceph/osd_client.c 		struct ceph_osd_request *req =
req              3913 net/ceph/osd_client.c 		if (req->r_t.epoch < osdc->osdmap->epoch) {
req              3914 net/ceph/osd_client.c 			ct_res = calc_target(osdc, &req->r_t, false);
req              3916 net/ceph/osd_client.c 				erase_request(need_resend, req);
req              3917 net/ceph/osd_client.c 				check_pool_dne(req);
req              3923 net/ceph/osd_client.c 		struct ceph_osd_request *req =
req              3928 net/ceph/osd_client.c 		erase_request(need_resend, req); /* before link_request() */
req              3930 net/ceph/osd_client.c 		osd = lookup_create_osd(osdc, req->r_t.osd, true);
req              3931 net/ceph/osd_client.c 		link_request(osd, req);
req              3932 net/ceph/osd_client.c 		if (!req->r_linger) {
req              3933 net/ceph/osd_client.c 			if (!osd_homeless(osd) && !req->r_t.paused)
req              3934 net/ceph/osd_client.c 				send_request(req);
req              3936 net/ceph/osd_client.c 			cancel_linger_request(req);
req              4075 net/ceph/osd_client.c 		struct ceph_osd_request *req =
req              4080 net/ceph/osd_client.c 		if (!req->r_linger) {
req              4081 net/ceph/osd_client.c 			if (!req->r_t.paused)
req              4082 net/ceph/osd_client.c 				send_request(req);
req              4084 net/ceph/osd_client.c 			cancel_linger_request(req);
req              4314 net/ceph/osd_client.c 		struct ceph_osd_request *req =
req              4317 net/ceph/osd_client.c 		if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
req              4322 net/ceph/osd_client.c 			if (target_contained_by(&req->r_t, m->begin, m->end)) {
req              4327 net/ceph/osd_client.c 				send_request(req);
req              4477 net/ceph/osd_client.c 			    struct ceph_osd_request *req,
req              4481 net/ceph/osd_client.c 	submit_request(req, false);
req              4492 net/ceph/osd_client.c void ceph_osdc_cancel_request(struct ceph_osd_request *req)
req              4494 net/ceph/osd_client.c 	struct ceph_osd_client *osdc = req->r_osdc;
req              4497 net/ceph/osd_client.c 	if (req->r_osd)
req              4498 net/ceph/osd_client.c 		cancel_request(req);
req              4506 net/ceph/osd_client.c static int wait_request_timeout(struct ceph_osd_request *req,
req              4511 net/ceph/osd_client.c 	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
req              4512 net/ceph/osd_client.c 	left = wait_for_completion_killable_timeout(&req->r_completion,
req              4516 net/ceph/osd_client.c 		ceph_osdc_cancel_request(req);
req              4518 net/ceph/osd_client.c 		left = req->r_result; /* completed */
req              4528 net/ceph/osd_client.c 			   struct ceph_osd_request *req)
req              4530 net/ceph/osd_client.c 	return wait_request_timeout(req, 0);
req              4549 net/ceph/osd_client.c 			struct ceph_osd_request *req =
req              4552 net/ceph/osd_client.c 			if (req->r_tid > last_tid)
req              4555 net/ceph/osd_client.c 			if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
req              4558 net/ceph/osd_client.c 			ceph_osdc_get_request(req);
req              4562 net/ceph/osd_client.c 			     __func__, req, req->r_tid, last_tid);
req              4563 net/ceph/osd_client.c 			wait_for_completion(&req->r_completion);
req              4564 net/ceph/osd_client.c 			ceph_osdc_put_request(req);
req              4579 net/ceph/osd_client.c 	struct ceph_osd_request *req;
req              4581 net/ceph/osd_client.c 	req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
req              4582 net/ceph/osd_client.c 	if (!req)
req              4585 net/ceph/osd_client.c 	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
req              4586 net/ceph/osd_client.c 	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
req              4587 net/ceph/osd_client.c 	return req;
req              4593 net/ceph/osd_client.c 	struct ceph_osd_request *req;
req              4595 net/ceph/osd_client.c 	req = alloc_linger_request(lreq);
req              4596 net/ceph/osd_client.c 	if (!req)
req              4603 net/ceph/osd_client.c 	osd_req_op_watch_init(req, 0, 0, watch_opcode);
req              4605 net/ceph/osd_client.c 	if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
req              4606 net/ceph/osd_client.c 		ceph_osdc_put_request(req);
req              4610 net/ceph/osd_client.c 	return req;
req              4680 net/ceph/osd_client.c 	struct ceph_osd_request *req;
req              4683 net/ceph/osd_client.c 	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
req              4684 net/ceph/osd_client.c 	if (!req)
req              4687 net/ceph/osd_client.c 	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
req              4688 net/ceph/osd_client.c 	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
req              4689 net/ceph/osd_client.c 	req->r_flags = CEPH_OSD_FLAG_WRITE;
req              4690 net/ceph/osd_client.c 	ktime_get_real_ts64(&req->r_mtime);
req              4691 net/ceph/osd_client.c 	osd_req_op_watch_init(req, 0, lreq->linger_id,
req              4694 net/ceph/osd_client.c 	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
req              4698 net/ceph/osd_client.c 	ceph_osdc_start_request(osdc, req, false);
req              4701 net/ceph/osd_client.c 	ret = wait_request_timeout(req, opts->mount_timeout);
req              4704 net/ceph/osd_client.c 	ceph_osdc_put_request(req);
req              4709 net/ceph/osd_client.c static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
req              4717 net/ceph/osd_client.c 	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
req              4749 net/ceph/osd_client.c 	struct ceph_osd_request *req;
req              4752 net/ceph/osd_client.c 	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
req              4753 net/ceph/osd_client.c 	if (!req)
req              4756 net/ceph/osd_client.c 	ceph_oid_copy(&req->r_base_oid, oid);
req              4757 net/ceph/osd_client.c 	ceph_oloc_copy(&req->r_base_oloc, oloc);
req              4758 net/ceph/osd_client.c 	req->r_flags = CEPH_OSD_FLAG_READ;
req              4760 net/ceph/osd_client.c 	ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
req              4765 net/ceph/osd_client.c 	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
req              4769 net/ceph/osd_client.c 	ceph_osdc_start_request(osdc, req, false);
req              4770 net/ceph/osd_client.c 	ret = ceph_osdc_wait_request(osdc, req);
req              4773 net/ceph/osd_client.c 	ceph_osdc_put_request(req);
req              4778 net/ceph/osd_client.c static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
req              4786 net/ceph/osd_client.c 	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
req              4994 net/ceph/osd_client.c 	struct ceph_osd_request *req;
req              4998 net/ceph/osd_client.c 	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
req              4999 net/ceph/osd_client.c 	if (!req)
req              5002 net/ceph/osd_client.c 	ceph_oid_copy(&req->r_base_oid, oid);
req              5003 net/ceph/osd_client.c 	ceph_oloc_copy(&req->r_base_oloc, oloc);
req              5004 net/ceph/osd_client.c 	req->r_flags = CEPH_OSD_FLAG_READ;
req              5012 net/ceph/osd_client.c 	osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
req              5013 net/ceph/osd_client.c 	ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
req              5017 net/ceph/osd_client.c 	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
req              5021 net/ceph/osd_client.c 	ceph_osdc_start_request(osdc, req, false);
req              5022 net/ceph/osd_client.c 	ret = ceph_osdc_wait_request(osdc, req);
req              5025 net/ceph/osd_client.c 		void *const end = p + req->r_ops[0].outdata_len;
req              5031 net/ceph/osd_client.c 	ceph_osdc_put_request(req);
req              5069 net/ceph/osd_client.c 	struct ceph_osd_request *req;
req              5075 net/ceph/osd_client.c 	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
req              5076 net/ceph/osd_client.c 	if (!req)
req              5079 net/ceph/osd_client.c 	ceph_oid_copy(&req->r_base_oid, oid);
req              5080 net/ceph/osd_client.c 	ceph_oloc_copy(&req->r_base_oloc, oloc);
req              5081 net/ceph/osd_client.c 	req->r_flags = flags;
req              5083 net/ceph/osd_client.c 	ret = osd_req_op_cls_init(req, 0, class, method);
req              5088 net/ceph/osd_client.c 		osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
req              5091 net/ceph/osd_client.c 		osd_req_op_cls_response_data_pages(req, 0, resp_pages,
req              5094 net/ceph/osd_client.c 	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
req              5098 net/ceph/osd_client.c 	ceph_osdc_start_request(osdc, req, false);
req              5099 net/ceph/osd_client.c 	ret = ceph_osdc_wait_request(osdc, req);
req              5101 net/ceph/osd_client.c 		ret = req->r_ops[0].rval;
req              5103 net/ceph/osd_client.c 			*resp_len = req->r_ops[0].outdata_len;
req              5107 net/ceph/osd_client.c 	ceph_osdc_put_request(req);
req              5243 net/ceph/osd_client.c 	struct ceph_osd_request *req;
req              5248 net/ceph/osd_client.c 	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
req              5252 net/ceph/osd_client.c 	if (IS_ERR(req))
req              5253 net/ceph/osd_client.c 		return PTR_ERR(req);
req              5256 net/ceph/osd_client.c 	osd_req_op_extent_osd_data_pages(req, 0,
req              5262 net/ceph/osd_client.c 	rc = ceph_osdc_start_request(osdc, req, false);
req              5264 net/ceph/osd_client.c 		rc = ceph_osdc_wait_request(osdc, req);
req              5266 net/ceph/osd_client.c 	ceph_osdc_put_request(req);
req              5283 net/ceph/osd_client.c 	struct ceph_osd_request *req;
req              5287 net/ceph/osd_client.c 	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
req              5291 net/ceph/osd_client.c 	if (IS_ERR(req))
req              5292 net/ceph/osd_client.c 		return PTR_ERR(req);
req              5295 net/ceph/osd_client.c 	osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
req              5299 net/ceph/osd_client.c 	req->r_mtime = *mtime;
req              5300 net/ceph/osd_client.c 	rc = ceph_osdc_start_request(osdc, req, true);
req              5302 net/ceph/osd_client.c 		rc = ceph_osdc_wait_request(osdc, req);
req              5304 net/ceph/osd_client.c 	ceph_osdc_put_request(req);
req              5312 net/ceph/osd_client.c static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
req              5328 net/ceph/osd_client.c 	op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM, dst_fadvise_flags);
req              5355 net/ceph/osd_client.c 	struct ceph_osd_request *req;
req              5358 net/ceph/osd_client.c 	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
req              5359 net/ceph/osd_client.c 	if (!req)
req              5362 net/ceph/osd_client.c 	req->r_flags = CEPH_OSD_FLAG_WRITE;
req              5364 net/ceph/osd_client.c 	ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
req              5365 net/ceph/osd_client.c 	ceph_oid_copy(&req->r_t.base_oid, dst_oid);
req              5367 net/ceph/osd_client.c 	ret = osd_req_op_copy_from_init(req, src_snapid, src_version, src_oid,
req              5373 net/ceph/osd_client.c 	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
req              5377 net/ceph/osd_client.c 	ceph_osdc_start_request(osdc, req, false);
req              5378 net/ceph/osd_client.c 	ret = ceph_osdc_wait_request(osdc, req);
req              5381 net/ceph/osd_client.c 	ceph_osdc_put_request(req);
req              5448 net/ceph/osd_client.c 	struct ceph_osd_request *req;
req              5462 net/ceph/osd_client.c 	req = lookup_request(&osd->o_requests, tid);
req              5463 net/ceph/osd_client.c 	if (!req) {
req              5470 net/ceph/osd_client.c 	ceph_msg_revoke_incoming(req->r_reply);
req              5472 net/ceph/osd_client.c 	if (front_len > req->r_reply->front_alloc_len) {
req              5474 net/ceph/osd_client.c 			__func__, osd->o_osd, req->r_tid, front_len,
req              5475 net/ceph/osd_client.c 			req->r_reply->front_alloc_len);
req              5480 net/ceph/osd_client.c 		ceph_msg_put(req->r_reply);
req              5481 net/ceph/osd_client.c 		req->r_reply = m;
req              5484 net/ceph/osd_client.c 	if (data_len > req->r_reply->data_length) {
req              5486 net/ceph/osd_client.c 			__func__, osd->o_osd, req->r_tid, data_len,
req              5487 net/ceph/osd_client.c 			req->r_reply->data_length);
req              5493 net/ceph/osd_client.c 	m = ceph_msg_get(req->r_reply);
req              3957 net/core/devlink.c int devlink_info_driver_name_put(struct devlink_info_req *req, const char *name)
req              3959 net/core/devlink.c 	return nla_put_string(req->msg, DEVLINK_ATTR_INFO_DRIVER_NAME, name);
req              3963 net/core/devlink.c int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn)
req              3965 net/core/devlink.c 	return nla_put_string(req->msg, DEVLINK_ATTR_INFO_SERIAL_NUMBER, sn);
req              3969 net/core/devlink.c static int devlink_info_version_put(struct devlink_info_req *req, int attr,
req              3976 net/core/devlink.c 	nest = nla_nest_start_noflag(req->msg, attr);
req              3980 net/core/devlink.c 	err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_NAME,
req              3985 net/core/devlink.c 	err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_VALUE,
req              3990 net/core/devlink.c 	nla_nest_end(req->msg, nest);
req              3995 net/core/devlink.c 	nla_nest_cancel(req->msg, nest);
req              3999 net/core/devlink.c int devlink_info_version_fixed_put(struct devlink_info_req *req,
req              4003 net/core/devlink.c 	return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_FIXED,
req              4008 net/core/devlink.c int devlink_info_version_stored_put(struct devlink_info_req *req,
req              4012 net/core/devlink.c 	return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED,
req              4017 net/core/devlink.c int devlink_info_version_running_put(struct devlink_info_req *req,
req              4021 net/core/devlink.c 	return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING,
req              4031 net/core/devlink.c 	struct devlink_info_req req;
req              4043 net/core/devlink.c 	req.msg = msg;
req              4044 net/core/devlink.c 	err = devlink->ops->info_get(devlink, &req, extack);
req              8012 net/core/devlink.c 	struct devlink_info_req req;
req              8020 net/core/devlink.c 	req.msg = msg;
req              8021 net/core/devlink.c 	err = devlink->ops->info_get(devlink, &req, NULL);
req                91 net/core/request_sock.c void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
req                94 net/core/request_sock.c 	struct sock *lsk = req->rsk_listener;
req               102 net/core/request_sock.c 	tcp_rsk(req)->tfo_listener = false;
req               103 net/core/request_sock.c 	if (req->sk)	/* the child socket hasn't been accepted yet */
req               111 net/core/request_sock.c 		reqsk_put(req);
req               121 net/core/request_sock.c 	req->rsk_timer.expires = jiffies + 60*HZ;
req               123 net/core/request_sock.c 		fastopenq->rskq_rst_head = req;
req               125 net/core/request_sock.c 		fastopenq->rskq_rst_tail->dl_next = req;
req               127 net/core/request_sock.c 	req->dl_next = NULL;
req               128 net/core/request_sock.c 	fastopenq->rskq_rst_tail = req;
req               215 net/core/sock_diag.c 	struct sock_diag_req *req = nlmsg_data(nlh);
req               218 net/core/sock_diag.c 	if (nlmsg_len(nlh) < sizeof(*req))
req               221 net/core/sock_diag.c 	if (req->sdiag_family >= AF_MAX)
req               223 net/core/sock_diag.c 	req->sdiag_family = array_index_nospec(req->sdiag_family, AF_MAX);
req               225 net/core/sock_diag.c 	if (sock_diag_handlers[req->sdiag_family] == NULL)
req               226 net/core/sock_diag.c 		sock_load_diag_module(req->sdiag_family, 0);
req               229 net/core/sock_diag.c 	hndl = sock_diag_handlers[req->sdiag_family];
req               271 net/dccp/dccp.h 				       const struct request_sock *req,
req               277 net/dccp/dccp.h 				       struct request_sock *req,
req               282 net/dccp/dccp.h 			    struct request_sock *req);
req               296 net/dccp/dccp.h 				   struct request_sock *req);
req                56 net/dccp/diag.c 			      const struct inet_diag_req_v2 *req)
req                58 net/dccp/diag.c 	return inet_diag_dump_one_icsk(&dccp_hashinfo, in_skb, nlh, req);
req               196 net/dccp/ipv4.c 	struct request_sock *req = inet_reqsk(sk);
req               203 net/dccp/ipv4.c 	if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) {
req               212 net/dccp/ipv4.c 		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
req               214 net/dccp/ipv4.c 	reqsk_put(req);
req               395 net/dccp/ipv4.c 				       struct request_sock *req,
req               407 net/dccp/ipv4.c 	newsk = dccp_create_openreq_child(sk, req, skb);
req               412 net/dccp/ipv4.c 	ireq		   = inet_rsk(req);
req               421 net/dccp/ipv4.c 	if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
req               477 net/dccp/ipv4.c static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req)
req               484 net/dccp/ipv4.c 	dst = inet_csk_route_req(sk, &fl4, req);
req               488 net/dccp/ipv4.c 	skb = dccp_make_response(sk, dst, req);
req               490 net/dccp/ipv4.c 		const struct inet_request_sock *ireq = inet_rsk(req);
req               552 net/dccp/ipv4.c static void dccp_v4_reqsk_destructor(struct request_sock *req)
req               554 net/dccp/ipv4.c 	dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
req               555 net/dccp/ipv4.c 	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
req               558 net/dccp/ipv4.c void dccp_syn_ack_timeout(const struct request_sock *req)
req               576 net/dccp/ipv4.c 	struct request_sock *req;
req               601 net/dccp/ipv4.c 	req = inet_reqsk_alloc(&dccp_request_sock_ops, sk, true);
req               602 net/dccp/ipv4.c 	if (req == NULL)
req               605 net/dccp/ipv4.c 	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
req               608 net/dccp/ipv4.c 	dreq = dccp_rsk(req);
req               612 net/dccp/ipv4.c 	if (security_inet_conn_request(sk, skb, req))
req               615 net/dccp/ipv4.c 	ireq = inet_rsk(req);
req               616 net/dccp/ipv4.c 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
req               617 net/dccp/ipv4.c 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
req               635 net/dccp/ipv4.c 	if (dccp_v4_send_response(sk, req))
req               638 net/dccp/ipv4.c 	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
req               639 net/dccp/ipv4.c 	reqsk_put(req);
req               643 net/dccp/ipv4.c 	reqsk_free(req);
req               832 net/dccp/ipv4.c 		struct request_sock *req = inet_reqsk(sk);
req               835 net/dccp/ipv4.c 		sk = req->rsk_listener;
req               837 net/dccp/ipv4.c 			inet_csk_reqsk_queue_drop_and_put(sk, req);
req               842 net/dccp/ipv4.c 		nsk = dccp_check_req(sk, skb, req);
req               844 net/dccp/ipv4.c 			reqsk_put(req);
req               848 net/dccp/ipv4.c 			reqsk_put(req);
req               188 net/dccp/ipv6.c static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req)
req               190 net/dccp/ipv6.c 	struct inet_request_sock *ireq = inet_rsk(req);
req               206 net/dccp/ipv6.c 	security_req_classify_flow(req, flowi6_to_flowi(&fl6));
req               220 net/dccp/ipv6.c 	skb = dccp_make_response(sk, dst, req);
req               244 net/dccp/ipv6.c static void dccp_v6_reqsk_destructor(struct request_sock *req)
req               246 net/dccp/ipv6.c 	dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
req               247 net/dccp/ipv6.c 	kfree(inet_rsk(req)->ipv6_opt);
req               248 net/dccp/ipv6.c 	kfree_skb(inet_rsk(req)->pktopts);
req               309 net/dccp/ipv6.c 	struct request_sock *req;
req               336 net/dccp/ipv6.c 	req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk, true);
req               337 net/dccp/ipv6.c 	if (req == NULL)
req               340 net/dccp/ipv6.c 	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
req               343 net/dccp/ipv6.c 	dreq = dccp_rsk(req);
req               347 net/dccp/ipv6.c 	if (security_inet_conn_request(sk, skb, req))
req               350 net/dccp/ipv6.c 	ireq = inet_rsk(req);
req               382 net/dccp/ipv6.c 	if (dccp_v6_send_response(sk, req))
req               385 net/dccp/ipv6.c 	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
req               386 net/dccp/ipv6.c 	reqsk_put(req);
req               390 net/dccp/ipv6.c 	reqsk_free(req);
req               398 net/dccp/ipv6.c 					      struct request_sock *req,
req               403 net/dccp/ipv6.c 	struct inet_request_sock *ireq = inet_rsk(req);
req               415 net/dccp/ipv6.c 		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst,
req               461 net/dccp/ipv6.c 		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_DCCP);
req               466 net/dccp/ipv6.c 	newsk = dccp_create_openreq_child(sk, req, skb);
req               734 net/dccp/ipv6.c 		struct request_sock *req = inet_reqsk(sk);
req               737 net/dccp/ipv6.c 		sk = req->rsk_listener;
req               739 net/dccp/ipv6.c 			inet_csk_reqsk_queue_drop_and_put(sk, req);
req               744 net/dccp/ipv6.c 		nsk = dccp_check_req(sk, skb, req);
req               746 net/dccp/ipv6.c 			reqsk_put(req);
req               750 net/dccp/ipv6.c 			reqsk_put(req);
req                78 net/dccp/minisocks.c 				       const struct request_sock *req,
req                87 net/dccp/minisocks.c 	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
req                90 net/dccp/minisocks.c 		struct dccp_request_sock *dreq = dccp_rsk(req);
req               141 net/dccp/minisocks.c 			    struct request_sock *req)
req               144 net/dccp/minisocks.c 	struct dccp_request_sock *dreq = dccp_rsk(req);
req               165 net/dccp/minisocks.c 			inet_rtx_syn_ack(sk, req);
req               192 net/dccp/minisocks.c 	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
req               193 net/dccp/minisocks.c 							 req, &own_req);
req               195 net/dccp/minisocks.c 		child = inet_csk_complete_hashdance(sk, child, req, own_req);
req               202 net/dccp/minisocks.c 		req->rsk_ops->send_reset(sk, skb);
req               204 net/dccp/minisocks.c 	inet_csk_reqsk_queue_drop(sk, req);
req               253 net/dccp/minisocks.c int dccp_reqsk_init(struct request_sock *req,
req               256 net/dccp/minisocks.c 	struct dccp_request_sock *dreq = dccp_rsk(req);
req               259 net/dccp/minisocks.c 	inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;
req               260 net/dccp/minisocks.c 	inet_rsk(req)->ir_num	   = ntohs(dccp_hdr(skb)->dccph_dport);
req               261 net/dccp/minisocks.c 	inet_rsk(req)->acked	   = 0;
req               391 net/dccp/output.c 				   struct request_sock *req)
req               413 net/dccp/output.c 	dreq = dccp_rsk(req);
req               414 net/dccp/output.c 	if (inet_rsk(req)->acked)	/* increase GSS upon retransmission */
req               429 net/dccp/output.c 	dh->dccph_sport	= htons(inet_rsk(req)->ir_num);
req               430 net/dccp/output.c 	dh->dccph_dport	= inet_rsk(req)->ir_rmt_port;
req               442 net/dccp/output.c 	inet_rsk(req)->acked = 1;
req               565 net/decnet/dn_fib.c 	} req;
req               590 net/decnet/dn_fib.c 	memset(&req.rtm, 0, sizeof(req.rtm));
req               600 net/decnet/dn_fib.c 	req.nlh.nlmsg_len = sizeof(req);
req               601 net/decnet/dn_fib.c 	req.nlh.nlmsg_type = cmd;
req               602 net/decnet/dn_fib.c 	req.nlh.nlmsg_flags = NLM_F_REQUEST|NLM_F_CREATE|NLM_F_APPEND;
req               603 net/decnet/dn_fib.c 	req.nlh.nlmsg_pid = 0;
req               604 net/decnet/dn_fib.c 	req.nlh.nlmsg_seq = 0;
req               606 net/decnet/dn_fib.c 	req.rtm.rtm_dst_len = dst_len;
req               607 net/decnet/dn_fib.c 	req.rtm.rtm_table = tb->n;
req               608 net/decnet/dn_fib.c 	req.rtm.rtm_protocol = RTPROT_KERNEL;
req               609 net/decnet/dn_fib.c 	req.rtm.rtm_scope = (type != RTN_LOCAL ? RT_SCOPE_LINK : RT_SCOPE_HOST);
req               610 net/decnet/dn_fib.c 	req.rtm.rtm_type = type;
req               613 net/decnet/dn_fib.c 		tb->insert(tb, &req.rtm, attrs, &req.nlh, NULL);
req               615 net/decnet/dn_fib.c 		tb->delete(tb, &req.rtm, attrs, &req.nlh, NULL);
req               383 net/decnet/dn_table.c 			struct nlmsghdr *nlh, struct netlink_skb_parms *req)
req               386 net/decnet/dn_table.c 	u32 portid = req ? req->portid : 0;
req               530 net/decnet/dn_table.c 			       struct nlmsghdr *n, struct netlink_skb_parms *req)
req               648 net/decnet/dn_table.c 			dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req);
req               657 net/decnet/dn_table.c 	dn_rtmsg_fib(RTM_NEWROUTE, new_f, z, tb->n, n, req);
req               667 net/decnet/dn_table.c 			       struct nlmsghdr *n, struct netlink_skb_parms *req)
req               721 net/decnet/dn_table.c 		dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req);
req                28 net/ieee802154/ieee802154.h struct sk_buff *ieee802154_nl_create(int flags, u8 req);
req                31 net/ieee802154/ieee802154.h 					int flags, u8 req);
req                24 net/ieee802154/netlink.c struct sk_buff *ieee802154_nl_create(int flags, u8 req)
req                35 net/ieee802154/netlink.c 			  &nl802154_family, flags, req);
req                56 net/ieee802154/netlink.c 					int flags, u8 req)
req                65 net/ieee802154/netlink.c 				&nl802154_family, flags, req);
req                58 net/ipv4/ah4.c 	struct ahash_request *req;
req                60 net/ipv4/ah4.c 	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
req                63 net/ipv4/ah4.c 	ahash_request_set_tfm(req, ahash);
req                65 net/ipv4/ah4.c 	return req;
req                69 net/ipv4/ah4.c 					     struct ahash_request *req)
req                71 net/ipv4/ah4.c 	return (void *)ALIGN((unsigned long)(req + 1) +
req               155 net/ipv4/ah4.c 	struct ahash_request *req;
req               186 net/ipv4/ah4.c 	req = ah_tmp_req(ahash, icv);
req               187 net/ipv4/ah4.c 	sg = ah_req_sg(ahash, req);
req               234 net/ipv4/ah4.c 	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
req               235 net/ipv4/ah4.c 	ahash_request_set_callback(req, 0, ah_output_done, skb);
req               239 net/ipv4/ah4.c 	err = crypto_ahash_digest(req);
req               313 net/ipv4/ah4.c 	struct ahash_request *req;
req               378 net/ipv4/ah4.c 	req = ah_tmp_req(ahash, icv);
req               379 net/ipv4/ah4.c 	sg = ah_req_sg(ahash, req);
req               409 net/ipv4/ah4.c 	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
req               410 net/ipv4/ah4.c 	ahash_request_set_callback(req, 0, ah_input_done, skb);
req               414 net/ipv4/ah4.c 	err = crypto_ahash_digest(req);
req              1921 net/ipv4/cipso_ipv4.c int cipso_v4_req_setattr(struct request_sock *req,
req              1963 net/ipv4/cipso_ipv4.c 	req_inet = inet_rsk(req);
req              2075 net/ipv4/cipso_ipv4.c void cipso_v4_req_delattr(struct request_sock *req)
req              2077 net/ipv4/cipso_ipv4.c 	cipso_v4_delopt(&inet_rsk(req)->ireq_opt);
req                80 net/ipv4/esp4.c 	struct aead_request *req;
req                82 net/ipv4/esp4.c 	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
req                84 net/ipv4/esp4.c 	aead_request_set_tfm(req, aead);
req                85 net/ipv4/esp4.c 	return req;
req                89 net/ipv4/esp4.c 					     struct aead_request *req)
req                91 net/ipv4/esp4.c 	return (void *)ALIGN((unsigned long)(req + 1) +
req               102 net/ipv4/esp4.c 	struct aead_request *req;
req               110 net/ipv4/esp4.c 	req = esp_tmp_req(aead, iv);
req               115 net/ipv4/esp4.c 	if (req->src != req->dst)
req               116 net/ipv4/esp4.c 		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
req               376 net/ipv4/esp4.c 	struct aead_request *req;
req               399 net/ipv4/esp4.c 	req = esp_tmp_req(aead, iv);
req               400 net/ipv4/esp4.c 	sg = esp_req_sg(aead, req);
req               447 net/ipv4/esp4.c 		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
req               449 net/ipv4/esp4.c 		aead_request_set_callback(req, 0, esp_output_done, skb);
req               451 net/ipv4/esp4.c 	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
req               452 net/ipv4/esp4.c 	aead_request_set_ad(req, assoclen);
req               459 net/ipv4/esp4.c 	err = crypto_aead_encrypt(req);
req               699 net/ipv4/esp4.c 	struct aead_request *req;
req               754 net/ipv4/esp4.c 	req = esp_tmp_req(aead, iv);
req               755 net/ipv4/esp4.c 	sg = esp_req_sg(aead, req);
req               769 net/ipv4/esp4.c 		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
req               771 net/ipv4/esp4.c 		aead_request_set_callback(req, 0, esp_input_done, skb);
req               773 net/ipv4/esp4.c 	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
req               774 net/ipv4/esp4.c 	aead_request_set_ad(req, assoclen);
req               776 net/ipv4/esp4.c 	err = crypto_aead_decrypt(req);
req               446 net/ipv4/inet_connection_sock.c 	struct request_sock *req;
req               472 net/ipv4/inet_connection_sock.c 	req = reqsk_queue_remove(queue, sk);
req               473 net/ipv4/inet_connection_sock.c 	newsk = req->sk;
req               476 net/ipv4/inet_connection_sock.c 	    tcp_rsk(req)->tfo_listener) {
req               478 net/ipv4/inet_connection_sock.c 		if (tcp_rsk(req)->tfo_listener) {
req               485 net/ipv4/inet_connection_sock.c 			req->sk = NULL;
req               486 net/ipv4/inet_connection_sock.c 			req = NULL;
req               512 net/ipv4/inet_connection_sock.c 	if (req)
req               513 net/ipv4/inet_connection_sock.c 		reqsk_put(req);
req               517 net/ipv4/inet_connection_sock.c 	req = NULL;
req               568 net/ipv4/inet_connection_sock.c 				     const struct request_sock *req)
req               570 net/ipv4/inet_connection_sock.c 	const struct inet_request_sock *ireq = inet_rsk(req);
req               584 net/ipv4/inet_connection_sock.c 	security_req_classify_flow(req, flowi4_to_flowi(fl4));
req               604 net/ipv4/inet_connection_sock.c 					    const struct request_sock *req)
req               606 net/ipv4/inet_connection_sock.c 	const struct inet_request_sock *ireq = inet_rsk(req);
req               622 net/ipv4/inet_connection_sock.c 	security_req_classify_flow(req, flowi4_to_flowi(fl4));
req               645 net/ipv4/inet_connection_sock.c static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
req               651 net/ipv4/inet_connection_sock.c 		*expire = req->num_timeout >= thresh;
req               655 net/ipv4/inet_connection_sock.c 	*expire = req->num_timeout >= thresh &&
req               656 net/ipv4/inet_connection_sock.c 		  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
req               662 net/ipv4/inet_connection_sock.c 	*resend = !inet_rsk(req)->acked ||
req               663 net/ipv4/inet_connection_sock.c 		  req->num_timeout >= rskq_defer_accept - 1;
req               666 net/ipv4/inet_connection_sock.c int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
req               668 net/ipv4/inet_connection_sock.c 	int err = req->rsk_ops->rtx_syn_ack(parent, req);
req               671 net/ipv4/inet_connection_sock.c 		req->num_retrans++;
req               677 net/ipv4/inet_connection_sock.c static bool reqsk_queue_unlink(struct request_sock *req)
req               679 net/ipv4/inet_connection_sock.c 	struct inet_hashinfo *hashinfo = req_to_sk(req)->sk_prot->h.hashinfo;
req               682 net/ipv4/inet_connection_sock.c 	if (sk_hashed(req_to_sk(req))) {
req               683 net/ipv4/inet_connection_sock.c 		spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
req               686 net/ipv4/inet_connection_sock.c 		found = __sk_nulls_del_node_init_rcu(req_to_sk(req));
req               689 net/ipv4/inet_connection_sock.c 	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
req               690 net/ipv4/inet_connection_sock.c 		reqsk_put(req);
req               694 net/ipv4/inet_connection_sock.c void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
req               696 net/ipv4/inet_connection_sock.c 	if (reqsk_queue_unlink(req)) {
req               697 net/ipv4/inet_connection_sock.c 		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
req               698 net/ipv4/inet_connection_sock.c 		reqsk_put(req);
req               703 net/ipv4/inet_connection_sock.c void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
req               705 net/ipv4/inet_connection_sock.c 	inet_csk_reqsk_queue_drop(sk, req);
req               706 net/ipv4/inet_connection_sock.c 	reqsk_put(req);
req               712 net/ipv4/inet_connection_sock.c 	struct request_sock *req = from_timer(req, t, rsk_timer);
req               713 net/ipv4/inet_connection_sock.c 	struct sock *sk_listener = req->rsk_listener;
req               757 net/ipv4/inet_connection_sock.c 	syn_ack_recalc(req, thresh, max_retries, defer_accept,
req               759 net/ipv4/inet_connection_sock.c 	req->rsk_ops->syn_ack_timeout(req);
req               762 net/ipv4/inet_connection_sock.c 	     !inet_rtx_syn_ack(sk_listener, req) ||
req               763 net/ipv4/inet_connection_sock.c 	     inet_rsk(req)->acked)) {
req               766 net/ipv4/inet_connection_sock.c 		if (req->num_timeout++ == 0)
req               768 net/ipv4/inet_connection_sock.c 		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
req               769 net/ipv4/inet_connection_sock.c 		mod_timer(&req->rsk_timer, jiffies + timeo);
req               773 net/ipv4/inet_connection_sock.c 	inet_csk_reqsk_queue_drop_and_put(sk_listener, req);
req               776 net/ipv4/inet_connection_sock.c static void reqsk_queue_hash_req(struct request_sock *req,
req               779 net/ipv4/inet_connection_sock.c 	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
req               780 net/ipv4/inet_connection_sock.c 	mod_timer(&req->rsk_timer, jiffies + timeout);
req               782 net/ipv4/inet_connection_sock.c 	inet_ehash_insert(req_to_sk(req), NULL);
req               787 net/ipv4/inet_connection_sock.c 	refcount_set(&req->rsk_refcnt, 2 + 1);
req               790 net/ipv4/inet_connection_sock.c void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
req               793 net/ipv4/inet_connection_sock.c 	reqsk_queue_hash_req(req, timeout);
req               807 net/ipv4/inet_connection_sock.c 				 const struct request_sock *req,
req               818 net/ipv4/inet_connection_sock.c 		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
req               819 net/ipv4/inet_connection_sock.c 		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
req               820 net/ipv4/inet_connection_sock.c 		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
req               827 net/ipv4/inet_connection_sock.c 		newsk->sk_mark = inet_rsk(req)->ir_mark;
req               829 net/ipv4/inet_connection_sock.c 			     atomic64_read(&inet_rsk(req)->ir_cookie));
req               838 net/ipv4/inet_connection_sock.c 		security_inet_csk_clone(newsk, req);
req               924 net/ipv4/inet_connection_sock.c static void inet_child_forget(struct sock *sk, struct request_sock *req,
req               933 net/ipv4/inet_connection_sock.c 	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
req               934 net/ipv4/inet_connection_sock.c 		BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
req               935 net/ipv4/inet_connection_sock.c 		BUG_ON(sk != req->rsk_listener);
req               949 net/ipv4/inet_connection_sock.c 				      struct request_sock *req,
req               956 net/ipv4/inet_connection_sock.c 		inet_child_forget(sk, req, child);
req               959 net/ipv4/inet_connection_sock.c 		req->sk = child;
req               960 net/ipv4/inet_connection_sock.c 		req->dl_next = NULL;
req               962 net/ipv4/inet_connection_sock.c 			WRITE_ONCE(queue->rskq_accept_head, req);
req               964 net/ipv4/inet_connection_sock.c 			queue->rskq_accept_tail->dl_next = req;
req               965 net/ipv4/inet_connection_sock.c 		queue->rskq_accept_tail = req;
req               974 net/ipv4/inet_connection_sock.c 					 struct request_sock *req, bool own_req)
req               977 net/ipv4/inet_connection_sock.c 		inet_csk_reqsk_queue_drop(sk, req);
req               978 net/ipv4/inet_connection_sock.c 		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
req               979 net/ipv4/inet_connection_sock.c 		if (inet_csk_reqsk_queue_add(sk, req, child))
req               997 net/ipv4/inet_connection_sock.c 	struct request_sock *next, *req;
req              1007 net/ipv4/inet_connection_sock.c 	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
req              1008 net/ipv4/inet_connection_sock.c 		struct sock *child = req->sk;
req              1015 net/ipv4/inet_connection_sock.c 		inet_child_forget(sk, req, child);
req              1016 net/ipv4/inet_connection_sock.c 		reqsk_put(req);
req              1026 net/ipv4/inet_connection_sock.c 		req = queue->fastopenq.rskq_rst_head;
req              1029 net/ipv4/inet_connection_sock.c 		while (req != NULL) {
req              1030 net/ipv4/inet_connection_sock.c 			next = req->dl_next;
req              1031 net/ipv4/inet_connection_sock.c 			reqsk_put(req);
req              1032 net/ipv4/inet_connection_sock.c 			req = next;
req                92 net/ipv4/inet_diag.c 				const struct inet_diag_req_v2 *req,
req                98 net/ipv4/inet_diag.c 	handler = inet_diag_table[req->sdiag_protocol];
req               174 net/ipv4/inet_diag.c 		      struct sk_buff *skb, const struct inet_diag_req_v2 *req,
req               182 net/ipv4/inet_diag.c 	int ext = req->idiag_ext;
req               188 net/ipv4/inet_diag.c 	handler = inet_diag_table[req->sdiag_protocol];
req               313 net/ipv4/inet_diag.c 			      const struct inet_diag_req_v2 *req,
req               319 net/ipv4/inet_diag.c 	return inet_sk_diag_fill(sk, inet_csk(sk), skb, req, user_ns,
req               418 net/ipv4/inet_diag.c 				     const struct inet_diag_req_v2 *req)
req               423 net/ipv4/inet_diag.c 	if (req->sdiag_family == AF_INET)
req               424 net/ipv4/inet_diag.c 		sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[0],
req               425 net/ipv4/inet_diag.c 				 req->id.idiag_dport, req->id.idiag_src[0],
req               426 net/ipv4/inet_diag.c 				 req->id.idiag_sport, req->id.idiag_if);
req               428 net/ipv4/inet_diag.c 	else if (req->sdiag_family == AF_INET6) {
req               429 net/ipv4/inet_diag.c 		if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
req               430 net/ipv4/inet_diag.c 		    ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
req               431 net/ipv4/inet_diag.c 			sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[3],
req               432 net/ipv4/inet_diag.c 					 req->id.idiag_dport, req->id.idiag_src[3],
req               433 net/ipv4/inet_diag.c 					 req->id.idiag_sport, req->id.idiag_if);
req               436 net/ipv4/inet_diag.c 					  (struct in6_addr *)req->id.idiag_dst,
req               437 net/ipv4/inet_diag.c 					  req->id.idiag_dport,
req               438 net/ipv4/inet_diag.c 					  (struct in6_addr *)req->id.idiag_src,
req               439 net/ipv4/inet_diag.c 					  req->id.idiag_sport,
req               440 net/ipv4/inet_diag.c 					  req->id.idiag_if);
req               451 net/ipv4/inet_diag.c 	if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
req               463 net/ipv4/inet_diag.c 			    const struct inet_diag_req_v2 *req)
req               471 net/ipv4/inet_diag.c 	sk = inet_diag_find_one_icsk(net, hashinfo, req);
req               475 net/ipv4/inet_diag.c 	rep = nlmsg_new(inet_sk_attr_size(sk, req, net_admin), GFP_KERNEL);
req               481 net/ipv4/inet_diag.c 	err = sk_diag_fill(sk, rep, req,
req               505 net/ipv4/inet_diag.c 			       const struct inet_diag_req_v2 *req)
req               510 net/ipv4/inet_diag.c 	handler = inet_diag_lock_handler(req->sdiag_protocol);
req               514 net/ipv4/inet_diag.c 		err = handler->dump_one(in_skb, nlh, req);
req               516 net/ipv4/inet_diag.c 		err = handler->destroy(in_skb, req);
req              1088 net/ipv4/inet_diag.c 	struct inet_diag_req_v2 req;
req              1091 net/ipv4/inet_diag.c 	req.sdiag_family = AF_UNSPEC; /* compatibility */
req              1092 net/ipv4/inet_diag.c 	req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
req              1093 net/ipv4/inet_diag.c 	req.idiag_ext = rc->idiag_ext;
req              1094 net/ipv4/inet_diag.c 	req.idiag_states = rc->idiag_states;
req              1095 net/ipv4/inet_diag.c 	req.id = rc->id;
req              1100 net/ipv4/inet_diag.c 	return __inet_diag_dump(skb, cb, &req, bc);
req              1107 net/ipv4/inet_diag.c 	struct inet_diag_req_v2 req;
req              1109 net/ipv4/inet_diag.c 	req.sdiag_family = rc->idiag_family;
req              1110 net/ipv4/inet_diag.c 	req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
req              1111 net/ipv4/inet_diag.c 	req.idiag_ext = rc->idiag_ext;
req              1112 net/ipv4/inet_diag.c 	req.idiag_states = rc->idiag_states;
req              1113 net/ipv4/inet_diag.c 	req.id = rc->id;
req              1115 net/ipv4/inet_diag.c 	return inet_diag_cmd_exact(SOCK_DIAG_BY_FAMILY, in_skb, nlh, &req);
req                36 net/ipv4/netfilter/nf_nat_pptp.c #define REQ_CID(req, off)		(*(__be16 *)((char *)(req) + (off)))
req                38 net/ipv4/raw_diag.c 			       const struct inet_diag_req_v2 *req)
req                40 net/ipv4/raw_diag.c 	struct inet_diag_req_raw *r = (void *)req;
req                65 net/ipv4/syncookies.c u64 cookie_init_timestamp(struct request_sock *req)
req                71 net/ipv4/syncookies.c 	ireq = inet_rsk(req);
req               202 net/ipv4/syncookies.c 				 struct request_sock *req,
req               209 net/ipv4/syncookies.c 	child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
req               212 net/ipv4/syncookies.c 		refcount_set(&req->rsk_refcnt, 1);
req               215 net/ipv4/syncookies.c 		if (inet_csk_reqsk_queue_add(sk, req, child))
req               221 net/ipv4/syncookies.c 	__reqsk_free(req);
req               293 net/ipv4/syncookies.c 	struct request_sock *req;
req               329 net/ipv4/syncookies.c 	req = inet_reqsk_alloc(&tcp_request_sock_ops, sk, false); /* for safety */
req               330 net/ipv4/syncookies.c 	if (!req)
req               333 net/ipv4/syncookies.c 	ireq = inet_rsk(req);
req               334 net/ipv4/syncookies.c 	treq = tcp_rsk(req);
req               339 net/ipv4/syncookies.c 	req->mss		= mss;
req               342 net/ipv4/syncookies.c 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
req               343 net/ipv4/syncookies.c 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
req               349 net/ipv4/syncookies.c 	req->ts_recent		= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
req               362 net/ipv4/syncookies.c 	if (security_inet_conn_request(sk, skb, req)) {
req               363 net/ipv4/syncookies.c 		reqsk_free(req);
req               367 net/ipv4/syncookies.c 	req->num_retrans = 0;
req               380 net/ipv4/syncookies.c 	security_req_classify_flow(req, flowi4_to_flowi(&fl4));
req               383 net/ipv4/syncookies.c 		reqsk_free(req);
req               388 net/ipv4/syncookies.c 	req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
req               390 net/ipv4/syncookies.c 	tcp_select_initial_window(sk, tcp_full_space(sk), req->mss,
req               391 net/ipv4/syncookies.c 				  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
req               398 net/ipv4/syncookies.c 	ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst, tsoff);
req              2499 net/ipv4/tcp.c 		struct request_sock *req;
req              2501 net/ipv4/tcp.c 		req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
req              2507 net/ipv4/tcp.c 		if (req)
req              2508 net/ipv4/tcp.c 			reqsk_fastopen_remove(sk, req, false);
req              3738 net/ipv4/tcp.c 		struct ahash_request *req;
req              3752 net/ipv4/tcp.c 		req = ahash_request_alloc(hash, GFP_KERNEL);
req              3753 net/ipv4/tcp.c 		if (!req)
req              3756 net/ipv4/tcp.c 		ahash_request_set_callback(req, 0, NULL, NULL);
req              3758 net/ipv4/tcp.c 		per_cpu(tcp_md5sig_pool, cpu).md5_req = req;
req              3811 net/ipv4/tcp.c 	struct ahash_request *req = hp->md5_req;
req              3821 net/ipv4/tcp.c 	ahash_request_set_crypt(req, &sg, NULL, head_data_len);
req              3822 net/ipv4/tcp.c 	if (crypto_ahash_update(req))
req              3832 net/ipv4/tcp.c 		ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f));
req              3833 net/ipv4/tcp.c 		if (crypto_ahash_update(req))
req              3859 net/ipv4/tcp.c 	struct request_sock *req;
req              3865 net/ipv4/tcp.c 	req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1);
req              3872 net/ipv4/tcp.c 	if (req)
req              3873 net/ipv4/tcp.c 		reqsk_fastopen_remove(sk, req, false);
req              3888 net/ipv4/tcp.c 			struct request_sock *req = inet_reqsk(sk);
req              3891 net/ipv4/tcp.c 			inet_csk_reqsk_queue_drop(req->rsk_listener, req);
req               188 net/ipv4/tcp_diag.c 			     const struct inet_diag_req_v2 *req)
req               190 net/ipv4/tcp_diag.c 	return inet_diag_dump_one_icsk(&tcp_hashinfo, in_skb, nlh, req);
req               195 net/ipv4/tcp_diag.c 			    const struct inet_diag_req_v2 *req)
req               198 net/ipv4/tcp_diag.c 	struct sock *sk = inet_diag_find_one_icsk(net, &tcp_hashinfo, req);
req               111 net/ipv4/tcp_fastopen.c static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
req               118 net/ipv4/tcp_fastopen.c 	if (req->rsk_ops->family == AF_INET) {
req               129 net/ipv4/tcp_fastopen.c 	if (req->rsk_ops->family == AF_INET6) {
req               147 net/ipv4/tcp_fastopen.c 				    struct request_sock *req,
req               156 net/ipv4/tcp_fastopen.c 		__tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[0], foc);
req               205 net/ipv4/tcp_fastopen.c 					 struct request_sock *req,
req               220 net/ipv4/tcp_fastopen.c 		__tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[i], foc);
req               234 net/ipv4/tcp_fastopen.c 					      struct request_sock *req)
req               241 net/ipv4/tcp_fastopen.c 	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
req               256 net/ipv4/tcp_fastopen.c 	rcu_assign_pointer(tp->fastopen_rsk, req);
req               257 net/ipv4/tcp_fastopen.c 	tcp_rsk(req)->tfo_listener = true;
req               272 net/ipv4/tcp_fastopen.c 	refcount_set(&req->rsk_refcnt, 2);
req               281 net/ipv4/tcp_fastopen.c 	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
req               339 net/ipv4/tcp_fastopen.c 			      struct request_sock *req,
req               365 net/ipv4/tcp_fastopen.c 		tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
req               367 net/ipv4/tcp_fastopen.c 		ret = tcp_fastopen_cookie_gen_check(sk, req, skb, foc,
req               383 net/ipv4/tcp_fastopen.c 			child = tcp_fastopen_create_child(sk, skb, req);
req              2968 net/ipv4/tcp_input.c void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
req              2973 net/ipv4/tcp_input.c 	if (req && !req->num_retrans && tcp_rsk(req)->snt_synack)
req              2974 net/ipv4/tcp_input.c 		rtt_us = tcp_stamp_us_delta(tcp_clock_us(), tcp_rsk(req)->snt_synack);
req              6098 net/ipv4/tcp_input.c 	struct request_sock *req;
req              6113 net/ipv4/tcp_input.c 	req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
req              6115 net/ipv4/tcp_input.c 	reqsk_fastopen_remove(sk, req, false);
req              6140 net/ipv4/tcp_input.c 	struct request_sock *req;
req              6190 net/ipv4/tcp_input.c 	req = rcu_dereference_protected(tp->fastopen_rsk,
req              6192 net/ipv4/tcp_input.c 	if (req) {
req              6198 net/ipv4/tcp_input.c 		if (!tcp_check_req(sk, skb, req, true, &req_stolen))
req              6223 net/ipv4/tcp_input.c 			tcp_synack_rtt_meas(sk, req);
req              6225 net/ipv4/tcp_input.c 		if (req) {
req              6264 net/ipv4/tcp_input.c 		if (req)
req              6376 net/ipv4/tcp_input.c static inline void pr_drop_req(struct request_sock *req, __u16 port, int family)
req              6378 net/ipv4/tcp_input.c 	struct inet_request_sock *ireq = inet_rsk(req);
req              6407 net/ipv4/tcp_input.c static void tcp_ecn_create_request(struct request_sock *req,
req              6427 net/ipv4/tcp_input.c 	    tcp_bpf_ca_needs_ecn((struct sock *)req))
req              6428 net/ipv4/tcp_input.c 		inet_rsk(req)->ecn_ok = 1;
req              6431 net/ipv4/tcp_input.c static void tcp_openreq_init(struct request_sock *req,
req              6435 net/ipv4/tcp_input.c 	struct inet_request_sock *ireq = inet_rsk(req);
req              6437 net/ipv4/tcp_input.c 	req->rsk_rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
req              6438 net/ipv4/tcp_input.c 	req->cookie_ts = 0;
req              6439 net/ipv4/tcp_input.c 	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
req              6440 net/ipv4/tcp_input.c 	tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
req              6441 net/ipv4/tcp_input.c 	tcp_rsk(req)->snt_synack = 0;
req              6442 net/ipv4/tcp_input.c 	tcp_rsk(req)->last_oow_ack_time = 0;
req              6443 net/ipv4/tcp_input.c 	req->mss = rx_opt->mss_clamp;
req              6444 net/ipv4/tcp_input.c 	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
req              6463 net/ipv4/tcp_input.c 	struct request_sock *req = reqsk_alloc(ops, sk_listener,
req              6466 net/ipv4/tcp_input.c 	if (req) {
req              6467 net/ipv4/tcp_input.c 		struct inet_request_sock *ireq = inet_rsk(req);
req              6479 net/ipv4/tcp_input.c 	return req;
req              6512 net/ipv4/tcp_input.c 				 struct request_sock *req,
req              6523 net/ipv4/tcp_input.c 			req->saved_syn = copy;
req              6568 net/ipv4/tcp_input.c 	struct request_sock *req;
req              6589 net/ipv4/tcp_input.c 	req = inet_reqsk_alloc(rsk_ops, sk, !want_cookie);
req              6590 net/ipv4/tcp_input.c 	if (!req)
req              6593 net/ipv4/tcp_input.c 	tcp_rsk(req)->af_specific = af_ops;
req              6594 net/ipv4/tcp_input.c 	tcp_rsk(req)->ts_off = 0;
req              6609 net/ipv4/tcp_input.c 	tcp_openreq_init(req, &tmp_opt, skb, sk);
req              6610 net/ipv4/tcp_input.c 	inet_rsk(req)->no_srccheck = inet_sk(sk)->transparent;
req              6613 net/ipv4/tcp_input.c 	inet_rsk(req)->ir_iif = inet_request_bound_dev_if(sk, skb);
req              6615 net/ipv4/tcp_input.c 	af_ops->init_req(req, sk, skb);
req              6617 net/ipv4/tcp_input.c 	if (security_inet_conn_request(sk, skb, req))
req              6621 net/ipv4/tcp_input.c 		tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb);
req              6623 net/ipv4/tcp_input.c 	dst = af_ops->route_req(sk, &fl, req);
req              6632 net/ipv4/tcp_input.c 		    !tcp_peer_is_proven(req, dst)) {
req              6640 net/ipv4/tcp_input.c 			pr_drop_req(req, ntohs(tcp_hdr(skb)->source),
req              6648 net/ipv4/tcp_input.c 	tcp_ecn_create_request(req, skb, sk, dst);
req              6651 net/ipv4/tcp_input.c 		isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
req              6652 net/ipv4/tcp_input.c 		req->cookie_ts = tmp_opt.tstamp_ok;
req              6654 net/ipv4/tcp_input.c 			inet_rsk(req)->ecn_ok = 0;
req              6657 net/ipv4/tcp_input.c 	tcp_rsk(req)->snt_isn = isn;
req              6658 net/ipv4/tcp_input.c 	tcp_rsk(req)->txhash = net_tx_rndhash();
req              6659 net/ipv4/tcp_input.c 	tcp_openreq_init_rwin(req, sk, dst);
req              6660 net/ipv4/tcp_input.c 	sk_rx_queue_set(req_to_sk(req), skb);
req              6662 net/ipv4/tcp_input.c 		tcp_reqsk_record_syn(sk, req, skb);
req              6663 net/ipv4/tcp_input.c 		fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst);
req              6666 net/ipv4/tcp_input.c 		af_ops->send_synack(fastopen_sk, dst, &fl, req,
req              6669 net/ipv4/tcp_input.c 		if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
req              6670 net/ipv4/tcp_input.c 			reqsk_fastopen_remove(fastopen_sk, req, false);
req              6679 net/ipv4/tcp_input.c 		tcp_rsk(req)->tfo_listener = false;
req              6681 net/ipv4/tcp_input.c 			inet_csk_reqsk_queue_hash_add(sk, req,
req              6682 net/ipv4/tcp_input.c 				tcp_timeout_init((struct sock *)req));
req              6683 net/ipv4/tcp_input.c 		af_ops->send_synack(sk, dst, &fl, req, &foc,
req              6687 net/ipv4/tcp_input.c 			reqsk_free(req);
req              6691 net/ipv4/tcp_input.c 	reqsk_put(req);
req              6697 net/ipv4/tcp_input.c 	__reqsk_free(req);
req               386 net/ipv4/tcp_ipv4.c 	struct request_sock *req = inet_reqsk(sk);
req               392 net/ipv4/tcp_ipv4.c 	if (seq != tcp_rsk(req)->snt_isn) {
req               401 net/ipv4/tcp_ipv4.c 		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
req               402 net/ipv4/tcp_ipv4.c 		tcp_listendrop(req->rsk_listener);
req               404 net/ipv4/tcp_ipv4.c 	reqsk_put(req);
req               908 net/ipv4/tcp_ipv4.c 				  struct request_sock *req)
req               913 net/ipv4/tcp_ipv4.c 	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
req               922 net/ipv4/tcp_ipv4.c 			tcp_rsk(req)->rcv_nxt,
req               923 net/ipv4/tcp_ipv4.c 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
req               924 net/ipv4/tcp_ipv4.c 			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
req               925 net/ipv4/tcp_ipv4.c 			req->ts_recent,
req               929 net/ipv4/tcp_ipv4.c 			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
req               940 net/ipv4/tcp_ipv4.c 			      struct request_sock *req,
req               944 net/ipv4/tcp_ipv4.c 	const struct inet_request_sock *ireq = inet_rsk(req);
req               950 net/ipv4/tcp_ipv4.c 	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
req               953 net/ipv4/tcp_ipv4.c 	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
req               972 net/ipv4/tcp_ipv4.c static void tcp_v4_reqsk_destructor(struct request_sock *req)
req               974 net/ipv4/tcp_ipv4.c 	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
req              1213 net/ipv4/tcp_ipv4.c 	struct ahash_request *req;
req              1218 net/ipv4/tcp_ipv4.c 	req = hp->md5_req;
req              1220 net/ipv4/tcp_ipv4.c 	if (crypto_ahash_init(req))
req              1226 net/ipv4/tcp_ipv4.c 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
req              1227 net/ipv4/tcp_ipv4.c 	if (crypto_ahash_final(req))
req              1245 net/ipv4/tcp_ipv4.c 	struct ahash_request *req;
req              1261 net/ipv4/tcp_ipv4.c 	req = hp->md5_req;
req              1263 net/ipv4/tcp_ipv4.c 	if (crypto_ahash_init(req))
req              1272 net/ipv4/tcp_ipv4.c 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
req              1273 net/ipv4/tcp_ipv4.c 	if (crypto_ahash_final(req))
req              1348 net/ipv4/tcp_ipv4.c static void tcp_v4_init_req(struct request_sock *req,
req              1352 net/ipv4/tcp_ipv4.c 	struct inet_request_sock *ireq = inet_rsk(req);
req              1355 net/ipv4/tcp_ipv4.c 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
req              1356 net/ipv4/tcp_ipv4.c 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
req              1362 net/ipv4/tcp_ipv4.c 					  const struct request_sock *req)
req              1364 net/ipv4/tcp_ipv4.c 	return inet_csk_route_req(sk, &fl->u.ip4, req);
req              1414 net/ipv4/tcp_ipv4.c 				  struct request_sock *req,
req              1431 net/ipv4/tcp_ipv4.c 	newsk = tcp_create_openreq_child(sk, req, skb);
req              1440 net/ipv4/tcp_ipv4.c 	ireq		      = inet_rsk(req);
req              1456 net/ipv4/tcp_ipv4.c 		dst = inet_csk_route_child_sock(sk, newsk, req);
req              1492 net/ipv4/tcp_ipv4.c 		tcp_move_syn(newtp, req);
req              1856 net/ipv4/tcp_ipv4.c 		struct request_sock *req = inet_reqsk(sk);
req              1860 net/ipv4/tcp_ipv4.c 		sk = req->rsk_listener;
req              1863 net/ipv4/tcp_ipv4.c 			reqsk_put(req);
req              1867 net/ipv4/tcp_ipv4.c 			reqsk_put(req);
req              1871 net/ipv4/tcp_ipv4.c 			inet_csk_reqsk_queue_drop_and_put(sk, req);
req              1884 net/ipv4/tcp_ipv4.c 			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
req              1887 net/ipv4/tcp_ipv4.c 			reqsk_put(req);
req              1901 net/ipv4/tcp_ipv4.c 			reqsk_put(req);
req              2398 net/ipv4/tcp_ipv4.c static void get_openreq4(const struct request_sock *req,
req              2401 net/ipv4/tcp_ipv4.c 	const struct inet_request_sock *ireq = inet_rsk(req);
req              2402 net/ipv4/tcp_ipv4.c 	long delta = req->rsk_timer.expires - jiffies;
req              2415 net/ipv4/tcp_ipv4.c 		req->num_timeout,
req              2417 net/ipv4/tcp_ipv4.c 				 sock_i_uid(req->rsk_listener)),
req              2421 net/ipv4/tcp_ipv4.c 		req);
req               229 net/ipv4/tcp_metrics.c static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
req               237 net/ipv4/tcp_metrics.c 	saddr.family = req->rsk_ops->family;
req               238 net/ipv4/tcp_metrics.c 	daddr.family = req->rsk_ops->family;
req               241 net/ipv4/tcp_metrics.c 		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
req               242 net/ipv4/tcp_metrics.c 		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
req               243 net/ipv4/tcp_metrics.c 		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
req               247 net/ipv4/tcp_metrics.c 		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
req               248 net/ipv4/tcp_metrics.c 		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
req               249 net/ipv4/tcp_metrics.c 		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
req               517 net/ipv4/tcp_metrics.c bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
req               526 net/ipv4/tcp_metrics.c 	tm = __tcp_get_metrics_req(req, dst);
req               360 net/ipv4/tcp_minisocks.c void tcp_openreq_init_rwin(struct request_sock *req,
req               364 net/ipv4/tcp_minisocks.c 	struct inet_request_sock *ireq = inet_rsk(req);
req               375 net/ipv4/tcp_minisocks.c 	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);
req               379 net/ipv4/tcp_minisocks.c 	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
req               380 net/ipv4/tcp_minisocks.c 		req->rsk_window_clamp = full_space;
req               382 net/ipv4/tcp_minisocks.c 	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
req               391 net/ipv4/tcp_minisocks.c 		&req->rsk_rcv_wnd,
req               392 net/ipv4/tcp_minisocks.c 		&req->rsk_window_clamp,
req               401 net/ipv4/tcp_minisocks.c 				  const struct request_sock *req)
req               403 net/ipv4/tcp_minisocks.c 	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
req               436 net/ipv4/tcp_minisocks.c 				    struct request_sock *req,
req               443 net/ipv4/tcp_minisocks.c 		ireq = inet_rsk(req);
req               457 net/ipv4/tcp_minisocks.c 				      struct request_sock *req,
req               460 net/ipv4/tcp_minisocks.c 	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
req               461 net/ipv4/tcp_minisocks.c 	const struct inet_request_sock *ireq = inet_rsk(req);
req               462 net/ipv4/tcp_minisocks.c 	struct tcp_request_sock *treq = tcp_rsk(req);
req               474 net/ipv4/tcp_minisocks.c 	smc_check_reset_syn_req(oldtp, req, newtp);
req               500 net/ipv4/tcp_minisocks.c 	newtp->total_retrans = req->num_retrans;
req               511 net/ipv4/tcp_minisocks.c 	newtp->window_clamp = req->rsk_window_clamp;
req               512 net/ipv4/tcp_minisocks.c 	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
req               513 net/ipv4/tcp_minisocks.c 	newtp->rcv_wnd = req->rsk_rcv_wnd;
req               526 net/ipv4/tcp_minisocks.c 		newtp->rx_opt.ts_recent = req->ts_recent;
req               533 net/ipv4/tcp_minisocks.c 	if (req->num_timeout) {
req               546 net/ipv4/tcp_minisocks.c 	newtp->rx_opt.mss_clamp = req->mss;
req               547 net/ipv4/tcp_minisocks.c 	tcp_ecn_openreq_child(newtp, req);
req               569 net/ipv4/tcp_minisocks.c 			   struct request_sock *req,
req               584 net/ipv4/tcp_minisocks.c 			tmp_opt.ts_recent = req->ts_recent;
req               586 net/ipv4/tcp_minisocks.c 				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
req               591 net/ipv4/tcp_minisocks.c 			tmp_opt.ts_recent_stamp = ktime_get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
req               597 net/ipv4/tcp_minisocks.c 	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
req               625 net/ipv4/tcp_minisocks.c 					  &tcp_rsk(req)->last_oow_ack_time) &&
req               627 net/ipv4/tcp_minisocks.c 		    !inet_rtx_syn_ack(sk, req)) {
req               630 net/ipv4/tcp_minisocks.c 			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
req               633 net/ipv4/tcp_minisocks.c 				mod_timer_pending(&req->rsk_timer, expires);
req               635 net/ipv4/tcp_minisocks.c 				req->rsk_timer.expires = expires;
req               699 net/ipv4/tcp_minisocks.c 	     tcp_rsk(req)->snt_isn + 1))
req               710 net/ipv4/tcp_minisocks.c 					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
req               715 net/ipv4/tcp_minisocks.c 					  &tcp_rsk(req)->last_oow_ack_time))
req               716 net/ipv4/tcp_minisocks.c 			req->rsk_ops->send_ack(sk, skb, req);
req               724 net/ipv4/tcp_minisocks.c 	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
req               725 net/ipv4/tcp_minisocks.c 		req->ts_recent = tmp_opt.rcv_tsval;
req               727 net/ipv4/tcp_minisocks.c 	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
req               757 net/ipv4/tcp_minisocks.c 	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
req               758 net/ipv4/tcp_minisocks.c 	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
req               759 net/ipv4/tcp_minisocks.c 		inet_rsk(req)->acked = 1;
req               770 net/ipv4/tcp_minisocks.c 	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
req               771 net/ipv4/tcp_minisocks.c 							 req, &own_req);
req               776 net/ipv4/tcp_minisocks.c 	tcp_synack_rtt_meas(child, req);
req               778 net/ipv4/tcp_minisocks.c 	return inet_csk_complete_hashdance(sk, child, req, own_req);
req               782 net/ipv4/tcp_minisocks.c 		inet_rsk(req)->acked = 1;
req               793 net/ipv4/tcp_minisocks.c 		req->rsk_ops->send_reset(sk, skb);
req               795 net/ipv4/tcp_minisocks.c 		reqsk_fastopen_remove(sk, req, true);
req               799 net/ipv4/tcp_minisocks.c 		inet_csk_reqsk_queue_drop(sk, req);
req               355 net/ipv4/tcp_output.c tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
req               357 net/ipv4/tcp_output.c 	if (inet_rsk(req)->ecn_ok)
req               661 net/ipv4/tcp_output.c 				       struct request_sock *req,
req               667 net/ipv4/tcp_output.c 	struct inet_request_sock *ireq = inet_rsk(req);
req               695 net/ipv4/tcp_output.c 		opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off;
req               696 net/ipv4/tcp_output.c 		opts->tsecr = req->ts_recent;
req              3267 net/ipv4/tcp_output.c 				struct request_sock *req,
req              3271 net/ipv4/tcp_output.c 	struct inet_request_sock *ireq = inet_rsk(req);
req              3291 net/ipv4/tcp_output.c 		skb_set_owner_w(skb, req_to_sk(req));
req              3313 net/ipv4/tcp_output.c 	if (unlikely(req->cookie_ts))
req              3314 net/ipv4/tcp_output.c 		skb->skb_mstamp_ns = cookie_init_timestamp(req);
req              3319 net/ipv4/tcp_output.c 		if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */
req              3320 net/ipv4/tcp_output.c 			tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
req              3325 net/ipv4/tcp_output.c 	md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
req              3327 net/ipv4/tcp_output.c 	skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
req              3328 net/ipv4/tcp_output.c 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
req              3338 net/ipv4/tcp_output.c 	tcp_ecn_make_synack(req, th);
req              3343 net/ipv4/tcp_output.c 	th->seq = htonl(tcp_rsk(req)->snt_isn);
req              3345 net/ipv4/tcp_output.c 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
req              3348 net/ipv4/tcp_output.c 	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
req              3356 net/ipv4/tcp_output.c 		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
req              3357 net/ipv4/tcp_output.c 					       md5, req_to_sk(req), skb);
req              3847 net/ipv4/tcp_output.c int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
req              3849 net/ipv4/tcp_output.c 	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
req              3853 net/ipv4/tcp_output.c 	tcp_rsk(req)->txhash = net_tx_rndhash();
req              3854 net/ipv4/tcp_output.c 	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
req              3860 net/ipv4/tcp_output.c 		trace_tcp_retransmit_synack(sk, req);
req               389 net/ipv4/tcp_timer.c static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
req               396 net/ipv4/tcp_timer.c 	req->rsk_ops->syn_ack_timeout(req);
req               398 net/ipv4/tcp_timer.c 	if (req->num_timeout >= max_retries) {
req               410 net/ipv4/tcp_timer.c 	inet_rtx_syn_ack(sk, req);
req               411 net/ipv4/tcp_timer.c 	req->num_timeout++;
req               416 net/ipv4/tcp_timer.c 			  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
req               436 net/ipv4/tcp_timer.c 	struct request_sock *req;
req               438 net/ipv4/tcp_timer.c 	req = rcu_dereference_protected(tp->fastopen_rsk,
req               440 net/ipv4/tcp_timer.c 	if (req) {
req               443 net/ipv4/tcp_timer.c 		tcp_fastopen_synack_timer(sk, req);
req               629 net/ipv4/tcp_timer.c void tcp_syn_ack_timeout(const struct request_sock *req)
req               631 net/ipv4/tcp_timer.c 	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);
req                18 net/ipv4/udp_diag.c 			const struct inet_diag_req_v2 *req,
req                24 net/ipv4/udp_diag.c 	return inet_sk_diag_fill(sk, NULL, skb, req,
req                32 net/ipv4/udp_diag.c 			const struct inet_diag_req_v2 *req)
req                40 net/ipv4/udp_diag.c 	if (req->sdiag_family == AF_INET)
req                43 net/ipv4/udp_diag.c 				req->id.idiag_src[0], req->id.idiag_sport,
req                44 net/ipv4/udp_diag.c 				req->id.idiag_dst[0], req->id.idiag_dport,
req                45 net/ipv4/udp_diag.c 				req->id.idiag_if, 0, tbl, NULL);
req                47 net/ipv4/udp_diag.c 	else if (req->sdiag_family == AF_INET6)
req                49 net/ipv4/udp_diag.c 				(struct in6_addr *)req->id.idiag_src,
req                50 net/ipv4/udp_diag.c 				req->id.idiag_sport,
req                51 net/ipv4/udp_diag.c 				(struct in6_addr *)req->id.idiag_dst,
req                52 net/ipv4/udp_diag.c 				req->id.idiag_dport,
req                53 net/ipv4/udp_diag.c 				req->id.idiag_if, 0, tbl, NULL);
req                62 net/ipv4/udp_diag.c 	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
req                74 net/ipv4/udp_diag.c 	err = inet_sk_diag_fill(sk, NULL, rep, req,
req               156 net/ipv4/udp_diag.c 			     const struct inet_diag_req_v2 *req)
req               158 net/ipv4/udp_diag.c 	return udp_dump_one(&udp_table, in_skb, nlh, req);
req               170 net/ipv4/udp_diag.c 			      const struct inet_diag_req_v2 *req,
req               179 net/ipv4/udp_diag.c 	if (req->sdiag_family == AF_INET)
req               181 net/ipv4/udp_diag.c 				req->id.idiag_dst[0], req->id.idiag_dport,
req               182 net/ipv4/udp_diag.c 				req->id.idiag_src[0], req->id.idiag_sport,
req               183 net/ipv4/udp_diag.c 				req->id.idiag_if, 0, tbl, NULL);
req               185 net/ipv4/udp_diag.c 	else if (req->sdiag_family == AF_INET6) {
req               186 net/ipv4/udp_diag.c 		if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
req               187 net/ipv4/udp_diag.c 		    ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
req               189 net/ipv4/udp_diag.c 					req->id.idiag_dst[3], req->id.idiag_dport,
req               190 net/ipv4/udp_diag.c 					req->id.idiag_src[3], req->id.idiag_sport,
req               191 net/ipv4/udp_diag.c 					req->id.idiag_if, 0, tbl, NULL);
req               195 net/ipv4/udp_diag.c 					(struct in6_addr *)req->id.idiag_dst,
req               196 net/ipv4/udp_diag.c 					req->id.idiag_dport,
req               197 net/ipv4/udp_diag.c 					(struct in6_addr *)req->id.idiag_src,
req               198 net/ipv4/udp_diag.c 					req->id.idiag_sport,
req               199 net/ipv4/udp_diag.c 					req->id.idiag_if, 0, tbl, NULL);
req               215 net/ipv4/udp_diag.c 	if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
req               228 net/ipv4/udp_diag.c 			    const struct inet_diag_req_v2 *req)
req               230 net/ipv4/udp_diag.c 	return __udp_diag_destroy(in_skb, req, &udp_table);
req               234 net/ipv4/udp_diag.c 				const struct inet_diag_req_v2 *req)
req               236 net/ipv4/udp_diag.c 	return __udp_diag_destroy(in_skb, req, &udplite_table);
req               260 net/ipv4/udp_diag.c 				 const struct inet_diag_req_v2 *req)
req               262 net/ipv4/udp_diag.c 	return udp_dump_one(&udplite_table, in_skb, nlh, req);
req                87 net/ipv6/ah6.c 	struct ahash_request *req;
req                89 net/ipv6/ah6.c 	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
req                92 net/ipv6/ah6.c 	ahash_request_set_tfm(req, ahash);
req                94 net/ipv6/ah6.c 	return req;
req                98 net/ipv6/ah6.c 					     struct ahash_request *req)
req               100 net/ipv6/ah6.c 	return (void *)ALIGN((unsigned long)(req + 1) +
req               332 net/ipv6/ah6.c 	struct ahash_request *req;
req               369 net/ipv6/ah6.c 	req = ah_tmp_req(ahash, icv);
req               370 net/ipv6/ah6.c 	sg = ah_req_sg(ahash, req);
req               425 net/ipv6/ah6.c 	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
req               426 net/ipv6/ah6.c 	ahash_request_set_callback(req, 0, ah6_output_done, skb);
req               430 net/ipv6/ah6.c 	err = crypto_ahash_digest(req);
req               518 net/ipv6/ah6.c 	struct ahash_request *req;
req               583 net/ipv6/ah6.c 	req = ah_tmp_req(ahash, icv);
req               584 net/ipv6/ah6.c 	sg = ah_req_sg(ahash, req);
req               611 net/ipv6/ah6.c 	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
req               612 net/ipv6/ah6.c 	ahash_request_set_callback(req, 0, ah6_input_done, skb);
req               616 net/ipv6/ah6.c 	err = crypto_ahash_digest(req);
req              1193 net/ipv6/calipso.c static int calipso_req_setattr(struct request_sock *req,
req              1198 net/ipv6/calipso.c 	struct inet_request_sock *req_inet = inet_rsk(req);
req              1200 net/ipv6/calipso.c 	struct sock *sk = sk_to_full_sk(req_to_sk(req));
req              1235 net/ipv6/calipso.c static void calipso_req_delattr(struct request_sock *req)
req              1237 net/ipv6/calipso.c 	struct inet_request_sock *req_inet = inet_rsk(req);
req              1240 net/ipv6/calipso.c 	struct sock *sk = sk_to_full_sk(req_to_sk(req));
req                89 net/ipv6/esp6.c 	struct aead_request *req;
req                91 net/ipv6/esp6.c 	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
req                93 net/ipv6/esp6.c 	aead_request_set_tfm(req, aead);
req                94 net/ipv6/esp6.c 	return req;
req                98 net/ipv6/esp6.c 					     struct aead_request *req)
req               100 net/ipv6/esp6.c 	return (void *)ALIGN((unsigned long)(req + 1) +
req               110 net/ipv6/esp6.c 	struct aead_request *req;
req               117 net/ipv6/esp6.c 	req = esp_tmp_req(aead, iv);
req               122 net/ipv6/esp6.c 	if (req->src != req->dst)
req               123 net/ipv6/esp6.c 		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
req               318 net/ipv6/esp6.c 	struct aead_request *req;
req               341 net/ipv6/esp6.c 	req = esp_tmp_req(aead, iv);
req               342 net/ipv6/esp6.c 	sg = esp_req_sg(aead, req);
req               388 net/ipv6/esp6.c 		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
req               390 net/ipv6/esp6.c 		aead_request_set_callback(req, 0, esp_output_done, skb);
req               392 net/ipv6/esp6.c 	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
req               393 net/ipv6/esp6.c 	aead_request_set_ad(req, assoclen);
req               400 net/ipv6/esp6.c 	err = crypto_aead_encrypt(req);
req               594 net/ipv6/esp6.c 	struct aead_request *req;
req               653 net/ipv6/esp6.c 	req = esp_tmp_req(aead, iv);
req               654 net/ipv6/esp6.c 	sg = esp_req_sg(aead, req);
req               668 net/ipv6/esp6.c 		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
req               670 net/ipv6/esp6.c 		aead_request_set_callback(req, 0, esp_input_done, skb);
req               672 net/ipv6/esp6.c 	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
req               673 net/ipv6/esp6.c 	aead_request_set_ad(req, assoclen);
req               675 net/ipv6/esp6.c 	ret = crypto_aead_decrypt(req);
req                29 net/ipv6/inet6_connection_sock.c 				      const struct request_sock *req,
req                32 net/ipv6/inet6_connection_sock.c 	struct inet_request_sock *ireq = inet_rsk(req);
req                49 net/ipv6/inet6_connection_sock.c 	security_req_classify_flow(req, flowi6_to_flowi(fl6));
req               138 net/ipv6/syncookies.c 	struct request_sock *req;
req               173 net/ipv6/syncookies.c 	req = inet_reqsk_alloc(&tcp6_request_sock_ops, sk, false);
req               174 net/ipv6/syncookies.c 	if (!req)
req               177 net/ipv6/syncookies.c 	ireq = inet_rsk(req);
req               178 net/ipv6/syncookies.c 	treq = tcp_rsk(req);
req               181 net/ipv6/syncookies.c 	if (security_inet_conn_request(sk, skb, req))
req               184 net/ipv6/syncookies.c 	req->mss = mss;
req               204 net/ipv6/syncookies.c 	req->num_retrans = 0;
req               209 net/ipv6/syncookies.c 	req->ts_recent		= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
req               236 net/ipv6/syncookies.c 		security_req_classify_flow(req, flowi6_to_flowi(&fl6));
req               243 net/ipv6/syncookies.c 	req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
req               244 net/ipv6/syncookies.c 	tcp_select_initial_window(sk, tcp_full_space(sk), req->mss,
req               245 net/ipv6/syncookies.c 				  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
req               252 net/ipv6/syncookies.c 	ret = tcp_get_cookie_sock(sk, skb, req, dst, tsoff);
req               256 net/ipv6/syncookies.c 	reqsk_free(req);
req                73 net/ipv6/tcp_ipv6.c 				      struct request_sock *req);
req               486 net/ipv6/tcp_ipv6.c 			      struct request_sock *req,
req               490 net/ipv6/tcp_ipv6.c 	struct inet_request_sock *ireq = inet_rsk(req);
req               498 net/ipv6/tcp_ipv6.c 	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
req               502 net/ipv6/tcp_ipv6.c 	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
req               527 net/ipv6/tcp_ipv6.c static void tcp_v6_reqsk_destructor(struct request_sock *req)
req               529 net/ipv6/tcp_ipv6.c 	kfree(inet_rsk(req)->ipv6_opt);
req               530 net/ipv6/tcp_ipv6.c 	kfree_skb(inet_rsk(req)->pktopts);
req               624 net/ipv6/tcp_ipv6.c 	struct ahash_request *req;
req               629 net/ipv6/tcp_ipv6.c 	req = hp->md5_req;
req               631 net/ipv6/tcp_ipv6.c 	if (crypto_ahash_init(req))
req               637 net/ipv6/tcp_ipv6.c 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
req               638 net/ipv6/tcp_ipv6.c 	if (crypto_ahash_final(req))
req               658 net/ipv6/tcp_ipv6.c 	struct ahash_request *req;
req               673 net/ipv6/tcp_ipv6.c 	req = hp->md5_req;
req               675 net/ipv6/tcp_ipv6.c 	if (crypto_ahash_init(req))
req               684 net/ipv6/tcp_ipv6.c 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
req               685 net/ipv6/tcp_ipv6.c 	if (crypto_ahash_final(req))
req               745 net/ipv6/tcp_ipv6.c static void tcp_v6_init_req(struct request_sock *req,
req               750 net/ipv6/tcp_ipv6.c 	struct inet_request_sock *ireq = inet_rsk(req);
req               773 net/ipv6/tcp_ipv6.c 					  const struct request_sock *req)
req               775 net/ipv6/tcp_ipv6.c 	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
req              1041 net/ipv6/tcp_ipv6.c 				  struct request_sock *req)
req              1052 net/ipv6/tcp_ipv6.c 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
req              1053 net/ipv6/tcp_ipv6.c 			tcp_rsk(req)->rcv_nxt,
req              1054 net/ipv6/tcp_ipv6.c 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
req              1055 net/ipv6/tcp_ipv6.c 			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
req              1056 net/ipv6/tcp_ipv6.c 			req->ts_recent, sk->sk_bound_dev_if,
req              1115 net/ipv6/tcp_ipv6.c 					 struct request_sock *req,
req              1137 net/ipv6/tcp_ipv6.c 		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
req              1185 net/ipv6/tcp_ipv6.c 	ireq = inet_rsk(req);
req              1191 net/ipv6/tcp_ipv6.c 		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
req              1196 net/ipv6/tcp_ipv6.c 	newsk = tcp_create_openreq_child(sk, req, skb);
req              1293 net/ipv6/tcp_ipv6.c 		tcp_move_syn(newtp, req);
req              1526 net/ipv6/tcp_ipv6.c 		struct request_sock *req = inet_reqsk(sk);
req              1530 net/ipv6/tcp_ipv6.c 		sk = req->rsk_listener;
req              1533 net/ipv6/tcp_ipv6.c 			reqsk_put(req);
req              1537 net/ipv6/tcp_ipv6.c 			reqsk_put(req);
req              1541 net/ipv6/tcp_ipv6.c 			inet_csk_reqsk_queue_drop_and_put(sk, req);
req              1551 net/ipv6/tcp_ipv6.c 			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
req              1554 net/ipv6/tcp_ipv6.c 			reqsk_put(req);
req              1568 net/ipv6/tcp_ipv6.c 			reqsk_put(req);
req              1827 net/ipv6/tcp_ipv6.c 			 const struct request_sock *req, int i)
req              1829 net/ipv6/tcp_ipv6.c 	long ttd = req->rsk_timer.expires - jiffies;
req              1830 net/ipv6/tcp_ipv6.c 	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
req              1831 net/ipv6/tcp_ipv6.c 	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
req              1842 net/ipv6/tcp_ipv6.c 		   inet_rsk(req)->ir_num,
req              1845 net/ipv6/tcp_ipv6.c 		   ntohs(inet_rsk(req)->ir_rmt_port),
req              1850 net/ipv6/tcp_ipv6.c 		   req->num_timeout,
req              1852 net/ipv6/tcp_ipv6.c 				    sock_i_uid(req->rsk_listener)),
req              1855 net/ipv6/tcp_ipv6.c 		   0, req);
req               183 net/mac80211/agg-rx.c 				   const struct ieee80211_addba_ext_ie *req)
req               202 net/mac80211/agg-rx.c 	resp->data = req->data & IEEE80211_ADDBA_EXT_NO_FRAG;
req               204 net/mac80211/agg-rx.c 	frag_level = u32_get_bits(req->data,
req              2291 net/mac80211/cfg.c 			  struct cfg80211_scan_request *req)
req              2295 net/mac80211/cfg.c 	sdata = IEEE80211_WDEV_TO_SUB_IF(req->wdev);
req              2323 net/mac80211/cfg.c 		     !(req->flags & NL80211_SCAN_FLAG_AP)))
req              2331 net/mac80211/cfg.c 	return ieee80211_request_scan(sdata, req);
req              2342 net/mac80211/cfg.c 			   struct cfg80211_sched_scan_request *req)
req              2349 net/mac80211/cfg.c 	return ieee80211_request_sched_scan_start(sdata, req);
req              2365 net/mac80211/cfg.c 			  struct cfg80211_auth_request *req)
req              2367 net/mac80211/cfg.c 	return ieee80211_mgd_auth(IEEE80211_DEV_TO_SUB_IF(dev), req);
req              2371 net/mac80211/cfg.c 			   struct cfg80211_assoc_request *req)
req              2373 net/mac80211/cfg.c 	return ieee80211_mgd_assoc(IEEE80211_DEV_TO_SUB_IF(dev), req);
req              2377 net/mac80211/cfg.c 			    struct cfg80211_deauth_request *req)
req              2379 net/mac80211/cfg.c 	return ieee80211_mgd_deauth(IEEE80211_DEV_TO_SUB_IF(dev), req);
req              2383 net/mac80211/cfg.c 			      struct cfg80211_disassoc_request *req)
req              2385 net/mac80211/cfg.c 	return ieee80211_mgd_disassoc(IEEE80211_DEV_TO_SUB_IF(dev), req);
req               282 net/mac80211/driver-ops.h 			      struct ieee80211_scan_request *req)
req               292 net/mac80211/driver-ops.h 	ret = local->ops->hw_scan(&local->hw, &sdata->vif, req);
req               313 net/mac80211/driver-ops.h 		     struct cfg80211_sched_scan_request *req,
req               325 net/mac80211/driver-ops.h 					      req, ies);
req                74 net/mac80211/fils_aead.c 	struct skcipher_request *req;
req               126 net/mac80211/fils_aead.c 	req = skcipher_request_alloc(tfm2, GFP_KERNEL);
req               127 net/mac80211/fils_aead.c 	if (!req) {
req               134 net/mac80211/fils_aead.c 	skcipher_request_set_crypt(req, src, dst, plain_len, v);
req               135 net/mac80211/fils_aead.c 	res = crypto_skcipher_encrypt(req);
req               136 net/mac80211/fils_aead.c 	skcipher_request_free(req);
req               151 net/mac80211/fils_aead.c 	struct skcipher_request *req;
req               185 net/mac80211/fils_aead.c 	req = skcipher_request_alloc(tfm2, GFP_KERNEL);
req               186 net/mac80211/fils_aead.c 	if (!req) {
req               193 net/mac80211/fils_aead.c 	skcipher_request_set_crypt(req, src, dst, crypt_len, iv);
req               194 net/mac80211/fils_aead.c 	res = crypto_skcipher_decrypt(req);
req               195 net/mac80211/fils_aead.c 	skcipher_request_free(req);
req              1526 net/mac80211/ibss.c 					struct sk_buff *req)
req              1528 net/mac80211/ibss.c 	struct ieee80211_mgmt *mgmt = (void *)req->data;
req              1531 net/mac80211/ibss.c 	int tx_last_beacon, len = req->len;
req              1613 net/mac80211/ieee80211_i.h 		       struct cfg80211_auth_request *req);
req              1615 net/mac80211/ieee80211_i.h 			struct cfg80211_assoc_request *req);
req              1617 net/mac80211/ieee80211_i.h 			 struct cfg80211_deauth_request *req);
req              1619 net/mac80211/ieee80211_i.h 			   struct cfg80211_disassoc_request *req);
req              1678 net/mac80211/ieee80211_i.h 			   struct cfg80211_scan_request *req);
req              1696 net/mac80211/ieee80211_i.h 				     struct cfg80211_sched_scan_request *req);
req              1698 net/mac80211/ieee80211_i.h 				       struct cfg80211_sched_scan_request *req);
req              4541 net/mac80211/mlme.c 		struct cfg80211_deauth_request req = {
req              4547 net/mac80211/mlme.c 		ieee80211_mgd_deauth(sdata, &req);
req              5065 net/mac80211/mlme.c 		       struct cfg80211_auth_request *req)
req              5076 net/mac80211/mlme.c 	switch (req->auth_type) {
req              5110 net/mac80211/mlme.c 	auth_data = kzalloc(sizeof(*auth_data) + req->auth_data_len +
req              5111 net/mac80211/mlme.c 			    req->ie_len, GFP_KERNEL);
req              5115 net/mac80211/mlme.c 	auth_data->bss = req->bss;
req              5117 net/mac80211/mlme.c 	if (req->auth_data_len >= 4) {
req              5118 net/mac80211/mlme.c 		if (req->auth_type == NL80211_AUTHTYPE_SAE) {
req              5119 net/mac80211/mlme.c 			__le16 *pos = (__le16 *) req->auth_data;
req              5124 net/mac80211/mlme.c 		memcpy(auth_data->data, req->auth_data + 4,
req              5125 net/mac80211/mlme.c 		       req->auth_data_len - 4);
req              5126 net/mac80211/mlme.c 		auth_data->data_len += req->auth_data_len - 4;
req              5134 net/mac80211/mlme.c 	cont_auth = ifmgd->auth_data && req->bss == ifmgd->auth_data->bss;
req              5136 net/mac80211/mlme.c 	if (req->ie && req->ie_len) {
req              5138 net/mac80211/mlme.c 		       req->ie, req->ie_len);
req              5139 net/mac80211/mlme.c 		auth_data->data_len += req->ie_len;
req              5142 net/mac80211/mlme.c 	if (req->key && req->key_len) {
req              5143 net/mac80211/mlme.c 		auth_data->key_len = req->key_len;
req              5144 net/mac80211/mlme.c 		auth_data->key_idx = req->key_idx;
req              5145 net/mac80211/mlme.c 		memcpy(auth_data->key, req->key, req->key_len);
req              5153 net/mac80211/mlme.c 		if (cont_auth && req->auth_type == NL80211_AUTHTYPE_SAE) {
req              5168 net/mac80211/mlme.c 	if (cont_auth && req->auth_type == NL80211_AUTHTYPE_SAE &&
req              5170 net/mac80211/mlme.c 		ieee80211_mark_sta_auth(sdata, req->bss->bssid);
req              5177 net/mac80211/mlme.c 			   ifmgd->associated->bssid, req->bss->bssid);
req              5187 net/mac80211/mlme.c 	sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid);
req              5189 net/mac80211/mlme.c 	err = ieee80211_prep_connection(sdata, req->bss, cont_auth, false);
req              5195 net/mac80211/mlme.c 		sta_info_destroy_addr(sdata, req->bss->bssid);
req              5215 net/mac80211/mlme.c 			struct cfg80211_assoc_request *req)
req              5219 net/mac80211/mlme.c 	struct ieee80211_bss *bss = (void *)req->bss->priv;
req              5227 net/mac80211/mlme.c 	assoc_data = kzalloc(sizeof(*assoc_data) + req->ie_len, GFP_KERNEL);
req              5232 net/mac80211/mlme.c 	ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
req              5247 net/mac80211/mlme.c 			   ifmgd->associated->bssid, req->bss->bssid);
req              5271 net/mac80211/mlme.c 		match = ether_addr_equal(ifmgd->bssid, req->bss->bssid);
req              5289 net/mac80211/mlme.c 	for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) {
req              5290 net/mac80211/mlme.c 		if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
req              5291 net/mac80211/mlme.c 		    req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
req              5292 net/mac80211/mlme.c 		    req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
req              5302 net/mac80211/mlme.c 	sband = local->hw.wiphy->bands[req->bss->channel->band];
req              5324 net/mac80211/mlme.c 	memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
req              5325 net/mac80211/mlme.c 	memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
req              5328 net/mac80211/mlme.c 	memcpy(&ifmgd->vht_capa, &req->vht_capa, sizeof(ifmgd->vht_capa));
req              5329 net/mac80211/mlme.c 	memcpy(&ifmgd->vht_capa_mask, &req->vht_capa_mask,
req              5332 net/mac80211/mlme.c 	if (req->ie && req->ie_len) {
req              5333 net/mac80211/mlme.c 		memcpy(assoc_data->ie, req->ie, req->ie_len);
req              5334 net/mac80211/mlme.c 		assoc_data->ie_len = req->ie_len;
req              5337 net/mac80211/mlme.c 	if (req->fils_kek) {
req              5339 net/mac80211/mlme.c 		if (WARN_ON(req->fils_kek_len > FILS_MAX_KEK_LEN)) {
req              5343 net/mac80211/mlme.c 		memcpy(assoc_data->fils_kek, req->fils_kek,
req              5344 net/mac80211/mlme.c 		       req->fils_kek_len);
req              5345 net/mac80211/mlme.c 		assoc_data->fils_kek_len = req->fils_kek_len;
req              5348 net/mac80211/mlme.c 	if (req->fils_nonces)
req              5349 net/mac80211/mlme.c 		memcpy(assoc_data->fils_nonces, req->fils_nonces,
req              5352 net/mac80211/mlme.c 	assoc_data->bss = req->bss;
req              5362 net/mac80211/mlme.c 	assoc_data->capability = req->bss->capability;
req              5367 net/mac80211/mlme.c 	ht_ie = ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_OPERATION);
req              5373 net/mac80211/mlme.c 	vht_ie = ieee80211_bss_get_ie(req->bss, WLAN_EID_VHT_CAPABILITY);
req              5395 net/mac80211/mlme.c 	if (req->prev_bssid)
req              5396 net/mac80211/mlme.c 		memcpy(assoc_data->prev_bssid, req->prev_bssid, ETH_ALEN);
req              5398 net/mac80211/mlme.c 	if (req->use_mfp) {
req              5406 net/mac80211/mlme.c 	if (req->flags & ASSOC_REQ_USE_RRM)
req              5411 net/mac80211/mlme.c 	if (req->crypto.control_port)
req              5416 net/mac80211/mlme.c 	sdata->control_port_protocol = req->crypto.control_port_ethertype;
req              5417 net/mac80211/mlme.c 	sdata->control_port_no_encrypt = req->crypto.control_port_no_encrypt;
req              5419 net/mac80211/mlme.c 					req->crypto.control_port_over_nl80211;
req              5420 net/mac80211/mlme.c 	sdata->encrypt_headroom = ieee80211_cs_headroom(local, &req->crypto,
req              5433 net/mac80211/mlme.c 		if (req->flags & ASSOC_REQ_DISABLE_HT)
req              5446 net/mac80211/mlme.c 		    req->flags & ASSOC_REQ_DISABLE_VHT)
req              5450 net/mac80211/mlme.c 	if (req->flags & ASSOC_REQ_DISABLE_HT) {
req              5455 net/mac80211/mlme.c 	if (req->flags & ASSOC_REQ_DISABLE_VHT)
req              5458 net/mac80211/mlme.c 	err = ieee80211_prep_connection(sdata, req->bss, true, override);
req              5463 net/mac80211/mlme.c 	beacon_ies = rcu_dereference(req->bss->beacon_ies);
req              5473 net/mac80211/mlme.c 		assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval);
req              5537 net/mac80211/mlme.c 			 struct cfg80211_deauth_request *req)
req              5541 net/mac80211/mlme.c 	bool tx = !req->local_state_change;
req              5544 net/mac80211/mlme.c 	    ether_addr_equal(ifmgd->auth_data->bss->bssid, req->bssid)) {
req              5547 net/mac80211/mlme.c 			   req->bssid, req->reason_code,
req              5548 net/mac80211/mlme.c 			   ieee80211_get_reason_code_string(req->reason_code));
req              5551 net/mac80211/mlme.c 		ieee80211_send_deauth_disassoc(sdata, req->bssid, req->bssid,
req              5553 net/mac80211/mlme.c 					       req->reason_code, tx,
req              5558 net/mac80211/mlme.c 					    req->reason_code);
req              5564 net/mac80211/mlme.c 	    ether_addr_equal(ifmgd->assoc_data->bss->bssid, req->bssid)) {
req              5567 net/mac80211/mlme.c 			   req->bssid, req->reason_code,
req              5568 net/mac80211/mlme.c 			   ieee80211_get_reason_code_string(req->reason_code));
req              5571 net/mac80211/mlme.c 		ieee80211_send_deauth_disassoc(sdata, req->bssid, req->bssid,
req              5573 net/mac80211/mlme.c 					       req->reason_code, tx,
req              5578 net/mac80211/mlme.c 					    req->reason_code);
req              5583 net/mac80211/mlme.c 	    ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
req              5586 net/mac80211/mlme.c 			   req->bssid, req->reason_code,
req              5587 net/mac80211/mlme.c 			   ieee80211_get_reason_code_string(req->reason_code));
req              5590 net/mac80211/mlme.c 				       req->reason_code, tx, frame_buf);
req              5593 net/mac80211/mlme.c 					    req->reason_code);
req              5601 net/mac80211/mlme.c 			   struct cfg80211_disassoc_request *req)
req              5613 net/mac80211/mlme.c 	if (ifmgd->associated != req->bss)
req              5618 net/mac80211/mlme.c 		   req->bss->bssid, req->reason_code, ieee80211_get_reason_code_string(req->reason_code));
req              5620 net/mac80211/mlme.c 	memcpy(bssid, req->bss->bssid, ETH_ALEN);
req              5622 net/mac80211/mlme.c 			       req->reason_code, !req->local_state_change,
req              5626 net/mac80211/mlme.c 				    req->reason_code);
req               312 net/mac80211/scan.c 	struct cfg80211_scan_request *req;
req               318 net/mac80211/scan.c 	req = rcu_dereference_protected(local->scan_req,
req               325 net/mac80211/scan.c 		for (i = 0; i < req->n_channels; i++) {
req               326 net/mac80211/scan.c 			local->hw_scan_req->req.channels[i] = req->channels[i];
req               327 net/mac80211/scan.c 			bands_used |= BIT(req->channels[i]->band);
req               330 net/mac80211/scan.c 		n_chans = req->n_channels;
req               338 net/mac80211/scan.c 			for (i = 0; i < req->n_channels; i++) {
req               339 net/mac80211/scan.c 				if (req->channels[i]->band !=
req               342 net/mac80211/scan.c 				local->hw_scan_req->req.channels[n_chans] =
req               343 net/mac80211/scan.c 							req->channels[i];
req               345 net/mac80211/scan.c 				bands_used |= BIT(req->channels[i]->band);
req               352 net/mac80211/scan.c 	local->hw_scan_req->req.n_channels = n_chans;
req               353 net/mac80211/scan.c 	ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
req               355 net/mac80211/scan.c 	if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT)
req               359 net/mac80211/scan.c 					 (u8 *)local->hw_scan_req->req.ie,
req               362 net/mac80211/scan.c 					 req->ie, req->ie_len,
req               363 net/mac80211/scan.c 					 bands_used, req->rates, &chandef,
req               365 net/mac80211/scan.c 	local->hw_scan_req->req.ie_len = ielen;
req               366 net/mac80211/scan.c 	local->hw_scan_req->req.no_cck = req->no_cck;
req               367 net/mac80211/scan.c 	ether_addr_copy(local->hw_scan_req->req.mac_addr, req->mac_addr);
req               368 net/mac80211/scan.c 	ether_addr_copy(local->hw_scan_req->req.mac_addr_mask,
req               369 net/mac80211/scan.c 			req->mac_addr_mask);
req               370 net/mac80211/scan.c 	ether_addr_copy(local->hw_scan_req->req.bssid, req->bssid);
req               648 net/mac80211/scan.c 				  struct cfg80211_scan_request *req)
req               664 net/mac80211/scan.c 		rcu_assign_pointer(local->scan_req, req);
req               673 net/mac80211/scan.c 		local->hw_scan_ies_bufsize = local->scan_ies_len + req->ie_len;
req               679 net/mac80211/scan.c 			for (i = 0; i < req->n_channels; i++) {
req               680 net/mac80211/scan.c 				if (bands_counted & BIT(req->channels[i]->band))
req               682 net/mac80211/scan.c 				bands_counted |= BIT(req->channels[i]->band);
req               691 net/mac80211/scan.c 				req->n_channels * sizeof(req->channels[0]) +
req               696 net/mac80211/scan.c 		local->hw_scan_req->req.ssids = req->ssids;
req               697 net/mac80211/scan.c 		local->hw_scan_req->req.n_ssids = req->n_ssids;
req               700 net/mac80211/scan.c 			req->n_channels * sizeof(req->channels[0]);
req               701 net/mac80211/scan.c 		local->hw_scan_req->req.ie = ies;
req               702 net/mac80211/scan.c 		local->hw_scan_req->req.flags = req->flags;
req               703 net/mac80211/scan.c 		eth_broadcast_addr(local->hw_scan_req->req.bssid);
req               704 net/mac80211/scan.c 		local->hw_scan_req->req.duration = req->duration;
req               705 net/mac80211/scan.c 		local->hw_scan_req->req.duration_mandatory =
req               706 net/mac80211/scan.c 			req->duration_mandatory;
req               719 net/mac80211/scan.c 	rcu_assign_pointer(local->scan_req, req);
req               722 net/mac80211/scan.c 	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
req               724 net/mac80211/scan.c 				     req->mac_addr,
req               725 net/mac80211/scan.c 				     req->mac_addr_mask);
req               731 net/mac80211/scan.c 	} else if ((req->n_channels == 1) &&
req               732 net/mac80211/scan.c 		   (req->channels[0] == local->_oper_chandef.chan)) {
req               752 net/mac80211/scan.c 		if ((req->channels[0]->flags & (IEEE80211_CHAN_NO_IR |
req               754 net/mac80211/scan.c 		    !req->n_ssids) {
req              1087 net/mac80211/scan.c 			   struct cfg80211_scan_request *req)
req              1092 net/mac80211/scan.c 	res = __ieee80211_start_scan(sdata, req);
req              1233 net/mac80211/scan.c 					struct cfg80211_sched_scan_request *req)
req              1244 net/mac80211/scan.c 	iebufsz = local->scan_ies_len + req->ie_len;
req              1259 net/mac80211/scan.c 	if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT)
req              1268 net/mac80211/scan.c 	ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
req              1271 net/mac80211/scan.c 				 &sched_scan_ies, req->ie,
req              1272 net/mac80211/scan.c 				 req->ie_len, bands_used, rate_masks, &chandef,
req              1275 net/mac80211/scan.c 	ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies);
req              1278 net/mac80211/scan.c 		rcu_assign_pointer(local->sched_scan_req, req);
req              1294 net/mac80211/scan.c 				       struct cfg80211_sched_scan_request *req)
req              1306 net/mac80211/scan.c 	ret = __ieee80211_request_sched_scan_start(sdata, req);
req               617 net/mac802154/llsec.c 	SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
req               627 net/mac802154/llsec.c 	skcipher_request_set_sync_tfm(req, key->tfm0);
req               628 net/mac802154/llsec.c 	skcipher_request_set_callback(req, 0, NULL, NULL);
req               629 net/mac802154/llsec.c 	skcipher_request_set_crypt(req, &src, &src, datalen, iv);
req               630 net/mac802154/llsec.c 	err = crypto_skcipher_encrypt(req);
req               631 net/mac802154/llsec.c 	skcipher_request_zero(req);
req               656 net/mac802154/llsec.c 	struct aead_request *req;
req               661 net/mac802154/llsec.c 	req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC);
req               662 net/mac802154/llsec.c 	if (!req)
req               679 net/mac802154/llsec.c 	aead_request_set_callback(req, 0, NULL, NULL);
req               680 net/mac802154/llsec.c 	aead_request_set_crypt(req, &sg, &sg, datalen, iv);
req               681 net/mac802154/llsec.c 	aead_request_set_ad(req, assoclen);
req               683 net/mac802154/llsec.c 	rc = crypto_aead_encrypt(req);
req               685 net/mac802154/llsec.c 	kzfree(req);
req               835 net/mac802154/llsec.c 	SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
req               844 net/mac802154/llsec.c 	skcipher_request_set_sync_tfm(req, key->tfm0);
req               845 net/mac802154/llsec.c 	skcipher_request_set_callback(req, 0, NULL, NULL);
req               846 net/mac802154/llsec.c 	skcipher_request_set_crypt(req, &src, &src, datalen, iv);
req               848 net/mac802154/llsec.c 	err = crypto_skcipher_decrypt(req);
req               849 net/mac802154/llsec.c 	skcipher_request_zero(req);
req               862 net/mac802154/llsec.c 	struct aead_request *req;
req               867 net/mac802154/llsec.c 	req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC);
req               868 net/mac802154/llsec.c 	if (!req)
req               883 net/mac802154/llsec.c 	aead_request_set_callback(req, 0, NULL, NULL);
req               884 net/mac802154/llsec.c 	aead_request_set_crypt(req, &sg, &sg, datalen, iv);
req               885 net/mac802154/llsec.c 	aead_request_set_ad(req, assoclen);
req               887 net/mac802154/llsec.c 	rc = crypto_aead_decrypt(req);
req               889 net/mac802154/llsec.c 	kzfree(req);
req                76 net/netfilter/nf_conntrack_sane.c 	struct sane_request *req;
req               104 net/netfilter/nf_conntrack_sane.c 		req = sb_ptr;
req               105 net/netfilter/nf_conntrack_sane.c 		if (req->RPC_code != htonl(SANE_NET_START)) {
req               527 net/netfilter/nf_tables_api.c 	struct nft_module_request *req;
req               537 net/netfilter/nf_tables_api.c 	list_for_each_entry(req, &net->nft.module_list, list) {
req               538 net/netfilter/nf_tables_api.c 		if (!strcmp(req->module, module_name)) {
req               539 net/netfilter/nf_tables_api.c 			if (req->done)
req               547 net/netfilter/nf_tables_api.c 	req = kmalloc(sizeof(*req), GFP_KERNEL);
req               548 net/netfilter/nf_tables_api.c 	if (!req)
req               551 net/netfilter/nf_tables_api.c 	req->done = false;
req               552 net/netfilter/nf_tables_api.c 	strlcpy(req->module, module_name, MODULE_NAME_LEN);
req               553 net/netfilter/nf_tables_api.c 	list_add_tail(&req->list, &net->nft.module_list);
req              6741 net/netfilter/nf_tables_api.c 	struct nft_module_request *req, *next;
req              6744 net/netfilter/nf_tables_api.c 	list_for_each_entry_safe(req, next, &net->nft.module_list, list) {
req              6745 net/netfilter/nf_tables_api.c 		WARN_ON_ONCE(!req->done);
req              6746 net/netfilter/nf_tables_api.c 		list_del(&req->list);
req              6747 net/netfilter/nf_tables_api.c 		kfree(req);
req              6972 net/netfilter/nf_tables_api.c 	struct nft_module_request *req, *next;
req              6977 net/netfilter/nf_tables_api.c 	list_for_each_entry_safe(req, next, &module_list, list) {
req              6978 net/netfilter/nf_tables_api.c 		request_module("%s", req->module);
req              6979 net/netfilter/nf_tables_api.c 		req->done = true;
req               584 net/netlabel/netlabel_calipso.c int calipso_req_setattr(struct request_sock *req,
req               592 net/netlabel/netlabel_calipso.c 		ret_val = ops->req_setattr(req, doi_def, secattr);
req               604 net/netlabel/netlabel_calipso.c void calipso_req_delattr(struct request_sock *req)
req               609 net/netlabel/netlabel_calipso.c 		ops->req_delattr(req);
req               122 net/netlabel/netlabel_calipso.h int calipso_req_setattr(struct request_sock *req,
req               125 net/netlabel/netlabel_calipso.h void calipso_req_delattr(struct request_sock *req);
req              1182 net/netlabel/netlabel_kapi.c int netlbl_req_setattr(struct request_sock *req,
req              1187 net/netlabel/netlabel_kapi.c 	struct inet_request_sock *ireq = inet_rsk(req);
req              1190 net/netlabel/netlabel_kapi.c 	switch (req->rsk_ops->family) {
req              1200 net/netlabel/netlabel_kapi.c 			ret_val = cipso_v4_req_setattr(req,
req              1204 net/netlabel/netlabel_kapi.c 			netlbl_req_delattr(req);
req              1221 net/netlabel/netlabel_kapi.c 			ret_val = calipso_req_setattr(req,
req              1225 net/netlabel/netlabel_kapi.c 			netlbl_req_delattr(req);
req              1250 net/netlabel/netlabel_kapi.c void netlbl_req_delattr(struct request_sock *req)
req              1252 net/netlabel/netlabel_kapi.c 	switch (req->rsk_ops->family) {
req              1254 net/netlabel/netlabel_kapi.c 		cipso_v4_req_delattr(req);
req              1258 net/netlabel/netlabel_kapi.c 		calipso_req_delattr(req);
req                45 net/netlink/diag.c 			struct netlink_diag_req *req,
req                69 net/netlink/diag.c 	if ((req->ndiag_show & NDIAG_SHOW_GROUPS) &&
req                73 net/netlink/diag.c 	if ((req->ndiag_show & NDIAG_SHOW_MEMINFO) &&
req                77 net/netlink/diag.c 	if ((req->ndiag_show & NDIAG_SHOW_FLAGS) &&
req                95 net/netlink/diag.c 	struct netlink_diag_req *req;
req               101 net/netlink/diag.c 	req = nlmsg_data(cb->nlh);
req               136 net/netlink/diag.c 		if (sk_diag_fill(sk, skb, req,
req               166 net/netlink/diag.c 		if (sk_diag_fill(sk, skb, req,
req               186 net/netlink/diag.c 	struct netlink_diag_req *req;
req               190 net/netlink/diag.c 	req = nlmsg_data(cb->nlh);
req               192 net/netlink/diag.c 	if (req->sdiag_protocol == NDIAG_PROTO_ALL) {
req               203 net/netlink/diag.c 		if (req->sdiag_protocol >= MAX_LINKS)
req               206 net/netlink/diag.c 		err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num);
req                16 net/nfc/digital.h #define PROTOCOL_ERR(req) pr_err("%d: NFC Digital Protocol error: %s\n", \
req                17 net/nfc/digital.h 				 __LINE__, req)
req                34 net/nfc/digital_core.c 	struct sk_buff *req;
req               174 net/nfc/digital_core.c 	if (cmd->req)
req               176 net/nfc/digital_core.c 				     cmd->req->data, cmd->req->len, false);
req               180 net/nfc/digital_core.c 		rc = ddev->ops->in_send_cmd(ddev, cmd->req, cmd->timeout,
req               185 net/nfc/digital_core.c 		rc = ddev->ops->tg_send_cmd(ddev, cmd->req, cmd->timeout,
req               220 net/nfc/digital_core.c 	kfree_skb(cmd->req);
req               240 net/nfc/digital_core.c 	cmd->req = skb;
req               885 net/nfc/digital_technology.c 	struct digital_iso15693_inv_req *req;
req               899 net/nfc/digital_technology.c 	skb = digital_skb_alloc(ddev, sizeof(*req));
req               903 net/nfc/digital_technology.c 	skb_put(skb, sizeof(*req) - sizeof(req->mask)); /* No mask */
req               904 net/nfc/digital_technology.c 	req = (struct digital_iso15693_inv_req *)skb->data;
req               909 net/nfc/digital_technology.c 	req->flags = DIGITAL_ISO15693_REQ_FLAG_DATA_RATE |
req               912 net/nfc/digital_technology.c 	req->cmd = DIGITAL_CMD_ISO15693_INVENTORY_REQ;
req               913 net/nfc/digital_technology.c 	req->mask_len = 0;
req                98 net/nfc/nci/core.c 			 void (*req)(struct nci_dev *ndev, unsigned long opt),
req               107 net/nfc/nci/core.c 	req(ndev, opt);
req               141 net/nfc/nci/core.c 		       void (*req)(struct nci_dev *ndev,
req               152 net/nfc/nci/core.c 	rc = __nci_request(ndev, req, opt, timeout);
req               176 net/nfc/nci/spi.c 	unsigned char req[2], resp_hdr[2];
req               184 net/nfc/nci/spi.c 	req[0] = NCI_SPI_DIRECT_READ;
req               185 net/nfc/nci/spi.c 	req[1] = nspi->acknowledge_mode;
req               186 net/nfc/nci/spi.c 	tx.tx_buf = req;
req              3695 net/packet/af_packet.c 			len = sizeof(req_u.req);
req              3705 net/packet/af_packet.c 			if (copy_from_user(&req_u.req, optval, len))
req              4260 net/packet/af_packet.c static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
req              4262 net/packet/af_packet.c 	unsigned int block_nr = req->tp_block_nr;
req              4297 net/packet/af_packet.c 	struct tpacket_req *req = &req_u->req;
req              4310 net/packet/af_packet.c 	if (req->tp_block_nr) {
req              4331 net/packet/af_packet.c 		if (unlikely((int)req->tp_block_size <= 0))
req              4333 net/packet/af_packet.c 		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
req              4337 net/packet/af_packet.c 		    req->tp_block_size <
req              4340 net/packet/af_packet.c 		if (unlikely(req->tp_frame_size < min_frame_size))
req              4342 net/packet/af_packet.c 		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
req              4345 net/packet/af_packet.c 		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
req              4348 net/packet/af_packet.c 		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
req              4350 net/packet/af_packet.c 		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
req              4351 net/packet/af_packet.c 					req->tp_frame_nr))
req              4355 net/packet/af_packet.c 		order = get_order(req->tp_block_size);
req              4356 net/packet/af_packet.c 		pg_vec = alloc_pg_vec(req, order);
req              4377 net/packet/af_packet.c 				rx_owner_map = bitmap_alloc(req->tp_frame_nr,
req              4388 net/packet/af_packet.c 		if (unlikely(req->tp_frame_nr))
req              4413 net/packet/af_packet.c 		rb->frame_max = (req->tp_frame_nr - 1);
req              4415 net/packet/af_packet.c 		rb->frame_size = req->tp_frame_size;
req              4419 net/packet/af_packet.c 		swap(rb->pg_vec_len, req->tp_block_nr);
req              4421 net/packet/af_packet.c 		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
req              4446 net/packet/af_packet.c 		free_pg_vec(pg_vec, order, req->tp_block_nr);
req               130 net/packet/diag.c 			struct packet_diag_req *req,
req               150 net/packet/diag.c 	if ((req->pdiag_show & PACKET_SHOW_INFO) &&
req               154 net/packet/diag.c 	if ((req->pdiag_show & PACKET_SHOW_INFO) &&
req               159 net/packet/diag.c 	if ((req->pdiag_show & PACKET_SHOW_MCLIST) &&
req               163 net/packet/diag.c 	if ((req->pdiag_show & PACKET_SHOW_RING_CFG) &&
req               167 net/packet/diag.c 	if ((req->pdiag_show & PACKET_SHOW_FANOUT) &&
req               171 net/packet/diag.c 	if ((req->pdiag_show & PACKET_SHOW_MEMINFO) &&
req               175 net/packet/diag.c 	if ((req->pdiag_show & PACKET_SHOW_FILTER) &&
req               191 net/packet/diag.c 	struct packet_diag_req *req;
req               197 net/packet/diag.c 	req = nlmsg_data(cb->nlh);
req               207 net/packet/diag.c 		if (sk_diag_fill(sk, skb, req,
req               228 net/packet/diag.c 	struct packet_diag_req *req;
req               233 net/packet/diag.c 	req = nlmsg_data(h);
req               235 net/packet/diag.c 	if (req->sdiag_protocol)
req               235 net/phonet/pn_dev.c 	struct if_phonet_req req;
req               241 net/phonet/pn_dev.c 	ret = dev->netdev_ops->ndo_do_ioctl(dev, (struct ifreq *)&req,
req               247 net/phonet/pn_dev.c 	ret = phonet_address_add(dev, req.ifr_phonet_autoconf.device);
req               251 net/phonet/pn_dev.c 				req.ifr_phonet_autoconf.device);
req               103 net/rxrpc/rxkad.c 	struct skcipher_request *req;
req               119 net/rxrpc/rxkad.c 	req = skcipher_request_alloc(&conn->cipher->base, GFP_NOFS);
req               120 net/rxrpc/rxkad.c 	if (!req) {
req               134 net/rxrpc/rxkad.c 	skcipher_request_set_sync_tfm(req, conn->cipher);
req               135 net/rxrpc/rxkad.c 	skcipher_request_set_callback(req, 0, NULL, NULL);
req               136 net/rxrpc/rxkad.c 	skcipher_request_set_crypt(req, &sg, &sg, tmpsize, iv.x);
req               137 net/rxrpc/rxkad.c 	crypto_skcipher_encrypt(req);
req               138 net/rxrpc/rxkad.c 	skcipher_request_free(req);
req               182 net/rxrpc/rxkad.c 				    struct skcipher_request *req)
req               202 net/rxrpc/rxkad.c 	skcipher_request_set_sync_tfm(req, call->conn->cipher);
req               203 net/rxrpc/rxkad.c 	skcipher_request_set_callback(req, 0, NULL, NULL);
req               204 net/rxrpc/rxkad.c 	skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
req               205 net/rxrpc/rxkad.c 	crypto_skcipher_encrypt(req);
req               206 net/rxrpc/rxkad.c 	skcipher_request_zero(req);
req               219 net/rxrpc/rxkad.c 				       struct skcipher_request *req)
req               245 net/rxrpc/rxkad.c 	skcipher_request_set_sync_tfm(req, call->conn->cipher);
req               246 net/rxrpc/rxkad.c 	skcipher_request_set_callback(req, 0, NULL, NULL);
req               247 net/rxrpc/rxkad.c 	skcipher_request_set_crypt(req, &sg[0], &sg[0], sizeof(rxkhdr), iv.x);
req               248 net/rxrpc/rxkad.c 	crypto_skcipher_encrypt(req);
req               262 net/rxrpc/rxkad.c 	skcipher_request_set_crypt(req, sg, sg, len, iv.x);
req               263 net/rxrpc/rxkad.c 	crypto_skcipher_encrypt(req);
req               269 net/rxrpc/rxkad.c 	skcipher_request_zero(req);
req               282 net/rxrpc/rxkad.c 	struct skcipher_request	*req;
req               301 net/rxrpc/rxkad.c 	req = rxkad_get_call_crypto(call);
req               302 net/rxrpc/rxkad.c 	if (!req)
req               315 net/rxrpc/rxkad.c 	skcipher_request_set_sync_tfm(req, call->conn->cipher);
req               316 net/rxrpc/rxkad.c 	skcipher_request_set_callback(req, 0, NULL, NULL);
req               317 net/rxrpc/rxkad.c 	skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
req               318 net/rxrpc/rxkad.c 	crypto_skcipher_encrypt(req);
req               319 net/rxrpc/rxkad.c 	skcipher_request_zero(req);
req               333 net/rxrpc/rxkad.c 					       req);
req               337 net/rxrpc/rxkad.c 						  sechdr, req);
req               354 net/rxrpc/rxkad.c 				 struct skcipher_request *req)
req               383 net/rxrpc/rxkad.c 	skcipher_request_set_sync_tfm(req, call->conn->cipher);
req               384 net/rxrpc/rxkad.c 	skcipher_request_set_callback(req, 0, NULL, NULL);
req               385 net/rxrpc/rxkad.c 	skcipher_request_set_crypt(req, sg, sg, 8, iv.x);
req               386 net/rxrpc/rxkad.c 	crypto_skcipher_decrypt(req);
req               387 net/rxrpc/rxkad.c 	skcipher_request_zero(req);
req               431 net/rxrpc/rxkad.c 				 struct skcipher_request *req)
req               475 net/rxrpc/rxkad.c 	skcipher_request_set_sync_tfm(req, call->conn->cipher);
req               476 net/rxrpc/rxkad.c 	skcipher_request_set_callback(req, 0, NULL, NULL);
req               477 net/rxrpc/rxkad.c 	skcipher_request_set_crypt(req, sg, sg, len, iv.x);
req               478 net/rxrpc/rxkad.c 	crypto_skcipher_decrypt(req);
req               479 net/rxrpc/rxkad.c 	skcipher_request_zero(req);
req               531 net/rxrpc/rxkad.c 	struct skcipher_request	*req;
req               544 net/rxrpc/rxkad.c 	req = rxkad_get_call_crypto(call);
req               545 net/rxrpc/rxkad.c 	if (!req)
req               558 net/rxrpc/rxkad.c 	skcipher_request_set_sync_tfm(req, call->conn->cipher);
req               559 net/rxrpc/rxkad.c 	skcipher_request_set_callback(req, 0, NULL, NULL);
req               560 net/rxrpc/rxkad.c 	skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
req               561 net/rxrpc/rxkad.c 	crypto_skcipher_encrypt(req);
req               562 net/rxrpc/rxkad.c 	skcipher_request_zero(req);
req               579 net/rxrpc/rxkad.c 		return rxkad_verify_packet_1(call, skb, offset, len, seq, req);
req               581 net/rxrpc/rxkad.c 		return rxkad_verify_packet_2(call, skb, offset, len, seq, req);
req               784 net/rxrpc/rxkad.c 	struct skcipher_request *req;
req               788 net/rxrpc/rxkad.c 	req = skcipher_request_alloc(&conn->cipher->base, GFP_NOFS);
req               789 net/rxrpc/rxkad.c 	if (!req)
req               797 net/rxrpc/rxkad.c 	skcipher_request_set_sync_tfm(req, conn->cipher);
req               798 net/rxrpc/rxkad.c 	skcipher_request_set_callback(req, 0, NULL, NULL);
req               799 net/rxrpc/rxkad.c 	skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x);
req               800 net/rxrpc/rxkad.c 	crypto_skcipher_encrypt(req);
req               801 net/rxrpc/rxkad.c 	skcipher_request_free(req);
req               901 net/rxrpc/rxkad.c 	struct skcipher_request *req;
req               936 net/rxrpc/rxkad.c 	req = skcipher_request_alloc(conn->server_key->payload.data[0],
req               938 net/rxrpc/rxkad.c 	if (!req)
req               942 net/rxrpc/rxkad.c 	skcipher_request_set_callback(req, 0, NULL, NULL);
req               943 net/rxrpc/rxkad.c 	skcipher_request_set_crypt(req, sg, sg, ticket_len, iv.x);
req               944 net/rxrpc/rxkad.c 	crypto_skcipher_decrypt(req);
req               945 net/rxrpc/rxkad.c 	skcipher_request_free(req);
req              1056 net/rxrpc/rxkad.c 	struct skcipher_request *req = rxkad_ci_req;
req              1072 net/rxrpc/rxkad.c 	skcipher_request_set_sync_tfm(req, rxkad_ci);
req              1073 net/rxrpc/rxkad.c 	skcipher_request_set_callback(req, 0, NULL, NULL);
req              1074 net/rxrpc/rxkad.c 	skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x);
req              1075 net/rxrpc/rxkad.c 	crypto_skcipher_decrypt(req);
req              1076 net/rxrpc/rxkad.c 	skcipher_request_zero(req);
req              1259 net/rxrpc/rxkad.c 	struct skcipher_request *req;
req              1267 net/rxrpc/rxkad.c 	req = skcipher_request_alloc(&tfm->base, GFP_KERNEL);
req              1268 net/rxrpc/rxkad.c 	if (!req)
req              1271 net/rxrpc/rxkad.c 	rxkad_ci_req = req;
req               125 net/sctp/diag.c 			       const struct inet_diag_req_v2 *req,
req               135 net/sctp/diag.c 	int ext = req->idiag_ext;
req               254 net/sctp/diag.c 	const struct inet_diag_req_v2 *req = commp->r;
req               260 net/sctp/diag.c 	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
req               275 net/sctp/diag.c 	err = inet_sctp_diag_fill(sk, assoc, rep, req,
req               433 net/sctp/diag.c 			      const struct inet_diag_req_v2 *req)
req               439 net/sctp/diag.c 		.r = req,
req               444 net/sctp/diag.c 	if (req->sdiag_family == AF_INET) {
req               445 net/sctp/diag.c 		laddr.v4.sin_port = req->id.idiag_sport;
req               446 net/sctp/diag.c 		laddr.v4.sin_addr.s_addr = req->id.idiag_src[0];
req               449 net/sctp/diag.c 		paddr.v4.sin_port = req->id.idiag_dport;
req               450 net/sctp/diag.c 		paddr.v4.sin_addr.s_addr = req->id.idiag_dst[0];
req               453 net/sctp/diag.c 		laddr.v6.sin6_port = req->id.idiag_sport;
req               454 net/sctp/diag.c 		memcpy(&laddr.v6.sin6_addr, req->id.idiag_src,
req               458 net/sctp/diag.c 		paddr.v6.sin6_port = req->id.idiag_dport;
req               459 net/sctp/diag.c 		memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst,
req               493 net/sctp/stream.c 		struct sctp_strreset_tsnreq *req = param.v;
req               495 net/sctp/stream.c 		if ((!resp_seq || req->request_seq == resp_seq) &&
req               496 net/sctp/stream.c 		    (!type || type == req->param_hdr.type))
req               917 net/sctp/stream.c 	struct sctp_paramhdr *req;
req               920 net/sctp/stream.c 	req = sctp_chunk_lookup_strreset_param(asoc, resp->response_seq, 0);
req               921 net/sctp/stream.c 	if (!req)
req               935 net/sctp/stream.c 	if (req->type == SCTP_PARAM_RESET_OUT_REQUEST) {
req               939 net/sctp/stream.c 		outreq = (struct sctp_strreset_outreq *)req;
req               968 net/sctp/stream.c 	} else if (req->type == SCTP_PARAM_RESET_IN_REQUEST) {
req               976 net/sctp/stream.c 		inreq = (struct sctp_strreset_inreq *)req;
req               985 net/sctp/stream.c 	} else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) {
req              1033 net/sctp/stream.c 	} else if (req->type == SCTP_PARAM_RESET_ADD_OUT_STREAMS) {
req              1037 net/sctp/stream.c 		addstrm = (struct sctp_strreset_addstrm *)req;
req              1049 net/sctp/stream.c 	} else if (req->type == SCTP_PARAM_RESET_ADD_IN_STREAMS) {
req              1058 net/sctp/stream.c 		addstrm = (struct sctp_strreset_addstrm *)req;
req                77 net/smc/smc_diag.c 			   const struct smc_diag_req *req,
req               109 net/smc/smc_diag.c 	if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) &&
req               150 net/smc/smc_diag.c 	    (req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) &&
req               170 net/smc/smc_diag.c 	    (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
req               697 net/sunrpc/auth.c 	struct rpc_rqst *req = task->tk_rqstp;
req               726 net/sunrpc/auth.c 	put_rpccred(req->rq_cred);
req               727 net/sunrpc/auth.c 	req->rq_cred = new;
req              1528 net/sunrpc/auth_gss/auth_gss.c 	struct rpc_rqst *req = task->tk_rqstp;
req              1529 net/sunrpc/auth_gss/auth_gss.c 	struct rpc_cred *cred = req->rq_cred;
req              1550 net/sunrpc/auth_gss/auth_gss.c 	req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
req              1552 net/sunrpc/auth_gss/auth_gss.c 	if (req->rq_seqno == MAXSEQ)
req              1558 net/sunrpc/auth_gss/auth_gss.c 	*p++ = cpu_to_be32(req->rq_seqno);
req              1567 net/sunrpc/auth_gss/auth_gss.c 	iov.iov_base = req->rq_snd_buf.head[0].iov_base;
req              2083 net/sunrpc/auth_gss/auth_gss.c 	struct rpc_rqst *req = task->tk_rqstp;
req              2084 net/sunrpc/auth_gss/auth_gss.c 	struct rpc_cred *cred = req->rq_cred;
req              2092 net/sunrpc/auth_gss/auth_gss.c 	if (gss_seq_is_newer(req->rq_seqno, READ_ONCE(ctx->gc_seq)))
req              2096 net/sunrpc/auth_gss/auth_gss.c 	while (gss_seq_is_newer(req->rq_seqno, seq_xmit)) {
req              2099 net/sunrpc/auth_gss/auth_gss.c 		seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, req->rq_seqno);
req              2108 net/sunrpc/auth_gss/auth_gss.c 		ret = !gss_seq_is_newer(req->rq_seqno, seq_xmit - win);
req                65 net/sunrpc/auth_gss/gss_krb5_crypto.c 	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
req                82 net/sunrpc/auth_gss/gss_krb5_crypto.c 	skcipher_request_set_sync_tfm(req, tfm);
req                83 net/sunrpc/auth_gss/gss_krb5_crypto.c 	skcipher_request_set_callback(req, 0, NULL, NULL);
req                84 net/sunrpc/auth_gss/gss_krb5_crypto.c 	skcipher_request_set_crypt(req, sg, sg, length, local_iv);
req                86 net/sunrpc/auth_gss/gss_krb5_crypto.c 	ret = crypto_skcipher_encrypt(req);
req                87 net/sunrpc/auth_gss/gss_krb5_crypto.c 	skcipher_request_zero(req);
req               104 net/sunrpc/auth_gss/gss_krb5_crypto.c 	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
req               120 net/sunrpc/auth_gss/gss_krb5_crypto.c 	skcipher_request_set_sync_tfm(req, tfm);
req               121 net/sunrpc/auth_gss/gss_krb5_crypto.c 	skcipher_request_set_callback(req, 0, NULL, NULL);
req               122 net/sunrpc/auth_gss/gss_krb5_crypto.c 	skcipher_request_set_crypt(req, sg, sg, length, local_iv);
req               124 net/sunrpc/auth_gss/gss_krb5_crypto.c 	ret = crypto_skcipher_decrypt(req);
req               125 net/sunrpc/auth_gss/gss_krb5_crypto.c 	skcipher_request_zero(req);
req               134 net/sunrpc/auth_gss/gss_krb5_crypto.c 	struct ahash_request *req = data;
req               136 net/sunrpc/auth_gss/gss_krb5_crypto.c 	ahash_request_set_crypt(req, sg, NULL, sg->length);
req               138 net/sunrpc/auth_gss/gss_krb5_crypto.c 	return crypto_ahash_update(req);
req               175 net/sunrpc/auth_gss/gss_krb5_crypto.c 	struct ahash_request *req;
req               208 net/sunrpc/auth_gss/gss_krb5_crypto.c 	req = ahash_request_alloc(md5, GFP_NOFS);
req               209 net/sunrpc/auth_gss/gss_krb5_crypto.c 	if (!req)
req               212 net/sunrpc/auth_gss/gss_krb5_crypto.c 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
req               214 net/sunrpc/auth_gss/gss_krb5_crypto.c 	err = crypto_ahash_init(req);
req               218 net/sunrpc/auth_gss/gss_krb5_crypto.c 	ahash_request_set_crypt(req, sg, NULL, 4);
req               219 net/sunrpc/auth_gss/gss_krb5_crypto.c 	err = crypto_ahash_update(req);
req               224 net/sunrpc/auth_gss/gss_krb5_crypto.c 	ahash_request_set_crypt(req, sg, NULL, hdrlen);
req               225 net/sunrpc/auth_gss/gss_krb5_crypto.c 	err = crypto_ahash_update(req);
req               229 net/sunrpc/auth_gss/gss_krb5_crypto.c 			      checksummer, req);
req               232 net/sunrpc/auth_gss/gss_krb5_crypto.c 	ahash_request_set_crypt(req, NULL, checksumdata, 0);
req               233 net/sunrpc/auth_gss/gss_krb5_crypto.c 	err = crypto_ahash_final(req);
req               237 net/sunrpc/auth_gss/gss_krb5_crypto.c 	ahash_request_free(req);
req               238 net/sunrpc/auth_gss/gss_krb5_crypto.c 	req = ahash_request_alloc(hmac_md5, GFP_NOFS);
req               239 net/sunrpc/auth_gss/gss_krb5_crypto.c 	if (!req)
req               242 net/sunrpc/auth_gss/gss_krb5_crypto.c 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
req               249 net/sunrpc/auth_gss/gss_krb5_crypto.c 	ahash_request_set_crypt(req, sg, checksumdata,
req               251 net/sunrpc/auth_gss/gss_krb5_crypto.c 	err = crypto_ahash_digest(req);
req               258 net/sunrpc/auth_gss/gss_krb5_crypto.c 	ahash_request_free(req);
req               281 net/sunrpc/auth_gss/gss_krb5_crypto.c 	struct ahash_request *req;
req               306 net/sunrpc/auth_gss/gss_krb5_crypto.c 	req = ahash_request_alloc(tfm, GFP_NOFS);
req               307 net/sunrpc/auth_gss/gss_krb5_crypto.c 	if (!req)
req               310 net/sunrpc/auth_gss/gss_krb5_crypto.c 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
req               321 net/sunrpc/auth_gss/gss_krb5_crypto.c 	err = crypto_ahash_init(req);
req               325 net/sunrpc/auth_gss/gss_krb5_crypto.c 	ahash_request_set_crypt(req, sg, NULL, hdrlen);
req               326 net/sunrpc/auth_gss/gss_krb5_crypto.c 	err = crypto_ahash_update(req);
req               330 net/sunrpc/auth_gss/gss_krb5_crypto.c 			      checksummer, req);
req               333 net/sunrpc/auth_gss/gss_krb5_crypto.c 	ahash_request_set_crypt(req, NULL, checksumdata, 0);
req               334 net/sunrpc/auth_gss/gss_krb5_crypto.c 	err = crypto_ahash_final(req);
req               357 net/sunrpc/auth_gss/gss_krb5_crypto.c 	ahash_request_free(req);
req               378 net/sunrpc/auth_gss/gss_krb5_crypto.c 	struct ahash_request *req;
req               402 net/sunrpc/auth_gss/gss_krb5_crypto.c 	req = ahash_request_alloc(tfm, GFP_NOFS);
req               403 net/sunrpc/auth_gss/gss_krb5_crypto.c 	if (!req)
req               406 net/sunrpc/auth_gss/gss_krb5_crypto.c 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
req               412 net/sunrpc/auth_gss/gss_krb5_crypto.c 	err = crypto_ahash_init(req);
req               416 net/sunrpc/auth_gss/gss_krb5_crypto.c 			      checksummer, req);
req               421 net/sunrpc/auth_gss/gss_krb5_crypto.c 		ahash_request_set_crypt(req, sg, NULL, hdrlen);
req               422 net/sunrpc/auth_gss/gss_krb5_crypto.c 		err = crypto_ahash_update(req);
req               426 net/sunrpc/auth_gss/gss_krb5_crypto.c 	ahash_request_set_crypt(req, NULL, checksumdata, 0);
req               427 net/sunrpc/auth_gss/gss_krb5_crypto.c 	err = crypto_ahash_final(req);
req               444 net/sunrpc/auth_gss/gss_krb5_crypto.c 	ahash_request_free(req);
req               454 net/sunrpc/auth_gss/gss_krb5_crypto.c 	struct skcipher_request *req;
req               470 net/sunrpc/auth_gss/gss_krb5_crypto.c 		crypto_sync_skcipher_reqtfm(desc->req);
req               505 net/sunrpc/auth_gss/gss_krb5_crypto.c 	skcipher_request_set_crypt(desc->req, desc->infrags, desc->outfrags,
req               508 net/sunrpc/auth_gss/gss_krb5_crypto.c 	ret = crypto_skcipher_encrypt(desc->req);
req               535 net/sunrpc/auth_gss/gss_krb5_crypto.c 	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
req               539 net/sunrpc/auth_gss/gss_krb5_crypto.c 	skcipher_request_set_sync_tfm(req, tfm);
req               540 net/sunrpc/auth_gss/gss_krb5_crypto.c 	skcipher_request_set_callback(req, 0, NULL, NULL);
req               543 net/sunrpc/auth_gss/gss_krb5_crypto.c 	desc.req = req;
req               554 net/sunrpc/auth_gss/gss_krb5_crypto.c 	skcipher_request_zero(req);
req               560 net/sunrpc/auth_gss/gss_krb5_crypto.c 	struct skcipher_request *req;
req               572 net/sunrpc/auth_gss/gss_krb5_crypto.c 		crypto_sync_skcipher_reqtfm(desc->req);
req               591 net/sunrpc/auth_gss/gss_krb5_crypto.c 	skcipher_request_set_crypt(desc->req, desc->frags, desc->frags,
req               594 net/sunrpc/auth_gss/gss_krb5_crypto.c 	ret = crypto_skcipher_decrypt(desc->req);
req               618 net/sunrpc/auth_gss/gss_krb5_crypto.c 	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
req               623 net/sunrpc/auth_gss/gss_krb5_crypto.c 	skcipher_request_set_sync_tfm(req, tfm);
req               624 net/sunrpc/auth_gss/gss_krb5_crypto.c 	skcipher_request_set_callback(req, 0, NULL, NULL);
req               627 net/sunrpc/auth_gss/gss_krb5_crypto.c 	desc.req = req;
req               634 net/sunrpc/auth_gss/gss_krb5_crypto.c 	skcipher_request_zero(req);
req               682 net/sunrpc/auth_gss/gss_krb5_crypto.c 	SYNC_SKCIPHER_REQUEST_ON_STACK(req, cipher);
req               711 net/sunrpc/auth_gss/gss_krb5_crypto.c 	skcipher_request_set_sync_tfm(req, cipher);
req               712 net/sunrpc/auth_gss/gss_krb5_crypto.c 	skcipher_request_set_callback(req, 0, NULL, NULL);
req               713 net/sunrpc/auth_gss/gss_krb5_crypto.c 	skcipher_request_set_crypt(req, sg, sg, len, iv);
req               716 net/sunrpc/auth_gss/gss_krb5_crypto.c 		ret = crypto_skcipher_encrypt(req);
req               718 net/sunrpc/auth_gss/gss_krb5_crypto.c 		ret = crypto_skcipher_decrypt(req);
req               720 net/sunrpc/auth_gss/gss_krb5_crypto.c 	skcipher_request_zero(req);
req               812 net/sunrpc/auth_gss/gss_krb5_crypto.c 		SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
req               819 net/sunrpc/auth_gss/gss_krb5_crypto.c 		desc.req = req;
req               821 net/sunrpc/auth_gss/gss_krb5_crypto.c 		skcipher_request_set_sync_tfm(req, aux_cipher);
req               822 net/sunrpc/auth_gss/gss_krb5_crypto.c 		skcipher_request_set_callback(req, 0, NULL, NULL);
req               829 net/sunrpc/auth_gss/gss_krb5_crypto.c 		skcipher_request_zero(req);
req               896 net/sunrpc/auth_gss/gss_krb5_crypto.c 		SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
req               900 net/sunrpc/auth_gss/gss_krb5_crypto.c 		desc.req = req;
req               902 net/sunrpc/auth_gss/gss_krb5_crypto.c 		skcipher_request_set_sync_tfm(req, aux_cipher);
req               903 net/sunrpc/auth_gss/gss_krb5_crypto.c 		skcipher_request_set_callback(req, 0, NULL, NULL);
req               908 net/sunrpc/auth_gss/gss_krb5_crypto.c 		skcipher_request_zero(req);
req               722 net/sunrpc/auth_gss/gss_rpc_xdr.c void gssx_enc_accept_sec_context(struct rpc_rqst *req,
req               771 net/sunrpc/auth_gss/gss_rpc_xdr.c 	xdr_inline_pages(&req->rq_rcv_buf,
req               774 net/sunrpc/auth_gss/gss_rpc_xdr.c 	req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES;
req               167 net/sunrpc/auth_gss/gss_rpc_xdr.h void gssx_enc_accept_sec_context(struct rpc_rqst *req,
req                54 net/sunrpc/backchannel_rqst.c static void xprt_free_allocation(struct rpc_rqst *req)
req                58 net/sunrpc/backchannel_rqst.c 	dprintk("RPC:        free allocations for req= %p\n", req);
req                59 net/sunrpc/backchannel_rqst.c 	WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
req                60 net/sunrpc/backchannel_rqst.c 	xbufp = &req->rq_rcv_buf;
req                62 net/sunrpc/backchannel_rqst.c 	xbufp = &req->rq_snd_buf;
req                64 net/sunrpc/backchannel_rqst.c 	kfree(req);
req                81 net/sunrpc/backchannel_rqst.c 	struct rpc_rqst *req;
req                84 net/sunrpc/backchannel_rqst.c 	req = kzalloc(sizeof(*req), gfp_flags);
req                85 net/sunrpc/backchannel_rqst.c 	if (req == NULL)
req                88 net/sunrpc/backchannel_rqst.c 	req->rq_xprt = xprt;
req                89 net/sunrpc/backchannel_rqst.c 	INIT_LIST_HEAD(&req->rq_bc_list);
req                92 net/sunrpc/backchannel_rqst.c 	if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
req                96 net/sunrpc/backchannel_rqst.c 	req->rq_rcv_buf.len = PAGE_SIZE;
req                99 net/sunrpc/backchannel_rqst.c 	if (xprt_alloc_xdr_buf(&req->rq_snd_buf, gfp_flags) < 0) {
req               103 net/sunrpc/backchannel_rqst.c 	return req;
req               105 net/sunrpc/backchannel_rqst.c 	xprt_free_allocation(req);
req               137 net/sunrpc/backchannel_rqst.c 	struct rpc_rqst *req;
req               157 net/sunrpc/backchannel_rqst.c 		req = xprt_alloc_bc_req(xprt, GFP_KERNEL);
req               158 net/sunrpc/backchannel_rqst.c 		if (req == NULL) {
req               164 net/sunrpc/backchannel_rqst.c 		dprintk("RPC:       adding req= %p\n", req);
req               165 net/sunrpc/backchannel_rqst.c 		list_add(&req->rq_bc_pa_list, &tmp_list);
req               186 net/sunrpc/backchannel_rqst.c 		req = list_first_entry(&tmp_list,
req               189 net/sunrpc/backchannel_rqst.c 		list_del(&req->rq_bc_pa_list);
req               190 net/sunrpc/backchannel_rqst.c 		xprt_free_allocation(req);
req               215 net/sunrpc/backchannel_rqst.c 	struct rpc_rqst *req = NULL, *tmp = NULL;
req               224 net/sunrpc/backchannel_rqst.c 	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
req               225 net/sunrpc/backchannel_rqst.c 		dprintk("RPC:        req=%p\n", req);
req               226 net/sunrpc/backchannel_rqst.c 		list_del(&req->rq_bc_pa_list);
req               227 net/sunrpc/backchannel_rqst.c 		xprt_free_allocation(req);
req               243 net/sunrpc/backchannel_rqst.c 	struct rpc_rqst *req = NULL;
req               255 net/sunrpc/backchannel_rqst.c 	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
req               257 net/sunrpc/backchannel_rqst.c 	req->rq_reply_bytes_recvd = 0;
req               258 net/sunrpc/backchannel_rqst.c 	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
req               259 net/sunrpc/backchannel_rqst.c 			sizeof(req->rq_private_buf));
req               260 net/sunrpc/backchannel_rqst.c 	req->rq_xid = xid;
req               261 net/sunrpc/backchannel_rqst.c 	req->rq_connect_cookie = xprt->connect_cookie;
req               262 net/sunrpc/backchannel_rqst.c 	dprintk("RPC:       backchannel req=%p\n", req);
req               264 net/sunrpc/backchannel_rqst.c 	return req;
req               271 net/sunrpc/backchannel_rqst.c void xprt_free_bc_request(struct rpc_rqst *req)
req               273 net/sunrpc/backchannel_rqst.c 	struct rpc_xprt *xprt = req->rq_xprt;
req               275 net/sunrpc/backchannel_rqst.c 	xprt->ops->bc_free_rqst(req);
req               278 net/sunrpc/backchannel_rqst.c void xprt_free_bc_rqst(struct rpc_rqst *req)
req               280 net/sunrpc/backchannel_rqst.c 	struct rpc_xprt *xprt = req->rq_xprt;
req               282 net/sunrpc/backchannel_rqst.c 	dprintk("RPC:       free backchannel req=%p\n", req);
req               284 net/sunrpc/backchannel_rqst.c 	req->rq_connect_cookie = xprt->connect_cookie - 1;
req               286 net/sunrpc/backchannel_rqst.c 	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
req               295 net/sunrpc/backchannel_rqst.c 		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
req               298 net/sunrpc/backchannel_rqst.c 		req = NULL;
req               301 net/sunrpc/backchannel_rqst.c 	if (req != NULL) {
req               308 net/sunrpc/backchannel_rqst.c 		dprintk("RPC:       Last session removed req=%p\n", req);
req               309 net/sunrpc/backchannel_rqst.c 		xprt_free_allocation(req);
req               327 net/sunrpc/backchannel_rqst.c 	struct rpc_rqst *req, *new = NULL;
req               331 net/sunrpc/backchannel_rqst.c 		list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
req               332 net/sunrpc/backchannel_rqst.c 			if (req->rq_connect_cookie != xprt->connect_cookie)
req               334 net/sunrpc/backchannel_rqst.c 			if (req->rq_xid == xid)
req               337 net/sunrpc/backchannel_rqst.c 		req = xprt_get_bc_request(xprt, xid, new);
req               341 net/sunrpc/backchannel_rqst.c 			if (req != new)
req               344 net/sunrpc/backchannel_rqst.c 		} else if (req)
req               348 net/sunrpc/backchannel_rqst.c 	return req;
req               357 net/sunrpc/backchannel_rqst.c void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
req               359 net/sunrpc/backchannel_rqst.c 	struct rpc_xprt *xprt = req->rq_xprt;
req               363 net/sunrpc/backchannel_rqst.c 	list_del(&req->rq_bc_pa_list);
req               367 net/sunrpc/backchannel_rqst.c 	req->rq_private_buf.len = copied;
req               368 net/sunrpc/backchannel_rqst.c 	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
req               373 net/sunrpc/backchannel_rqst.c 	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
req                39 net/sunrpc/cache.c static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
req               617 net/sunrpc/cache.c static void cache_wait_req(struct cache_req *req, struct cache_head *item)
req               629 net/sunrpc/cache.c 		    &sleeper.completion, req->thread_wait) <= 0) {
req               677 net/sunrpc/cache.c static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
req               681 net/sunrpc/cache.c 	if (req->thread_wait) {
req               682 net/sunrpc/cache.c 		cache_wait_req(req, item);
req               686 net/sunrpc/cache.c 	dreq = req->defer(req);
req              1208 net/sunrpc/clnt.c struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
req              1217 net/sunrpc/clnt.c 	dprintk("RPC: rpc_run_bc_task req= %p\n", req);
req              1222 net/sunrpc/clnt.c 	xprt_init_bc_request(req, task);
req              1243 net/sunrpc/clnt.c void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages,
req              1250 net/sunrpc/clnt.c 	hdrsize += RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_ralign - 1;
req              1252 net/sunrpc/clnt.c 	xdr_inline_pages(&req->rq_rcv_buf, hdrsize << 2, pages, base, len);
req              1253 net/sunrpc/clnt.c 	trace_rpc_reply_pages(req);
req              1788 net/sunrpc/clnt.c 	struct rpc_rqst *req = task->tk_rqstp;
req              1789 net/sunrpc/clnt.c 	struct rpc_xprt *xprt = req->rq_xprt;
req              1798 net/sunrpc/clnt.c 	if (req->rq_buffer)
req              1812 net/sunrpc/clnt.c 	req->rq_callsize = RPC_CALLHDRSIZE + (auth->au_cslack << 1) +
req              1814 net/sunrpc/clnt.c 	req->rq_callsize <<= 2;
req              1819 net/sunrpc/clnt.c 	req->rq_rcvsize = RPC_REPHDRSIZE + auth->au_rslack + \
req              1821 net/sunrpc/clnt.c 	req->rq_rcvsize <<= 2;
req              1855 net/sunrpc/clnt.c 	struct rpc_rqst	*req = task->tk_rqstp;
req              1858 net/sunrpc/clnt.c 	xdr_buf_init(&req->rq_snd_buf,
req              1859 net/sunrpc/clnt.c 		     req->rq_buffer,
req              1860 net/sunrpc/clnt.c 		     req->rq_callsize);
req              1861 net/sunrpc/clnt.c 	xdr_buf_init(&req->rq_rcv_buf,
req              1862 net/sunrpc/clnt.c 		     req->rq_rbuffer,
req              1863 net/sunrpc/clnt.c 		     req->rq_rcvsize);
req              1865 net/sunrpc/clnt.c 	req->rq_reply_bytes_recvd = 0;
req              1866 net/sunrpc/clnt.c 	req->rq_snd_buf.head[0].iov_len = 0;
req              1867 net/sunrpc/clnt.c 	xdr_init_encode(&xdr, &req->rq_snd_buf,
req              1868 net/sunrpc/clnt.c 			req->rq_snd_buf.head[0].iov_base, req);
req              1869 net/sunrpc/clnt.c 	xdr_free_bvec(&req->rq_snd_buf);
req              2291 net/sunrpc/clnt.c 	struct rpc_rqst *req = task->tk_rqstp;
req              2329 net/sunrpc/clnt.c 		xprt_conditional_disconnect(req->rq_xprt,
req              2330 net/sunrpc/clnt.c 			req->rq_connect_cookie);
req              2412 net/sunrpc/clnt.c rpc_check_connected(const struct rpc_rqst *req)
req              2415 net/sunrpc/clnt.c 	if (!req || !req->rq_xprt)
req              2417 net/sunrpc/clnt.c 	return xprt_connected(req->rq_xprt);
req              2488 net/sunrpc/clnt.c 	struct rpc_rqst	*req = task->tk_rqstp;
req              2519 net/sunrpc/clnt.c 	if (!req->rq_reply_bytes_recvd)
req              2522 net/sunrpc/clnt.c 	req->rq_rcv_buf.len = req->rq_private_buf.len;
req              2525 net/sunrpc/clnt.c 	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
req              2526 net/sunrpc/clnt.c 				sizeof(req->rq_rcv_buf)) != 0);
req              2528 net/sunrpc/clnt.c 	xdr_init_decode(&xdr, &req->rq_rcv_buf,
req              2529 net/sunrpc/clnt.c 			req->rq_rcv_buf.head[0].iov_base, req);
req              2542 net/sunrpc/clnt.c 			xprt_conditional_disconnect(req->rq_xprt,
req              2543 net/sunrpc/clnt.c 						    req->rq_connect_cookie);
req              2560 net/sunrpc/clnt.c 	struct rpc_rqst	*req = task->tk_rqstp;
req              2568 net/sunrpc/clnt.c 	*p++ = req->rq_xid;
req               861 net/sunrpc/rpcb_clnt.c static void rpcb_enc_mapping(struct rpc_rqst *req, struct xdr_stream *xdr,
req               868 net/sunrpc/rpcb_clnt.c 			req->rq_task->tk_pid,
req               869 net/sunrpc/rpcb_clnt.c 			req->rq_task->tk_msg.rpc_proc->p_name,
req               879 net/sunrpc/rpcb_clnt.c static int rpcb_dec_getport(struct rpc_rqst *req, struct xdr_stream *xdr,
req               893 net/sunrpc/rpcb_clnt.c 	dprintk("RPC: %5u PMAP_%s result: %lu\n", req->rq_task->tk_pid,
req               894 net/sunrpc/rpcb_clnt.c 			req->rq_task->tk_msg.rpc_proc->p_name, port);
req               902 net/sunrpc/rpcb_clnt.c static int rpcb_dec_set(struct rpc_rqst *req, struct xdr_stream *xdr,
req               917 net/sunrpc/rpcb_clnt.c 			req->rq_task->tk_pid,
req               918 net/sunrpc/rpcb_clnt.c 			req->rq_task->tk_msg.rpc_proc->p_name,
req               938 net/sunrpc/rpcb_clnt.c static void rpcb_enc_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
req               945 net/sunrpc/rpcb_clnt.c 			req->rq_task->tk_pid,
req               946 net/sunrpc/rpcb_clnt.c 			req->rq_task->tk_msg.rpc_proc->p_name,
req               959 net/sunrpc/rpcb_clnt.c static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
req               981 net/sunrpc/rpcb_clnt.c 				req->rq_task->tk_pid);
req               991 net/sunrpc/rpcb_clnt.c 	dprintk("RPC: %5u RPCB_%s reply: %s\n", req->rq_task->tk_pid,
req               992 net/sunrpc/rpcb_clnt.c 			req->rq_task->tk_msg.rpc_proc->p_name, (char *)p);
req               994 net/sunrpc/rpcb_clnt.c 	if (rpc_uaddr2sockaddr(req->rq_xprt->xprt_net, (char *)p, len,
req              1003 net/sunrpc/rpcb_clnt.c 			req->rq_task->tk_pid,
req              1004 net/sunrpc/rpcb_clnt.c 			req->rq_task->tk_msg.rpc_proc->p_name);
req               153 net/sunrpc/stats.c 	struct rpc_rqst *req = task->tk_rqstp;
req               156 net/sunrpc/stats.c 	if (!op_metrics || !req)
req               164 net/sunrpc/stats.c 	op_metrics->om_ntrans += max(req->rq_ntrans, 1);
req               167 net/sunrpc/stats.c 	op_metrics->om_bytes_sent += req->rq_xmit_bytes_sent;
req               168 net/sunrpc/stats.c 	op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd;
req               171 net/sunrpc/stats.c 	if (ktime_to_ns(req->rq_xtime)) {
req               172 net/sunrpc/stats.c 		backlog = ktime_sub(req->rq_xtime, task->tk_start);
req               176 net/sunrpc/stats.c 	op_metrics->om_rtt = ktime_add(op_metrics->om_rtt, req->rq_rtt);
req               185 net/sunrpc/stats.c 	trace_rpc_stats_latency(req->rq_task, backlog, req->rq_rtt, execute);
req              1550 net/sunrpc/svc.c bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
req              1559 net/sunrpc/svc.c 	dprintk("svc: %s(%p)\n", __func__, req);
req              1562 net/sunrpc/svc.c 	rqstp->rq_xid = req->rq_xid;
req              1563 net/sunrpc/svc.c 	rqstp->rq_prot = req->rq_xprt->prot;
req              1565 net/sunrpc/svc.c 	rqstp->rq_bc_net = req->rq_xprt->xprt_net;
req              1567 net/sunrpc/svc.c 	rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
req              1568 net/sunrpc/svc.c 	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
req              1569 net/sunrpc/svc.c 	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
req              1570 net/sunrpc/svc.c 	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
req              1573 net/sunrpc/svc.c 	rqstp->rq_arg.len = req->rq_private_buf.len;
req              1598 net/sunrpc/svc.c 	atomic_dec(&req->rq_xprt->bc_slot_count);
req              1601 net/sunrpc/svc.c 		xprt_free_bc_request(req);
req              1606 net/sunrpc/svc.c 	memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
req              1607 net/sunrpc/svc.c 	task = rpc_run_bc_task(req);
req                31 net/sunrpc/svc_xprt.c static struct cache_deferred_req *svc_defer(struct cache_req *req);
req              1172 net/sunrpc/svc_xprt.c static struct cache_deferred_req *svc_defer(struct cache_req *req)
req              1174 net/sunrpc/svc_xprt.c 	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
req               963 net/sunrpc/svcsock.c 	struct rpc_rqst *req = NULL;
req               975 net/sunrpc/svcsock.c 	req = xprt_lookup_rqst(bc_xprt, xid);
req               976 net/sunrpc/svcsock.c 	if (!req)
req               979 net/sunrpc/svcsock.c 	memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
req               985 net/sunrpc/svcsock.c 	dst = &req->rq_private_buf.head[0];
req               990 net/sunrpc/svcsock.c 	xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len);
req                77 net/sunrpc/xprt.c static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
req                79 net/sunrpc/xprt.c 	unsigned long timeout = jiffies + req->rq_timeout;
req                81 net/sunrpc/xprt.c 	if (time_before(timeout, req->rq_majortimeo))
req                83 net/sunrpc/xprt.c 	return req->rq_majortimeo;
req               204 net/sunrpc/xprt.c 	struct rpc_rqst *req = task->tk_rqstp;
req               225 net/sunrpc/xprt.c 				xprt_request_timeout(req));
req               268 net/sunrpc/xprt.c 	struct rpc_rqst *req = task->tk_rqstp;
req               275 net/sunrpc/xprt.c 	if (req == NULL) {
req               292 net/sunrpc/xprt.c 				xprt_request_timeout(req));
req               394 net/sunrpc/xprt.c __xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
req               396 net/sunrpc/xprt.c 	if (req->rq_cong)
req               399 net/sunrpc/xprt.c 			req->rq_task->tk_pid, xprt->cong, xprt->cwnd);
req               404 net/sunrpc/xprt.c 	req->rq_cong = 1;
req               414 net/sunrpc/xprt.c __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
req               416 net/sunrpc/xprt.c 	if (!req->rq_cong)
req               418 net/sunrpc/xprt.c 	req->rq_cong = 0;
req               432 net/sunrpc/xprt.c xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
req               436 net/sunrpc/xprt.c 	if (req->rq_cong)
req               439 net/sunrpc/xprt.c 	ret = __xprt_get_cong(xprt, req) != 0;
req               453 net/sunrpc/xprt.c 	struct rpc_rqst *req = task->tk_rqstp;
req               455 net/sunrpc/xprt.c 	__xprt_put_cong(req->rq_xprt, req);
req               497 net/sunrpc/xprt.c 	struct rpc_rqst *req = task->tk_rqstp;
req               515 net/sunrpc/xprt.c 	__xprt_put_cong(xprt, req);
req               587 net/sunrpc/xprt.c static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
req               589 net/sunrpc/xprt.c 	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
req               590 net/sunrpc/xprt.c 	unsigned long majortimeo = req->rq_timeout;
req               601 net/sunrpc/xprt.c static void xprt_reset_majortimeo(struct rpc_rqst *req)
req               603 net/sunrpc/xprt.c 	req->rq_majortimeo += xprt_calc_majortimeo(req);
req               606 net/sunrpc/xprt.c static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
req               609 net/sunrpc/xprt.c 	struct rpc_xprt *xprt = req->rq_xprt;
req               615 net/sunrpc/xprt.c 	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
req               616 net/sunrpc/xprt.c 	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
req               624 net/sunrpc/xprt.c int xprt_adjust_timeout(struct rpc_rqst *req)
req               626 net/sunrpc/xprt.c 	struct rpc_xprt *xprt = req->rq_xprt;
req               627 net/sunrpc/xprt.c 	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
req               630 net/sunrpc/xprt.c 	if (time_before(jiffies, req->rq_majortimeo)) {
req               632 net/sunrpc/xprt.c 			req->rq_timeout <<= 1;
req               634 net/sunrpc/xprt.c 			req->rq_timeout += to->to_increment;
req               635 net/sunrpc/xprt.c 		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
req               636 net/sunrpc/xprt.c 			req->rq_timeout = to->to_maxval;
req               637 net/sunrpc/xprt.c 		req->rq_retries++;
req               639 net/sunrpc/xprt.c 		req->rq_timeout = to->to_initval;
req               640 net/sunrpc/xprt.c 		req->rq_retries = 0;
req               641 net/sunrpc/xprt.c 		xprt_reset_majortimeo(req);
req               644 net/sunrpc/xprt.c 		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
req               649 net/sunrpc/xprt.c 	if (req->rq_timeout == 0) {
req               651 net/sunrpc/xprt.c 		req->rq_timeout = 5 * HZ;
req               715 net/sunrpc/xprt.c 	struct rpc_rqst *req = task->tk_rqstp;
req               716 net/sunrpc/xprt.c 	struct rpc_xprt *xprt = req->rq_xprt;
req               718 net/sunrpc/xprt.c 	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
req               907 net/sunrpc/xprt.c 	struct rpc_rqst *req;
req               910 net/sunrpc/xprt.c 		req = rb_entry(n, struct rpc_rqst, rq_recv);
req               911 net/sunrpc/xprt.c 		switch (xprt_xid_cmp(xid, req->rq_xid)) {
req               919 net/sunrpc/xprt.c 			return req;
req               930 net/sunrpc/xprt.c 	struct rpc_rqst *req;
req               934 net/sunrpc/xprt.c 		req = rb_entry(n, struct rpc_rqst, rq_recv);
req               935 net/sunrpc/xprt.c 		switch(xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
req               943 net/sunrpc/xprt.c 			WARN_ON_ONCE(new != req);
req               952 net/sunrpc/xprt.c xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
req               954 net/sunrpc/xprt.c 	rb_erase(&req->rq_recv, &xprt->recv_queue);
req               984 net/sunrpc/xprt.c xprt_is_pinned_rqst(struct rpc_rqst *req)
req               986 net/sunrpc/xprt.c 	return atomic_read(&req->rq_pin) != 0;
req               996 net/sunrpc/xprt.c void xprt_pin_rqst(struct rpc_rqst *req)
req               998 net/sunrpc/xprt.c 	atomic_inc(&req->rq_pin);
req              1008 net/sunrpc/xprt.c void xprt_unpin_rqst(struct rpc_rqst *req)
req              1010 net/sunrpc/xprt.c 	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
req              1011 net/sunrpc/xprt.c 		atomic_dec(&req->rq_pin);
req              1014 net/sunrpc/xprt.c 	if (atomic_dec_and_test(&req->rq_pin))
req              1015 net/sunrpc/xprt.c 		wake_up_var(&req->rq_pin);
req              1019 net/sunrpc/xprt.c static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
req              1021 net/sunrpc/xprt.c 	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
req              1032 net/sunrpc/xprt.c xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
req              1046 net/sunrpc/xprt.c 	struct rpc_rqst *req = task->tk_rqstp;
req              1047 net/sunrpc/xprt.c 	struct rpc_xprt *xprt = req->rq_xprt;
req              1049 net/sunrpc/xprt.c 	if (!xprt_request_need_enqueue_receive(task, req))
req              1056 net/sunrpc/xprt.c 	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
req              1057 net/sunrpc/xprt.c 			sizeof(req->rq_private_buf));
req              1060 net/sunrpc/xprt.c 	xprt_request_rb_insert(xprt, req);
req              1077 net/sunrpc/xprt.c 	struct rpc_rqst *req = task->tk_rqstp;
req              1080 net/sunrpc/xprt.c 		xprt_request_rb_remove(req->rq_xprt, req);
req              1091 net/sunrpc/xprt.c 	struct rpc_rqst *req = task->tk_rqstp;
req              1094 net/sunrpc/xprt.c 	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));
req              1097 net/sunrpc/xprt.c 		if (req->rq_ntrans == 1)
req              1099 net/sunrpc/xprt.c 		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
req              1113 net/sunrpc/xprt.c 	struct rpc_rqst *req = task->tk_rqstp;
req              1114 net/sunrpc/xprt.c 	struct rpc_xprt *xprt = req->rq_xprt;
req              1117 net/sunrpc/xprt.c 			task->tk_pid, ntohl(req->rq_xid), copied);
req              1118 net/sunrpc/xprt.c 	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);
req              1122 net/sunrpc/xprt.c 	req->rq_private_buf.len = copied;
req              1126 net/sunrpc/xprt.c 	req->rq_reply_bytes_recvd = copied;
req              1134 net/sunrpc/xprt.c 	struct rpc_rqst *req = task->tk_rqstp;
req              1135 net/sunrpc/xprt.c 	struct rpc_xprt *xprt = req->rq_xprt;
req              1140 net/sunrpc/xprt.c 	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
req              1141 net/sunrpc/xprt.c 	if (!req->rq_reply_bytes_recvd) {
req              1159 net/sunrpc/xprt.c 	struct rpc_rqst *req = task->tk_rqstp;
req              1161 net/sunrpc/xprt.c 	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
req              1162 net/sunrpc/xprt.c 			xprt_request_timeout(req));
req              1178 net/sunrpc/xprt.c 	struct rpc_rqst *req = task->tk_rqstp;
req              1183 net/sunrpc/xprt.c 	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
req              1186 net/sunrpc/xprt.c 	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
req              1198 net/sunrpc/xprt.c 	struct rpc_rqst *req = task->tk_rqstp;
req              1199 net/sunrpc/xprt.c 	struct rpc_xprt *xprt = req->rq_xprt;
req              1224 net/sunrpc/xprt.c xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
req              1238 net/sunrpc/xprt.c 	struct rpc_rqst *pos, *req = task->tk_rqstp;
req              1239 net/sunrpc/xprt.c 	struct rpc_xprt *xprt = req->rq_xprt;
req              1241 net/sunrpc/xprt.c 	if (xprt_request_need_enqueue_transmit(task, req)) {
req              1242 net/sunrpc/xprt.c 		req->rq_bytes_sent = 0;
req              1248 net/sunrpc/xprt.c 		if (req->rq_cong) {
req              1254 net/sunrpc/xprt.c 				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
req              1255 net/sunrpc/xprt.c 				INIT_LIST_HEAD(&req->rq_xmit2);
req              1266 net/sunrpc/xprt.c 				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
req              1267 net/sunrpc/xprt.c 				INIT_LIST_HEAD(&req->rq_xmit2);
req              1271 net/sunrpc/xprt.c 		} else if (!req->rq_seqno) {
req              1275 net/sunrpc/xprt.c 				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
req              1276 net/sunrpc/xprt.c 				INIT_LIST_HEAD(&req->rq_xmit);
req              1281 net/sunrpc/xprt.c 		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
req              1282 net/sunrpc/xprt.c 		INIT_LIST_HEAD(&req->rq_xmit2);
req              1300 net/sunrpc/xprt.c 	struct rpc_rqst *req = task->tk_rqstp;
req              1304 net/sunrpc/xprt.c 	if (!list_empty(&req->rq_xmit)) {
req              1305 net/sunrpc/xprt.c 		list_del(&req->rq_xmit);
req              1306 net/sunrpc/xprt.c 		if (!list_empty(&req->rq_xmit2)) {
req              1307 net/sunrpc/xprt.c 			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
req              1309 net/sunrpc/xprt.c 			list_del(&req->rq_xmit2);
req              1313 net/sunrpc/xprt.c 		list_del(&req->rq_xmit2);
req              1325 net/sunrpc/xprt.c 	struct rpc_rqst *req = task->tk_rqstp;
req              1326 net/sunrpc/xprt.c 	struct rpc_xprt *xprt = req->rq_xprt;
req              1343 net/sunrpc/xprt.c 	struct rpc_rqst	*req = task->tk_rqstp;
req              1344 net/sunrpc/xprt.c 	struct rpc_xprt *xprt = req->rq_xprt;
req              1348 net/sunrpc/xprt.c 	    xprt_is_pinned_rqst(req)) {
req              1352 net/sunrpc/xprt.c 		while (xprt_is_pinned_rqst(req)) {
req              1355 net/sunrpc/xprt.c 			xprt_wait_on_pinned_rqst(req);
req              1371 net/sunrpc/xprt.c xprt_request_prepare(struct rpc_rqst *req)
req              1373 net/sunrpc/xprt.c 	struct rpc_xprt *xprt = req->rq_xprt;
req              1376 net/sunrpc/xprt.c 		xprt->ops->prepare_request(req);
req              1398 net/sunrpc/xprt.c 	struct rpc_rqst	*req = task->tk_rqstp;
req              1399 net/sunrpc/xprt.c 	struct rpc_xprt	*xprt = req->rq_xprt;
req              1430 net/sunrpc/xprt.c xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
req              1432 net/sunrpc/xprt.c 	struct rpc_xprt *xprt = req->rq_xprt;
req              1433 net/sunrpc/xprt.c 	struct rpc_task *task = req->rq_task;
req              1438 net/sunrpc/xprt.c 	if (!req->rq_bytes_sent) {
req              1459 net/sunrpc/xprt.c 	req->rq_ntrans++;
req              1462 net/sunrpc/xprt.c 	status = xprt->ops->send_request(req);
req              1464 net/sunrpc/xprt.c 		req->rq_ntrans--;
req              1465 net/sunrpc/xprt.c 		trace_xprt_transmit(req, status);
req              1484 net/sunrpc/xprt.c 	req->rq_connect_cookie = connect_cookie;
req              1486 net/sunrpc/xprt.c 	trace_xprt_transmit(req, status);
req              1504 net/sunrpc/xprt.c 	struct rpc_rqst *next, *req = task->tk_rqstp;
req              1505 net/sunrpc/xprt.c 	struct rpc_xprt	*xprt = req->rq_xprt;
req              1515 net/sunrpc/xprt.c 		if (status == -EBADMSG && next != req)
req              1561 net/sunrpc/xprt.c 	struct rpc_rqst *req = ERR_PTR(-EAGAIN);
req              1567 net/sunrpc/xprt.c 	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
req              1569 net/sunrpc/xprt.c 	if (req != NULL)
req              1572 net/sunrpc/xprt.c 	req = ERR_PTR(-ENOMEM);
req              1574 net/sunrpc/xprt.c 	return req;
req              1577 net/sunrpc/xprt.c static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
req              1581 net/sunrpc/xprt.c 		kfree(req);
req              1589 net/sunrpc/xprt.c 	struct rpc_rqst *req;
req              1593 net/sunrpc/xprt.c 		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
req              1594 net/sunrpc/xprt.c 		list_del(&req->rq_list);
req              1597 net/sunrpc/xprt.c 	req = xprt_dynamic_alloc_slot(xprt);
req              1598 net/sunrpc/xprt.c 	if (!IS_ERR(req))
req              1600 net/sunrpc/xprt.c 	switch (PTR_ERR(req)) {
req              1621 net/sunrpc/xprt.c 	task->tk_rqstp = req;
req              1625 net/sunrpc/xprt.c void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
req              1628 net/sunrpc/xprt.c 	if (!xprt_dynamic_free_slot(xprt, req)) {
req              1629 net/sunrpc/xprt.c 		memset(req, 0, sizeof(*req));	/* mark unused */
req              1630 net/sunrpc/xprt.c 		list_add(&req->rq_list, &xprt->free);
req              1639 net/sunrpc/xprt.c 	struct rpc_rqst *req;
req              1641 net/sunrpc/xprt.c 		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
req              1642 net/sunrpc/xprt.c 		list_del(&req->rq_list);
req              1643 net/sunrpc/xprt.c 		kfree(req);
req              1652 net/sunrpc/xprt.c 	struct rpc_rqst *req;
req              1662 net/sunrpc/xprt.c 		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
req              1663 net/sunrpc/xprt.c 		if (!req)
req              1665 net/sunrpc/xprt.c 		list_add(&req->rq_list, &xprt->free);
req              1692 net/sunrpc/xprt.c xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
req              1694 net/sunrpc/xprt.c 	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
req              1718 net/sunrpc/xprt.c 	struct rpc_rqst	*req = task->tk_rqstp;
req              1720 net/sunrpc/xprt.c 	req->rq_task	= task;
req              1721 net/sunrpc/xprt.c 	req->rq_xprt    = xprt;
req              1722 net/sunrpc/xprt.c 	req->rq_buffer  = NULL;
req              1723 net/sunrpc/xprt.c 	req->rq_xid	= xprt_alloc_xid(xprt);
req              1724 net/sunrpc/xprt.c 	xprt_init_connect_cookie(req, xprt);
req              1725 net/sunrpc/xprt.c 	req->rq_snd_buf.len = 0;
req              1726 net/sunrpc/xprt.c 	req->rq_snd_buf.buflen = 0;
req              1727 net/sunrpc/xprt.c 	req->rq_rcv_buf.len = 0;
req              1728 net/sunrpc/xprt.c 	req->rq_rcv_buf.buflen = 0;
req              1729 net/sunrpc/xprt.c 	req->rq_snd_buf.bvec = NULL;
req              1730 net/sunrpc/xprt.c 	req->rq_rcv_buf.bvec = NULL;
req              1731 net/sunrpc/xprt.c 	req->rq_release_snd_buf = NULL;
req              1732 net/sunrpc/xprt.c 	xprt_init_majortimeo(task, req);
req              1734 net/sunrpc/xprt.c 			req, ntohl(req->rq_xid));
req              1795 net/sunrpc/xprt.c 	struct rpc_rqst	*req = task->tk_rqstp;
req              1797 net/sunrpc/xprt.c 	if (req == NULL) {
req              1805 net/sunrpc/xprt.c 	xprt = req->rq_xprt;
req              1813 net/sunrpc/xprt.c 	if (req->rq_buffer)
req              1816 net/sunrpc/xprt.c 	xdr_free_bvec(&req->rq_rcv_buf);
req              1817 net/sunrpc/xprt.c 	xdr_free_bvec(&req->rq_snd_buf);
req              1818 net/sunrpc/xprt.c 	if (req->rq_cred != NULL)
req              1819 net/sunrpc/xprt.c 		put_rpccred(req->rq_cred);
req              1821 net/sunrpc/xprt.c 	if (req->rq_release_snd_buf)
req              1822 net/sunrpc/xprt.c 		req->rq_release_snd_buf(req);
req              1824 net/sunrpc/xprt.c 	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
req              1825 net/sunrpc/xprt.c 	if (likely(!bc_prealloc(req)))
req              1826 net/sunrpc/xprt.c 		xprt->ops->free_slot(xprt, req);
req              1828 net/sunrpc/xprt.c 		xprt_free_bc_request(req);
req              1833 net/sunrpc/xprt.c xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
req              1835 net/sunrpc/xprt.c 	struct xdr_buf *xbufp = &req->rq_snd_buf;
req              1837 net/sunrpc/xprt.c 	task->tk_rqstp = req;
req              1838 net/sunrpc/xprt.c 	req->rq_task = task;
req              1839 net/sunrpc/xprt.c 	xprt_init_connect_cookie(req, req->rq_xprt);
req                63 net/sunrpc/xprtrdma/backchannel.c 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
req                66 net/sunrpc/xprtrdma/backchannel.c 	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
req                67 net/sunrpc/xprtrdma/backchannel.c 	xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
req                68 net/sunrpc/xprtrdma/backchannel.c 			rdmab_data(req->rl_rdmabuf), rqst);
req                70 net/sunrpc/xprtrdma/backchannel.c 	p = xdr_reserve_space(&req->rl_stream, 28);
req                81 net/sunrpc/xprtrdma/backchannel.c 	if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
req               105 net/sunrpc/xprtrdma/backchannel.c 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
req               118 net/sunrpc/xprtrdma/backchannel.c 	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
req               157 net/sunrpc/xprtrdma/backchannel.c 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
req               160 net/sunrpc/xprtrdma/backchannel.c 	rpcrdma_recv_buffer_put(req->rl_reply);
req               161 net/sunrpc/xprtrdma/backchannel.c 	req->rl_reply = NULL;
req               172 net/sunrpc/xprtrdma/backchannel.c 	struct rpcrdma_req *req;
req               194 net/sunrpc/xprtrdma/backchannel.c 	req = rpcrdma_req_create(r_xprt, size, GFP_KERNEL);
req               195 net/sunrpc/xprtrdma/backchannel.c 	if (!req)
req               199 net/sunrpc/xprtrdma/backchannel.c 	rqst = &req->rl_slot;
req               202 net/sunrpc/xprtrdma/backchannel.c 	xdr_buf_init(&rqst->rq_snd_buf, rdmab_data(req->rl_sendbuf), size);
req               222 net/sunrpc/xprtrdma/backchannel.c 	struct rpcrdma_req *req;
req               257 net/sunrpc/xprtrdma/backchannel.c 	req = rpcr_to_rdmar(rqst);
req               258 net/sunrpc/xprtrdma/backchannel.c 	req->rl_reply = rep;
req               128 net/sunrpc/xprtrdma/frwr_ops.c void frwr_recycle(struct rpcrdma_req *req)
req               132 net/sunrpc/xprtrdma/frwr_ops.c 	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
req               146 net/sunrpc/xprtrdma/frwr_ops.c void frwr_reset(struct rpcrdma_req *req)
req               150 net/sunrpc/xprtrdma/frwr_ops.c 	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
req               423 net/sunrpc/xprtrdma/frwr_ops.c int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
req               428 net/sunrpc/xprtrdma/frwr_ops.c 	post_wr = &req->rl_sendctx->sc_wr;
req               429 net/sunrpc/xprtrdma/frwr_ops.c 	list_for_each_entry(mr, &req->rl_registered, mr_list) {
req               526 net/sunrpc/xprtrdma/frwr_ops.c void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
req               541 net/sunrpc/xprtrdma/frwr_ops.c 	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {
req               586 net/sunrpc/xprtrdma/frwr_ops.c 	trace_xprtrdma_post_linv(req, rc);
req               631 net/sunrpc/xprtrdma/frwr_ops.c void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
req               644 net/sunrpc/xprtrdma/frwr_ops.c 	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {
req               682 net/sunrpc/xprtrdma/frwr_ops.c 	trace_xprtrdma_post_linv(req, rc);
req               695 net/sunrpc/xprtrdma/frwr_ops.c 	rpcrdma_complete_rqst(req->rl_reply);
req               346 net/sunrpc/xprtrdma/rpc_rdma.c 						 struct rpcrdma_req *req,
req               351 net/sunrpc/xprtrdma/rpc_rdma.c 	*mr = rpcrdma_mr_pop(&req->rl_free_mrs);
req               356 net/sunrpc/xprtrdma/rpc_rdma.c 		trace_xprtrdma_mr_get(req);
req               357 net/sunrpc/xprtrdma/rpc_rdma.c 		(*mr)->mr_req = req;
req               360 net/sunrpc/xprtrdma/rpc_rdma.c 	rpcrdma_mr_push(*mr, &req->rl_registered);
req               361 net/sunrpc/xprtrdma/rpc_rdma.c 	return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr);
req               364 net/sunrpc/xprtrdma/rpc_rdma.c 	trace_xprtrdma_nomrs(req);
req               386 net/sunrpc/xprtrdma/rpc_rdma.c 				    struct rpcrdma_req *req,
req               390 net/sunrpc/xprtrdma/rpc_rdma.c 	struct xdr_stream *xdr = &req->rl_stream;
req               402 net/sunrpc/xprtrdma/rpc_rdma.c 	seg = req->rl_segments;
req               409 net/sunrpc/xprtrdma/rpc_rdma.c 		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, false, &mr);
req               441 net/sunrpc/xprtrdma/rpc_rdma.c 				     struct rpcrdma_req *req,
req               445 net/sunrpc/xprtrdma/rpc_rdma.c 	struct xdr_stream *xdr = &req->rl_stream;
req               454 net/sunrpc/xprtrdma/rpc_rdma.c 	seg = req->rl_segments;
req               470 net/sunrpc/xprtrdma/rpc_rdma.c 		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
req               504 net/sunrpc/xprtrdma/rpc_rdma.c 				      struct rpcrdma_req *req,
req               508 net/sunrpc/xprtrdma/rpc_rdma.c 	struct xdr_stream *xdr = &req->rl_stream;
req               517 net/sunrpc/xprtrdma/rpc_rdma.c 	seg = req->rl_segments;
req               531 net/sunrpc/xprtrdma/rpc_rdma.c 		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
req               553 net/sunrpc/xprtrdma/rpc_rdma.c 	struct rpcrdma_req *req =
req               555 net/sunrpc/xprtrdma/rpc_rdma.c 	struct rpcrdma_rep *rep = req->rl_reply;
req               588 net/sunrpc/xprtrdma/rpc_rdma.c 				    struct rpcrdma_req *req, u32 len)
req               590 net/sunrpc/xprtrdma/rpc_rdma.c 	struct rpcrdma_sendctx *sc = req->rl_sendctx;
req               591 net/sunrpc/xprtrdma/rpc_rdma.c 	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
req               614 net/sunrpc/xprtrdma/rpc_rdma.c 				     struct rpcrdma_req *req,
req               618 net/sunrpc/xprtrdma/rpc_rdma.c 	struct rpcrdma_sendctx *sc = req->rl_sendctx;
req               620 net/sunrpc/xprtrdma/rpc_rdma.c 	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
req               717 net/sunrpc/xprtrdma/rpc_rdma.c 		kref_get(&req->rl_kref);
req               747 net/sunrpc/xprtrdma/rpc_rdma.c 			  struct rpcrdma_req *req, u32 hdrlen,
req               753 net/sunrpc/xprtrdma/rpc_rdma.c 	req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
req               754 net/sunrpc/xprtrdma/rpc_rdma.c 	if (!req->rl_sendctx)
req               756 net/sunrpc/xprtrdma/rpc_rdma.c 	req->rl_sendctx->sc_wr.num_sge = 0;
req               757 net/sunrpc/xprtrdma/rpc_rdma.c 	req->rl_sendctx->sc_unmap_count = 0;
req               758 net/sunrpc/xprtrdma/rpc_rdma.c 	req->rl_sendctx->sc_req = req;
req               759 net/sunrpc/xprtrdma/rpc_rdma.c 	kref_init(&req->rl_kref);
req               762 net/sunrpc/xprtrdma/rpc_rdma.c 	if (!rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen))
req               765 net/sunrpc/xprtrdma/rpc_rdma.c 		if (!rpcrdma_prepare_msg_sges(r_xprt, req, xdr, rtype))
req               770 net/sunrpc/xprtrdma/rpc_rdma.c 	trace_xprtrdma_prepsend_failed(&req->rl_slot, ret);
req               796 net/sunrpc/xprtrdma/rpc_rdma.c 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
req               797 net/sunrpc/xprtrdma/rpc_rdma.c 	struct xdr_stream *xdr = &req->rl_stream;
req               803 net/sunrpc/xprtrdma/rpc_rdma.c 	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
req               804 net/sunrpc/xprtrdma/rpc_rdma.c 	xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf),
req               870 net/sunrpc/xprtrdma/rpc_rdma.c 	frwr_recycle(req);
req               894 net/sunrpc/xprtrdma/rpc_rdma.c 	ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
req               897 net/sunrpc/xprtrdma/rpc_rdma.c 	ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
req               900 net/sunrpc/xprtrdma/rpc_rdma.c 	ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
req               904 net/sunrpc/xprtrdma/rpc_rdma.c 	ret = rpcrdma_prepare_send_sges(r_xprt, req, req->rl_hdrbuf.len,
req               909 net/sunrpc/xprtrdma/rpc_rdma.c 	trace_xprtrdma_marshal(req, rtype, wtype);
req               915 net/sunrpc/xprtrdma/rpc_rdma.c 	frwr_reset(req);
req              1299 net/sunrpc/xprtrdma/rpc_rdma.c 	struct rpcrdma_req *req =
req              1302 net/sunrpc/xprtrdma/rpc_rdma.c 	rpcrdma_complete_rqst(req->rl_reply);
req              1317 net/sunrpc/xprtrdma/rpc_rdma.c 	struct rpcrdma_req *req;
req              1367 net/sunrpc/xprtrdma/rpc_rdma.c 	req = rpcr_to_rdmar(rqst);
req              1368 net/sunrpc/xprtrdma/rpc_rdma.c 	if (req->rl_reply) {
req              1369 net/sunrpc/xprtrdma/rpc_rdma.c 		trace_xprtrdma_leaked_rep(rqst, req->rl_reply);
req              1370 net/sunrpc/xprtrdma/rpc_rdma.c 		rpcrdma_recv_buffer_put(req->rl_reply);
req              1372 net/sunrpc/xprtrdma/rpc_rdma.c 	req->rl_reply = rep;
req              1375 net/sunrpc/xprtrdma/rpc_rdma.c 	trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);
req              1378 net/sunrpc/xprtrdma/rpc_rdma.c 		frwr_reminv(rep, &req->rl_registered);
req              1379 net/sunrpc/xprtrdma/rpc_rdma.c 	if (!list_empty(&req->rl_registered))
req              1380 net/sunrpc/xprtrdma/rpc_rdma.c 		frwr_unmap_async(r_xprt, req);
req              1383 net/sunrpc/xprtrdma/rpc_rdma.c 		kref_put(&req->rl_kref, rpcrdma_reply_done);
req                32 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	struct rpc_rqst *req;
req                57 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	req = xprt_lookup_rqst(xprt, xid);
req                58 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	if (!req)
req                61 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	dst = &req->rq_private_buf.head[0];
req                62 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
req                66 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	xprt_pin_rqst(req);
req                81 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	xprt_complete_rqst(req->rq_task, rcvbuf->len);
req                82 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	xprt_unpin_rqst(req);
req               566 net/sunrpc/xprtrdma/transport.c 	struct rpcrdma_req *req;
req               568 net/sunrpc/xprtrdma/transport.c 	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
req               569 net/sunrpc/xprtrdma/transport.c 	if (!req)
req               571 net/sunrpc/xprtrdma/transport.c 	task->tk_rqstp = &req->rl_slot;
req               625 net/sunrpc/xprtrdma/transport.c 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
req               632 net/sunrpc/xprtrdma/transport.c 	if (!rpcrdma_check_regbuf(r_xprt, req->rl_sendbuf, rqst->rq_callsize,
req               635 net/sunrpc/xprtrdma/transport.c 	if (!rpcrdma_check_regbuf(r_xprt, req->rl_recvbuf, rqst->rq_rcvsize,
req               639 net/sunrpc/xprtrdma/transport.c 	rqst->rq_buffer = rdmab_data(req->rl_sendbuf);
req               640 net/sunrpc/xprtrdma/transport.c 	rqst->rq_rbuffer = rdmab_data(req->rl_recvbuf);
req               641 net/sunrpc/xprtrdma/transport.c 	trace_xprtrdma_op_allocate(task, req);
req               660 net/sunrpc/xprtrdma/transport.c 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
req               662 net/sunrpc/xprtrdma/transport.c 	trace_xprtrdma_op_free(task, req);
req               664 net/sunrpc/xprtrdma/transport.c 	if (!list_empty(&req->rl_registered))
req               665 net/sunrpc/xprtrdma/transport.c 		frwr_unmap_sync(r_xprt, req);
req               694 net/sunrpc/xprtrdma/transport.c 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
req               718 net/sunrpc/xprtrdma/transport.c 	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
req               408 net/sunrpc/xprtrdma/verbs.c 	struct rpcrdma_req *req;
req               433 net/sunrpc/xprtrdma/verbs.c 	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
req               434 net/sunrpc/xprtrdma/verbs.c 		rpcrdma_regbuf_dma_unmap(req->rl_rdmabuf);
req               435 net/sunrpc/xprtrdma/verbs.c 		rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
req               436 net/sunrpc/xprtrdma/verbs.c 		rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);
req              1003 net/sunrpc/xprtrdma/verbs.c 	struct rpcrdma_req *req;
req              1006 net/sunrpc/xprtrdma/verbs.c 	req = kzalloc(sizeof(*req), flags);
req              1007 net/sunrpc/xprtrdma/verbs.c 	if (req == NULL)
req              1018 net/sunrpc/xprtrdma/verbs.c 	req->rl_rdmabuf = rb;
req              1019 net/sunrpc/xprtrdma/verbs.c 	xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb));
req              1021 net/sunrpc/xprtrdma/verbs.c 	req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, flags);
req              1022 net/sunrpc/xprtrdma/verbs.c 	if (!req->rl_sendbuf)
req              1025 net/sunrpc/xprtrdma/verbs.c 	req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE, flags);
req              1026 net/sunrpc/xprtrdma/verbs.c 	if (!req->rl_recvbuf)
req              1029 net/sunrpc/xprtrdma/verbs.c 	INIT_LIST_HEAD(&req->rl_free_mrs);
req              1030 net/sunrpc/xprtrdma/verbs.c 	INIT_LIST_HEAD(&req->rl_registered);
req              1032 net/sunrpc/xprtrdma/verbs.c 	list_add(&req->rl_all, &buffer->rb_allreqs);
req              1034 net/sunrpc/xprtrdma/verbs.c 	return req;
req              1037 net/sunrpc/xprtrdma/verbs.c 	kfree(req->rl_sendbuf);
req              1039 net/sunrpc/xprtrdma/verbs.c 	kfree(req->rl_rdmabuf);
req              1041 net/sunrpc/xprtrdma/verbs.c 	kfree(req);
req              1058 net/sunrpc/xprtrdma/verbs.c 	struct rpcrdma_req *req;
req              1060 net/sunrpc/xprtrdma/verbs.c 	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
req              1062 net/sunrpc/xprtrdma/verbs.c 		req->rl_slot.rq_cong = 0;
req              1165 net/sunrpc/xprtrdma/verbs.c 		struct rpcrdma_req *req;
req              1167 net/sunrpc/xprtrdma/verbs.c 		req = rpcrdma_req_create(r_xprt, RPCRDMA_V1_DEF_INLINE_SIZE,
req              1169 net/sunrpc/xprtrdma/verbs.c 		if (!req)
req              1171 net/sunrpc/xprtrdma/verbs.c 		list_add(&req->rl_list, &buf->rb_send_bufs);
req              1194 net/sunrpc/xprtrdma/verbs.c void rpcrdma_req_destroy(struct rpcrdma_req *req)
req              1198 net/sunrpc/xprtrdma/verbs.c 	list_del(&req->rl_all);
req              1200 net/sunrpc/xprtrdma/verbs.c 	while ((mr = rpcrdma_mr_pop(&req->rl_free_mrs))) {
req              1210 net/sunrpc/xprtrdma/verbs.c 	rpcrdma_regbuf_free(req->rl_recvbuf);
req              1211 net/sunrpc/xprtrdma/verbs.c 	rpcrdma_regbuf_free(req->rl_sendbuf);
req              1212 net/sunrpc/xprtrdma/verbs.c 	rpcrdma_regbuf_free(req->rl_rdmabuf);
req              1213 net/sunrpc/xprtrdma/verbs.c 	kfree(req);
req              1261 net/sunrpc/xprtrdma/verbs.c 		struct rpcrdma_req *req;
req              1263 net/sunrpc/xprtrdma/verbs.c 		req = list_first_entry(&buf->rb_send_bufs,
req              1265 net/sunrpc/xprtrdma/verbs.c 		list_del(&req->rl_list);
req              1266 net/sunrpc/xprtrdma/verbs.c 		rpcrdma_req_destroy(req);
req              1319 net/sunrpc/xprtrdma/verbs.c 	struct rpcrdma_req *req;
req              1322 net/sunrpc/xprtrdma/verbs.c 	req = list_first_entry_or_null(&buffers->rb_send_bufs,
req              1324 net/sunrpc/xprtrdma/verbs.c 	if (req)
req              1325 net/sunrpc/xprtrdma/verbs.c 		list_del_init(&req->rl_list);
req              1327 net/sunrpc/xprtrdma/verbs.c 	return req;
req              1336 net/sunrpc/xprtrdma/verbs.c void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
req              1338 net/sunrpc/xprtrdma/verbs.c 	if (req->rl_reply)
req              1339 net/sunrpc/xprtrdma/verbs.c 		rpcrdma_rep_put(buffers, req->rl_reply);
req              1340 net/sunrpc/xprtrdma/verbs.c 	req->rl_reply = NULL;
req              1343 net/sunrpc/xprtrdma/verbs.c 	list_add(&req->rl_list, &buffers->rb_send_bufs);
req              1470 net/sunrpc/xprtrdma/verbs.c 		struct rpcrdma_req *req)
req              1472 net/sunrpc/xprtrdma/verbs.c 	struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr;
req              1475 net/sunrpc/xprtrdma/verbs.c 	if (!ep->rep_send_count || kref_read(&req->rl_kref) > 1) {
req              1483 net/sunrpc/xprtrdma/verbs.c 	rc = frwr_send(ia, req);
req              1484 net/sunrpc/xprtrdma/verbs.c 	trace_xprtrdma_post_send(req, rc);
req               486 net/sunrpc/xprtrdma/xprt_rdma.h void rpcrdma_req_destroy(struct rpcrdma_req *req);
req               502 net/sunrpc/xprtrdma/xprt_rdma.h 			struct rpcrdma_req *req);
req               548 net/sunrpc/xprtrdma/xprt_rdma.h void frwr_recycle(struct rpcrdma_req *req);
req               549 net/sunrpc/xprtrdma/xprt_rdma.h void frwr_reset(struct rpcrdma_req *req);
req               558 net/sunrpc/xprtrdma/xprt_rdma.h int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req);
req               560 net/sunrpc/xprtrdma/xprt_rdma.h void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
req               561 net/sunrpc/xprtrdma/xprt_rdma.h void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
req               576 net/sunrpc/xprtrdma/xprt_rdma.h 			      struct rpcrdma_req *req, u32 hdrlen,
req               495 net/sunrpc/xprtsock.c 		int flags, struct rpc_rqst *req)
req               497 net/sunrpc/xprtsock.c 	struct xdr_buf *buf = &req->rq_private_buf;
req               556 net/sunrpc/xprtsock.c 	struct rpc_rqst *req;
req               560 net/sunrpc/xprtsock.c 	req = xprt_lookup_bc_request(xprt, transport->recv.xid);
req               561 net/sunrpc/xprtsock.c 	if (!req) {
req               565 net/sunrpc/xprtsock.c 	if (transport->recv.copied && !req->rq_private_buf.len)
req               568 net/sunrpc/xprtsock.c 	ret = xs_read_stream_request(transport, msg, flags, req);
req               570 net/sunrpc/xprtsock.c 		xprt_complete_bc_request(req, transport->recv.copied);
req               572 net/sunrpc/xprtsock.c 		req->rq_private_buf.len = transport->recv.copied;
req               588 net/sunrpc/xprtsock.c 	struct rpc_rqst *req;
req               593 net/sunrpc/xprtsock.c 	req = xprt_lookup_rqst(xprt, transport->recv.xid);
req               594 net/sunrpc/xprtsock.c 	if (!req || (transport->recv.copied && !req->rq_private_buf.len)) {
req               598 net/sunrpc/xprtsock.c 	xprt_pin_rqst(req);
req               601 net/sunrpc/xprtsock.c 	ret = xs_read_stream_request(transport, msg, flags, req);
req               605 net/sunrpc/xprtsock.c 		xprt_complete_rqst(req->rq_task, transport->recv.copied);
req               607 net/sunrpc/xprtsock.c 		req->rq_private_buf.len = transport->recv.copied;
req               608 net/sunrpc/xprtsock.c 	xprt_unpin_rqst(req);
req               876 net/sunrpc/xprtsock.c static int xs_nospace(struct rpc_rqst *req)
req               878 net/sunrpc/xprtsock.c 	struct rpc_xprt *xprt = req->rq_xprt;
req               884 net/sunrpc/xprtsock.c 			req->rq_task->tk_pid,
req               885 net/sunrpc/xprtsock.c 			req->rq_slen - transport->xmit.offset,
req               886 net/sunrpc/xprtsock.c 			req->rq_slen);
req               916 net/sunrpc/xprtsock.c xs_stream_prepare_request(struct rpc_rqst *req)
req               918 net/sunrpc/xprtsock.c 	xdr_free_bvec(&req->rq_rcv_buf);
req               919 net/sunrpc/xprtsock.c 	req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_KERNEL);
req               927 net/sunrpc/xprtsock.c xs_send_request_was_aborted(struct sock_xprt *transport, struct rpc_rqst *req)
req               929 net/sunrpc/xprtsock.c 	return transport->xmit.offset != 0 && req->rq_bytes_sent == 0;
req               954 net/sunrpc/xprtsock.c static int xs_local_send_request(struct rpc_rqst *req)
req               956 net/sunrpc/xprtsock.c 	struct rpc_xprt *xprt = req->rq_xprt;
req               959 net/sunrpc/xprtsock.c 	struct xdr_buf *xdr = &req->rq_snd_buf;
req               961 net/sunrpc/xprtsock.c 	unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
req               966 net/sunrpc/xprtsock.c 	if (xs_send_request_was_aborted(transport, req)) {
req               972 net/sunrpc/xprtsock.c 			req->rq_svec->iov_base, req->rq_svec->iov_len);
req               974 net/sunrpc/xprtsock.c 	req->rq_xtime = ktime_get();
req               985 net/sunrpc/xprtsock.c 		req->rq_bytes_sent = transport->xmit.offset;
req               986 net/sunrpc/xprtsock.c 		if (likely(req->rq_bytes_sent >= msglen)) {
req               987 net/sunrpc/xprtsock.c 			req->rq_xmit_bytes_sent += transport->xmit.offset;
req               998 net/sunrpc/xprtsock.c 		status = xs_nospace(req);
req              1023 net/sunrpc/xprtsock.c static int xs_udp_send_request(struct rpc_rqst *req)
req              1025 net/sunrpc/xprtsock.c 	struct rpc_xprt *xprt = req->rq_xprt;
req              1027 net/sunrpc/xprtsock.c 	struct xdr_buf *xdr = &req->rq_snd_buf;
req              1032 net/sunrpc/xprtsock.c 				req->rq_svec->iov_base,
req              1033 net/sunrpc/xprtsock.c 				req->rq_svec->iov_len);
req              1038 net/sunrpc/xprtsock.c 	if (!xprt_request_get_cong(xprt, req))
req              1041 net/sunrpc/xprtsock.c 	req->rq_xtime = ktime_get();
req              1056 net/sunrpc/xprtsock.c 		req->rq_xmit_bytes_sent += sent;
req              1057 net/sunrpc/xprtsock.c 		if (sent >= req->rq_slen)
req              1070 net/sunrpc/xprtsock.c 		status = xs_nospace(req);
req              1102 net/sunrpc/xprtsock.c static int xs_tcp_send_request(struct rpc_rqst *req)
req              1104 net/sunrpc/xprtsock.c 	struct rpc_xprt *xprt = req->rq_xprt;
req              1106 net/sunrpc/xprtsock.c 	struct xdr_buf *xdr = &req->rq_snd_buf;
req              1108 net/sunrpc/xprtsock.c 	unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
req              1114 net/sunrpc/xprtsock.c 	if (xs_send_request_was_aborted(transport, req)) {
req              1121 net/sunrpc/xprtsock.c 				req->rq_svec->iov_base,
req              1122 net/sunrpc/xprtsock.c 				req->rq_svec->iov_len);
req              1130 net/sunrpc/xprtsock.c 	req->rq_xtime = ktime_get();
req              1142 net/sunrpc/xprtsock.c 		req->rq_bytes_sent = transport->xmit.offset;
req              1143 net/sunrpc/xprtsock.c 		if (likely(req->rq_bytes_sent >= msglen)) {
req              1144 net/sunrpc/xprtsock.c 			req->rq_xmit_bytes_sent += transport->xmit.offset;
req              1183 net/sunrpc/xprtsock.c 		status = xs_nospace(req);
req              2643 net/sunrpc/xprtsock.c static int bc_sendto(struct rpc_rqst *req)
req              2646 net/sunrpc/xprtsock.c 	struct xdr_buf *xbufp = &req->rq_snd_buf;
req              2648 net/sunrpc/xprtsock.c 			container_of(req->rq_xprt, struct sock_xprt, xprt);
req              2662 net/sunrpc/xprtsock.c 	req->rq_xtime = ktime_get();
req              2684 net/sunrpc/xprtsock.c static int bc_send_request(struct rpc_rqst *req)
req              2692 net/sunrpc/xprtsock.c 	xprt = req->rq_xprt->bc_xprt;
req              2702 net/sunrpc/xprtsock.c 		len = bc_sendto(req);
req                53 net/tipc/diag.c 	struct tipc_sock_diag_req *req = nlmsg_data(cb->nlh);
req                62 net/tipc/diag.c 	err = tipc_sk_fill_sock_diag(skb, cb, tsk, req->tidiag_states,
req                44 net/tipc/discover.h void tipc_disc_delete(struct tipc_discoverer *req);
req                46 net/tipc/discover.h void tipc_disc_add_dest(struct tipc_discoverer *req);
req                47 net/tipc/discover.h void tipc_disc_remove_dest(struct tipc_discoverer *req);
req                61 net/tipc/netlink_compat.c 	struct tlv_desc *req;
req               262 net/tipc/netlink_compat.c 			      !TLV_CHECK_TYPE(msg->req, msg->req_type)))
req               360 net/tipc/netlink_compat.c 			      !TLV_CHECK_TYPE(msg->req, msg->req_type)))
req               403 net/tipc/netlink_compat.c 	b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
req               409 net/tipc/netlink_compat.c 	len = TLV_GET_DATA_LEN(msg->req);
req               445 net/tipc/netlink_compat.c 	name = (char *)TLV_DATA(msg->req);
req               451 net/tipc/netlink_compat.c 	len = TLV_GET_DATA_LEN(msg->req);
req               547 net/tipc/netlink_compat.c 	name = (char *)TLV_DATA(msg->req);
req               549 net/tipc/netlink_compat.c 	len = TLV_GET_DATA_LEN(msg->req);
req               698 net/tipc/netlink_compat.c 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
req               725 net/tipc/netlink_compat.c 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
req               752 net/tipc/netlink_compat.c 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
req               781 net/tipc/netlink_compat.c 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
req               783 net/tipc/netlink_compat.c 	len = TLV_GET_DATA_LEN(msg->req);
req               815 net/tipc/netlink_compat.c 	name = (char *)TLV_DATA(msg->req);
req               821 net/tipc/netlink_compat.c 	len = TLV_GET_DATA_LEN(msg->req);
req               849 net/tipc/netlink_compat.c 	ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
req               850 net/tipc/netlink_compat.c 	if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
req               894 net/tipc/netlink_compat.c 	ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
req              1111 net/tipc/netlink_compat.c 	val = ntohl(*(__be32 *)TLV_DATA(msg->req));
req              1273 net/tipc/netlink_compat.c 	msg.req = nlmsg_data(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN;
req              1285 net/tipc/netlink_compat.c 	if (msg.req_size && !TLV_OK(msg.req, msg.req_size)) {
req               151 net/tls/tls_sw.c static void tls_decrypt_done(struct crypto_async_request *req, int err)
req               153 net/tls/tls_sw.c 	struct aead_request *aead_req = (struct aead_request *)req;
req               164 net/tls/tls_sw.c 	skb = (struct sk_buff *)req->data;
req               423 net/tls/tls_sw.c static void tls_encrypt_done(struct crypto_async_request *req, int err)
req               425 net/tls/tls_sw.c 	struct aead_request *aead_req = (struct aead_request *)req;
req               426 net/tls/tls_sw.c 	struct sock *sk = req->data;
req               672 net/tls/tls_sw.c 	struct aead_request *req;
req               715 net/tls/tls_sw.c 	req = &rec->aead_req;
req               759 net/tls/tls_sw.c 	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
req                78 net/unix/diag.c 			struct sock *req, *peer;
req                80 net/unix/diag.c 			req = skb->sk;
req                86 net/unix/diag.c 			unix_state_lock_nested(req);
req                87 net/unix/diag.c 			peer = unix_sk(req)->peer;
req                89 net/unix/diag.c 			unix_state_unlock(req);
req               122 net/unix/diag.c static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
req               141 net/unix/diag.c 	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
req               145 net/unix/diag.c 	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
req               149 net/unix/diag.c 	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
req               153 net/unix/diag.c 	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
req               157 net/unix/diag.c 	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
req               161 net/unix/diag.c 	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
req               168 net/unix/diag.c 	if ((req->udiag_show & UDIAG_SHOW_UID) &&
req               180 net/unix/diag.c static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
req               192 net/unix/diag.c 	return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
req               197 net/unix/diag.c 	struct unix_diag_req *req;
req               201 net/unix/diag.c 	req = nlmsg_data(cb->nlh);
req               218 net/unix/diag.c 			if (!(req->udiag_states & (1 << sk->sk_state)))
req               220 net/unix/diag.c 			if (sk_diag_dump(sk, skb, req,
req               259 net/unix/diag.c 			       struct unix_diag_req *req)
req               267 net/unix/diag.c 	if (req->udiag_ino == 0)
req               270 net/unix/diag.c 	sk = unix_lookup_by_ino(req->udiag_ino);
req               277 net/unix/diag.c 	err = sock_diag_check_cookie(sk, req->udiag_cookie);
req               288 net/unix/diag.c 	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
req               289 net/unix/diag.c 			   nlh->nlmsg_seq, 0, req->udiag_ino);
req                50 net/vmw_vsock/diag.c 	struct vsock_diag_req *req;
req                58 net/vmw_vsock/diag.c 	req = nlmsg_data(cb->nlh);
req                83 net/vmw_vsock/diag.c 				if (!(req->vdiag_states & (1 << sk->sk_state)))
req               117 net/vmw_vsock/diag.c 			if (!(req->vdiag_states & (1 << sk->sk_state)))
req               351 net/wireless/core.c 	struct cfg80211_sched_scan_request *req, *tmp;
req               357 net/wireless/core.c 	list_for_each_entry_safe(req, tmp, &rdev->sched_scan_req_list, list) {
req               358 net/wireless/core.c 		if (req->nl_owner_dead)
req               359 net/wireless/core.c 			cfg80211_stop_sched_scan_req(rdev, req, false);
req               375 net/wireless/core.h 			struct cfg80211_assoc_request *req);
req               442 net/wireless/core.h 				 struct cfg80211_sched_scan_request *req);
req               447 net/wireless/core.h 				 struct cfg80211_sched_scan_request *req,
req               194 net/wireless/lib80211_crypt_ccmp.c 	struct aead_request *req;
req               209 net/wireless/lib80211_crypt_ccmp.c 	req = aead_request_alloc(key->tfm, GFP_ATOMIC);
req               210 net/wireless/lib80211_crypt_ccmp.c 	if (!req)
req               223 net/wireless/lib80211_crypt_ccmp.c 	aead_request_set_callback(req, 0, NULL, NULL);
req               224 net/wireless/lib80211_crypt_ccmp.c 	aead_request_set_ad(req, aad_len);
req               225 net/wireless/lib80211_crypt_ccmp.c 	aead_request_set_crypt(req, sg, sg, data_len, iv);
req               227 net/wireless/lib80211_crypt_ccmp.c 	ret = crypto_aead_encrypt(req);
req               228 net/wireless/lib80211_crypt_ccmp.c 	aead_request_free(req);
req               259 net/wireless/lib80211_crypt_ccmp.c 	struct aead_request *req;
req               313 net/wireless/lib80211_crypt_ccmp.c 	req = aead_request_alloc(key->tfm, GFP_ATOMIC);
req               314 net/wireless/lib80211_crypt_ccmp.c 	if (!req)
req               323 net/wireless/lib80211_crypt_ccmp.c 	aead_request_set_callback(req, 0, NULL, NULL);
req               324 net/wireless/lib80211_crypt_ccmp.c 	aead_request_set_ad(req, aad_len);
req               325 net/wireless/lib80211_crypt_ccmp.c 	aead_request_set_crypt(req, sg, sg, data_len, iv);
req               327 net/wireless/lib80211_crypt_ccmp.c 	ret = crypto_aead_decrypt(req);
req               328 net/wireless/lib80211_crypt_ccmp.c 	aead_request_free(req);
req               232 net/wireless/mlme.c 	struct cfg80211_auth_request req = {
req               254 net/wireless/mlme.c 	req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
req               257 net/wireless/mlme.c 	if (!req.bss)
req               260 net/wireless/mlme.c 	err = rdev_auth(rdev, dev, &req);
req               262 net/wireless/mlme.c 	cfg80211_put_bss(&rdev->wiphy, req.bss);
req               305 net/wireless/mlme.c 			struct cfg80211_assoc_request *req)
req               313 net/wireless/mlme.c 	    (!req->prev_bssid || !ether_addr_equal(wdev->current_bss->pub.bssid,
req               314 net/wireless/mlme.c 						   req->prev_bssid)))
req               317 net/wireless/mlme.c 	cfg80211_oper_and_ht_capa(&req->ht_capa_mask,
req               319 net/wireless/mlme.c 	cfg80211_oper_and_vht_capa(&req->vht_capa_mask,
req               322 net/wireless/mlme.c 	req->bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
req               325 net/wireless/mlme.c 	if (!req->bss)
req               328 net/wireless/mlme.c 	err = rdev_assoc(rdev, dev, req);
req               330 net/wireless/mlme.c 		cfg80211_hold_bss(bss_from_pub(req->bss));
req               332 net/wireless/mlme.c 		cfg80211_put_bss(&rdev->wiphy, req->bss);
req               343 net/wireless/mlme.c 	struct cfg80211_deauth_request req = {
req               363 net/wireless/mlme.c 	return rdev_deauth(rdev, dev, &req);
req               372 net/wireless/mlme.c 	struct cfg80211_disassoc_request req = {
req               386 net/wireless/mlme.c 		req.bss = &wdev->current_bss->pub;
req               390 net/wireless/mlme.c 	err = rdev_disassoc(rdev, dev, &req);
req              7436 net/wireless/nl80211.c 		struct cfg80211_sched_scan_request *req = request;
req              7441 net/wireless/nl80211.c 		flags = &req->flags;
req              7442 net/wireless/nl80211.c 		mac_addr = req->mac_addr;
req              7443 net/wireless/nl80211.c 		mac_addr_mask = req->mac_addr_mask;
req              7445 net/wireless/nl80211.c 		struct cfg80211_scan_request *req = request;
req              7448 net/wireless/nl80211.c 		flags = &req->flags;
req              7449 net/wireless/nl80211.c 		mac_addr = req->mac_addr;
req              7450 net/wireless/nl80211.c 		mac_addr_mask = req->mac_addr_mask;
req              8301 net/wireless/nl80211.c 	struct cfg80211_sched_scan_request *req;
req              8313 net/wireless/nl80211.c 	req = list_first_or_null_rcu(&rdev->sched_scan_req_list,
req              8316 net/wireless/nl80211.c 	if (!req || req->reqid ||
req              8317 net/wireless/nl80211.c 	    (req->owner_nlportid &&
req              8318 net/wireless/nl80211.c 	     req->owner_nlportid != info->snd_portid))
req              8321 net/wireless/nl80211.c 	return cfg80211_stop_sched_scan_req(rdev, req, false);
req              9187 net/wireless/nl80211.c 	struct cfg80211_assoc_request req = {};
req              9218 net/wireless/nl80211.c 		req.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
req              9219 net/wireless/nl80211.c 		req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
req              9226 net/wireless/nl80211.c 			req.use_mfp = true;
req              9232 net/wireless/nl80211.c 		req.prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);
req              9235 net/wireless/nl80211.c 		req.flags |= ASSOC_REQ_DISABLE_HT;
req              9238 net/wireless/nl80211.c 		memcpy(&req.ht_capa_mask,
req              9240 net/wireless/nl80211.c 		       sizeof(req.ht_capa_mask));
req              9245 net/wireless/nl80211.c 		memcpy(&req.ht_capa,
req              9247 net/wireless/nl80211.c 		       sizeof(req.ht_capa));
req              9251 net/wireless/nl80211.c 		req.flags |= ASSOC_REQ_DISABLE_VHT;
req              9254 net/wireless/nl80211.c 		memcpy(&req.vht_capa_mask,
req              9256 net/wireless/nl80211.c 		       sizeof(req.vht_capa_mask));
req              9261 net/wireless/nl80211.c 		memcpy(&req.vht_capa,
req              9263 net/wireless/nl80211.c 		       sizeof(req.vht_capa));
req              9273 net/wireless/nl80211.c 		req.flags |= ASSOC_REQ_USE_RRM;
req              9277 net/wireless/nl80211.c 		req.fils_kek = nla_data(info->attrs[NL80211_ATTR_FILS_KEK]);
req              9278 net/wireless/nl80211.c 		req.fils_kek_len = nla_len(info->attrs[NL80211_ATTR_FILS_KEK]);
req              9281 net/wireless/nl80211.c 		req.fils_nonces =
req              9285 net/wireless/nl80211.c 	err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
req              9290 net/wireless/nl80211.c 					  ssid, ssid_len, &req);
req              11214 net/wireless/nl80211.c 				  struct cfg80211_sched_scan_request *req)
req              11219 net/wireless/nl80211.c 	if (!req)
req              11226 net/wireless/nl80211.c 	if (req->n_scan_plans == 1 &&
req              11228 net/wireless/nl80211.c 			req->scan_plans[0].interval * 1000))
req              11231 net/wireless/nl80211.c 	if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_DELAY, req->delay))
req              11234 net/wireless/nl80211.c 	if (req->relative_rssi_set) {
req              11238 net/wireless/nl80211.c 			       req->relative_rssi))
req              11241 net/wireless/nl80211.c 		rssi_adjust.band = req->rssi_adjust.band;
req              11242 net/wireless/nl80211.c 		rssi_adjust.delta = req->rssi_adjust.delta;
req              11252 net/wireless/nl80211.c 	for (i = 0; i < req->n_channels; i++) {
req              11253 net/wireless/nl80211.c 		if (nla_put_u32(msg, i, req->channels[i]->center_freq))
req              11259 net/wireless/nl80211.c 	if (req->n_match_sets) {
req              11265 net/wireless/nl80211.c 		for (i = 0; i < req->n_match_sets; i++) {
req              11271 net/wireless/nl80211.c 				    req->match_sets[i].ssid.ssid_len,
req              11272 net/wireless/nl80211.c 				    req->match_sets[i].ssid.ssid))
req              11283 net/wireless/nl80211.c 	for (i = 0; i < req->n_scan_plans; i++) {
req              11289 net/wireless/nl80211.c 				req->scan_plans[i].interval) ||
req              11290 net/wireless/nl80211.c 		    (req->scan_plans[i].iterations &&
req              11292 net/wireless/nl80211.c 				 req->scan_plans[i].iterations)))
req              14819 net/wireless/nl80211.c 	struct cfg80211_scan_request *req = rdev->scan_req;
req              14823 net/wireless/nl80211.c 	if (WARN_ON(!req))
req              14829 net/wireless/nl80211.c 	for (i = 0; i < req->n_ssids; i++) {
req              14830 net/wireless/nl80211.c 		if (nla_put(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid))
req              14838 net/wireless/nl80211.c 	for (i = 0; i < req->n_channels; i++) {
req              14839 net/wireless/nl80211.c 		if (nla_put_u32(msg, i, req->channels[i]->center_freq))
req              14844 net/wireless/nl80211.c 	if (req->ie &&
req              14845 net/wireless/nl80211.c 	    nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie))
req              14848 net/wireless/nl80211.c 	if (req->flags &&
req              14849 net/wireless/nl80211.c 	    nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags))
req              14852 net/wireless/nl80211.c 	if (req->info.scan_start_tsf &&
req              14854 net/wireless/nl80211.c 			       req->info.scan_start_tsf, NL80211_BSS_PAD) ||
req              14856 net/wireless/nl80211.c 		     req->info.tsf_bssid)))
req              14896 net/wireless/nl80211.c 			    struct cfg80211_sched_scan_request *req, u32 cmd)
req              14905 net/wireless/nl80211.c 			wiphy_to_rdev(req->wiphy)->wiphy_idx) ||
req              14906 net/wireless/nl80211.c 	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, req->dev->ifindex) ||
req              14907 net/wireless/nl80211.c 	    nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->reqid,
req              14968 net/wireless/nl80211.c void nl80211_send_sched_scan(struct cfg80211_sched_scan_request *req, u32 cmd)
req              14976 net/wireless/nl80211.c 	if (nl80211_prep_sched_scan_msg(msg, req, cmd) < 0) {
req              14981 net/wireless/nl80211.c 	genlmsg_multicast_netns(&nl80211_fam, wiphy_net(req->wiphy), msg, 0,
req                48 net/wireless/nl80211.h void nl80211_send_sched_scan(struct cfg80211_sched_scan_request *req, u32 cmd);
req               138 net/wireless/pmsr.c 	struct nlattr *req[NL80211_PMSR_REQ_ATTR_MAX + 1];
req               170 net/wireless/pmsr.c 	nla_parse_nested_deprecated(req, NL80211_PMSR_REQ_ATTR_MAX,
req               174 net/wireless/pmsr.c 	if (!req[NL80211_PMSR_REQ_ATTR_DATA]) {
req               181 net/wireless/pmsr.c 	if (req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF])
req               186 net/wireless/pmsr.c 				    req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF],
req               191 net/wireless/pmsr.c 	nla_for_each_nested(treq, req[NL80211_PMSR_REQ_ATTR_DATA], rem) {
req               214 net/wireless/pmsr.c 	struct cfg80211_pmsr_request *req;
req               240 net/wireless/pmsr.c 	req = kzalloc(struct_size(req, peers, count), GFP_KERNEL);
req               241 net/wireless/pmsr.c 	if (!req)
req               245 net/wireless/pmsr.c 		req->timeout = nla_get_u32(info->attrs[NL80211_ATTR_TIMEOUT]);
req               256 net/wireless/pmsr.c 		err = nl80211_parse_random_mac(info->attrs, req->mac_addr,
req               257 net/wireless/pmsr.c 					       req->mac_addr_mask);
req               261 net/wireless/pmsr.c 		memcpy(req->mac_addr, wdev_address(wdev), ETH_ALEN);
req               262 net/wireless/pmsr.c 		eth_broadcast_addr(req->mac_addr_mask);
req               268 net/wireless/pmsr.c 		err = pmsr_parse_peer(rdev, peer, &req->peers[idx], info);
req               274 net/wireless/pmsr.c 	req->n_peers = count;
req               275 net/wireless/pmsr.c 	req->cookie = cfg80211_assign_cookie(rdev);
req               276 net/wireless/pmsr.c 	req->nl_portid = info->snd_portid;
req               278 net/wireless/pmsr.c 	err = rdev_start_pmsr(rdev, wdev, req);
req               282 net/wireless/pmsr.c 	list_add_tail(&req->list, &wdev->pmsr_list);
req               284 net/wireless/pmsr.c 	nl_set_extack_cookie_u64(info->extack, req->cookie);
req               287 net/wireless/pmsr.c 	kfree(req);
req               292 net/wireless/pmsr.c 			    struct cfg80211_pmsr_request *req,
req               299 net/wireless/pmsr.c 	trace_cfg80211_pmsr_complete(wdev->wiphy, wdev, req->cookie);
req               315 net/wireless/pmsr.c 	if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->cookie,
req               320 net/wireless/pmsr.c 	genlmsg_unicast(wiphy_net(wdev->wiphy), msg, req->nl_portid);
req               326 net/wireless/pmsr.c 	list_del(&req->list);
req               328 net/wireless/pmsr.c 	kfree(req);
req               487 net/wireless/pmsr.c 			  struct cfg80211_pmsr_request *req,
req               496 net/wireless/pmsr.c 	trace_cfg80211_pmsr_report(wdev->wiphy, wdev, req->cookie,
req               517 net/wireless/pmsr.c 	if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->cookie,
req               528 net/wireless/pmsr.c 	genlmsg_unicast(wiphy_net(wdev->wiphy), msg, req->nl_portid);
req               538 net/wireless/pmsr.c 	struct cfg80211_pmsr_request *req, *tmp;
req               544 net/wireless/pmsr.c 	list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) {
req               545 net/wireless/pmsr.c 		if (req->nl_portid)
req               547 net/wireless/pmsr.c 		list_move_tail(&req->list, &free_list);
req               551 net/wireless/pmsr.c 	list_for_each_entry_safe(req, tmp, &free_list, list) {
req               552 net/wireless/pmsr.c 		rdev_abort_pmsr(rdev, wdev, req);
req               554 net/wireless/pmsr.c 		kfree(req);
req               570 net/wireless/pmsr.c 	struct cfg80211_pmsr_request *req;
req               574 net/wireless/pmsr.c 	list_for_each_entry(req, &wdev->pmsr_list, list) {
req               576 net/wireless/pmsr.c 		req->nl_portid = 0;
req               588 net/wireless/pmsr.c 	struct cfg80211_pmsr_request *req;
req               591 net/wireless/pmsr.c 	list_for_each_entry(req, &wdev->pmsr_list, list) {
req               592 net/wireless/pmsr.c 		if (req->nl_portid == portid) {
req               593 net/wireless/pmsr.c 			req->nl_portid = 0;
req               441 net/wireless/rdev-ops.h 			    struct cfg80211_auth_request *req)
req               444 net/wireless/rdev-ops.h 	trace_rdev_auth(&rdev->wiphy, dev, req);
req               445 net/wireless/rdev-ops.h 	ret = rdev->ops->auth(&rdev->wiphy, dev, req);
req               452 net/wireless/rdev-ops.h 			     struct cfg80211_assoc_request *req)
req               455 net/wireless/rdev-ops.h 	trace_rdev_assoc(&rdev->wiphy, dev, req);
req               456 net/wireless/rdev-ops.h 	ret = rdev->ops->assoc(&rdev->wiphy, dev, req);
req               463 net/wireless/rdev-ops.h 			      struct cfg80211_deauth_request *req)
req               466 net/wireless/rdev-ops.h 	trace_rdev_deauth(&rdev->wiphy, dev, req);
req               467 net/wireless/rdev-ops.h 	ret = rdev->ops->deauth(&rdev->wiphy, dev, req);
req               474 net/wireless/rdev-ops.h 				struct cfg80211_disassoc_request *req)
req               477 net/wireless/rdev-ops.h 	trace_rdev_disassoc(&rdev->wiphy, dev, req);
req               478 net/wireless/rdev-ops.h 	ret = rdev->ops->disassoc(&rdev->wiphy, dev, req);
req               538 net/wireless/scan.c 				 struct cfg80211_sched_scan_request *req)
req               542 net/wireless/scan.c 	list_add_rcu(&req->list, &rdev->sched_scan_req_list);
req               546 net/wireless/scan.c 					struct cfg80211_sched_scan_request *req)
req               550 net/wireless/scan.c 	list_del_rcu(&req->list);
req               551 net/wireless/scan.c 	kfree_rcu(req, rcu_head);
req               604 net/wireless/scan.c 	struct cfg80211_sched_scan_request *req, *tmp;
req               610 net/wireless/scan.c 	list_for_each_entry_safe(req, tmp, &rdev->sched_scan_req_list, list) {
req               611 net/wireless/scan.c 		if (req->report_results) {
req               612 net/wireless/scan.c 			req->report_results = false;
req               613 net/wireless/scan.c 			if (req->flags & NL80211_SCAN_FLAG_FLUSH) {
req               616 net/wireless/scan.c 				__cfg80211_bss_expire(rdev, req->scan_start);
req               618 net/wireless/scan.c 				req->scan_start = jiffies;
req               620 net/wireless/scan.c 			nl80211_send_sched_scan(req,
req               666 net/wireless/scan.c 				 struct cfg80211_sched_scan_request *req,
req               672 net/wireless/scan.c 		int err = rdev_sched_scan_stop(rdev, req->dev, req->reqid);
req               677 net/wireless/scan.c 	nl80211_send_sched_scan(req, NL80211_CMD_SCHED_SCAN_STOPPED);
req               679 net/wireless/scan.c 	cfg80211_del_sched_scan_req(rdev, req);
req               151 net/wireless/sme.c 	struct cfg80211_assoc_request req = {};
req               186 net/wireless/sme.c 			req.prev_bssid = wdev->conn->prev_bssid;
req               187 net/wireless/sme.c 		req.ie = params->ie;
req               188 net/wireless/sme.c 		req.ie_len = params->ie_len;
req               189 net/wireless/sme.c 		req.use_mfp = params->mfp != NL80211_MFP_NO;
req               190 net/wireless/sme.c 		req.crypto = params->crypto;
req               191 net/wireless/sme.c 		req.flags = params->flags;
req               192 net/wireless/sme.c 		req.ht_capa = params->ht_capa;
req               193 net/wireless/sme.c 		req.ht_capa_mask = params->ht_capa_mask;
req               194 net/wireless/sme.c 		req.vht_capa = params->vht_capa;
req               195 net/wireless/sme.c 		req.vht_capa_mask = params->vht_capa_mask;
req               199 net/wireless/sme.c 					  params->ssid_len, &req);
req              1149 net/wireless/trace.h 		 struct cfg80211_auth_request *req),
req              1150 net/wireless/trace.h 	TP_ARGS(wiphy, netdev, req),
req              1160 net/wireless/trace.h 		if (req->bss)
req              1161 net/wireless/trace.h 			MAC_ASSIGN(bssid, req->bss->bssid);
req              1164 net/wireless/trace.h 		__entry->auth_type = req->auth_type;
req              1173 net/wireless/trace.h 		 struct cfg80211_assoc_request *req),
req              1174 net/wireless/trace.h 	TP_ARGS(wiphy, netdev, req),
req              1186 net/wireless/trace.h 		if (req->bss)
req              1187 net/wireless/trace.h 			MAC_ASSIGN(bssid, req->bss->bssid);
req              1190 net/wireless/trace.h 		MAC_ASSIGN(prev_bssid, req->prev_bssid);
req              1191 net/wireless/trace.h 		__entry->use_mfp = req->use_mfp;
req              1192 net/wireless/trace.h 		__entry->flags = req->flags;
req              1203 net/wireless/trace.h 		 struct cfg80211_deauth_request *req),
req              1204 net/wireless/trace.h 	TP_ARGS(wiphy, netdev, req),
req              1214 net/wireless/trace.h 		MAC_ASSIGN(bssid, req->bssid);
req              1215 net/wireless/trace.h 		__entry->reason_code = req->reason_code;
req              1224 net/wireless/trace.h 		 struct cfg80211_disassoc_request *req),
req              1225 net/wireless/trace.h 	TP_ARGS(wiphy, netdev, req),
req              1236 net/wireless/trace.h 		if (req->bss)
req              1237 net/wireless/trace.h 			MAC_ASSIGN(bssid, req->bss->bssid);
req              1240 net/wireless/trace.h 		__entry->reason_code = req->reason_code;
req              1241 net/wireless/trace.h 		__entry->local_state_change = req->local_state_change;
req                80 net/xdp/xsk_diag.c 			 struct xdp_diag_req *req,
req               101 net/xdp/xsk_diag.c 	if ((req->xdiag_show & XDP_SHOW_INFO) && xsk_diag_put_info(xs, nlskb))
req               104 net/xdp/xsk_diag.c 	if ((req->xdiag_show & XDP_SHOW_INFO) &&
req               109 net/xdp/xsk_diag.c 	if ((req->xdiag_show & XDP_SHOW_RING_CFG) &&
req               113 net/xdp/xsk_diag.c 	if ((req->xdiag_show & XDP_SHOW_UMEM) &&
req               117 net/xdp/xsk_diag.c 	if ((req->xdiag_show & XDP_SHOW_MEMINFO) &&
req               133 net/xdp/xsk_diag.c 	struct xdp_diag_req *req = nlmsg_data(cb->nlh);
req               146 net/xdp/xsk_diag.c 		if (xsk_diag_fill(sk, nlskb, req,
req               326 samples/bpf/xdp_router_ipv4_user.c 	} req;
req               340 samples/bpf/xdp_router_ipv4_user.c 	memset(&req, 0, sizeof(req));
req               341 samples/bpf/xdp_router_ipv4_user.c 	req.nl.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
req               342 samples/bpf/xdp_router_ipv4_user.c 	req.nl.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
req               343 samples/bpf/xdp_router_ipv4_user.c 	req.nl.nlmsg_type = RTM_GETROUTE;
req               345 samples/bpf/xdp_router_ipv4_user.c 	req.rt.rtm_family = rtm_family;
req               346 samples/bpf/xdp_router_ipv4_user.c 	req.rt.rtm_table = RT_TABLE_MAIN;
req               347 samples/bpf/xdp_router_ipv4_user.c 	req.nl.nlmsg_pid = 0;
req               348 samples/bpf/xdp_router_ipv4_user.c 	req.nl.nlmsg_seq = ++seq;
req               350 samples/bpf/xdp_router_ipv4_user.c 	iov.iov_base = (void *)&req.nl;
req               351 samples/bpf/xdp_router_ipv4_user.c 	iov.iov_len = req.nl.nlmsg_len;
req               465 samples/bpf/xdp_router_ipv4_user.c 	} req;
req               479 samples/bpf/xdp_router_ipv4_user.c 	memset(&req, 0, sizeof(req));
req               480 samples/bpf/xdp_router_ipv4_user.c 	req.nl.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
req               481 samples/bpf/xdp_router_ipv4_user.c 	req.nl.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
req               482 samples/bpf/xdp_router_ipv4_user.c 	req.nl.nlmsg_type = RTM_GETNEIGH;
req               483 samples/bpf/xdp_router_ipv4_user.c 	req.rt.ndm_state = NUD_REACHABLE;
req               484 samples/bpf/xdp_router_ipv4_user.c 	req.rt.ndm_family = rtm_family;
req               485 samples/bpf/xdp_router_ipv4_user.c 	req.nl.nlmsg_pid = 0;
req               486 samples/bpf/xdp_router_ipv4_user.c 	req.nl.nlmsg_seq = ++seq;
req               488 samples/bpf/xdp_router_ipv4_user.c 	iov.iov_base = (void *)&req.nl;
req               489 samples/bpf/xdp_router_ipv4_user.c 	iov.iov_len = req.nl.nlmsg_len;
req                42 samples/connector/cn_test.c 	struct cn_notify_req *req;
req                49 samples/connector/cn_test.c 	size0 = sizeof(*msg) + sizeof(*ctl) + 3 * sizeof(*req);
req                82 samples/connector/cn_test.c 	req = (struct cn_notify_req *)(ctl + 1);
req                87 samples/connector/cn_test.c 	req->first = cn_test_id.idx;
req                88 samples/connector/cn_test.c 	req->range = 10;
req                93 samples/connector/cn_test.c 	req++;
req                94 samples/connector/cn_test.c 	req->first = cn_test_id.val;
req                95 samples/connector/cn_test.c 	req->range = 10;
req               100 samples/connector/cn_test.c 	req++;
req               101 samples/connector/cn_test.c 	req->first = cn_test_id.val + 20;
req               102 samples/connector/cn_test.c 	req->range = 10;
req               305 samples/qmi/qmi_sample_client.c 	struct test_ping_req_msg_v01 req = {};
req               309 samples/qmi/qmi_sample_client.c 	memcpy(req.ping, "ping", sizeof(req.ping));
req               318 samples/qmi/qmi_sample_client.c 			       test_ping_req_msg_v01_ei, &req);
req               374 samples/qmi/qmi_sample_client.c 	struct test_data_req_msg_v01 *req;
req               378 samples/qmi/qmi_sample_client.c 	req = kzalloc(sizeof(*req), GFP_KERNEL);
req               379 samples/qmi/qmi_sample_client.c 	if (!req)
req               384 samples/qmi/qmi_sample_client.c 		kfree(req);
req               388 samples/qmi/qmi_sample_client.c 	req->data_len = min_t(size_t, sizeof(req->data), count);
req               389 samples/qmi/qmi_sample_client.c 	if (copy_from_user(req->data, user_buf, req->data_len)) {
req               401 samples/qmi/qmi_sample_client.c 			       test_data_req_msg_v01_ei, req);
req               411 samples/qmi/qmi_sample_client.c 		   resp->data_len != req->data_len ||
req               412 samples/qmi/qmi_sample_client.c 		   memcmp(resp->data, req->data, req->data_len)) {
req               422 samples/qmi/qmi_sample_client.c 	kfree(req);
req               104 samples/seccomp/user-trap.c static int handle_req(struct seccomp_notif *req,
req               110 samples/seccomp/user-trap.c 	resp->id = req->id;
req               114 samples/seccomp/user-trap.c 	if (req->data.nr != __NR_mount) {
req               115 samples/seccomp/user-trap.c 		fprintf(stderr, "huh? trapped something besides mount? %d\n", req->data.nr);
req               120 samples/seccomp/user-trap.c 	if (!(req->data.args[3] & MS_BIND))
req               127 samples/seccomp/user-trap.c 	snprintf(path, sizeof(path), "/proc/%d/mem", req->pid);
req               145 samples/seccomp/user-trap.c 	if (ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req->id) < 0) {
req               155 samples/seccomp/user-trap.c 	if (lseek(mem, req->data.args[0], SEEK_SET) < 0) {
req               166 samples/seccomp/user-trap.c 	if (lseek(mem, req->data.args[1], SEEK_SET) < 0) {
req               183 samples/seccomp/user-trap.c 		if (mount(source, target, NULL, req->data.args[3], NULL) < 0) {
req               289 samples/seccomp/user-trap.c 		struct seccomp_notif *req;
req               298 samples/seccomp/user-trap.c 		req = malloc(sizes.seccomp_notif);
req               299 samples/seccomp/user-trap.c 		if (!req)
req               308 samples/seccomp/user-trap.c 			memset(req, 0, sizes.seccomp_notif);
req               309 samples/seccomp/user-trap.c 			if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, req)) {
req               314 samples/seccomp/user-trap.c 			if (handle_req(req, resp, listener) < 0)
req               335 samples/seccomp/user-trap.c 		free(req);
req              1133 security/apparmor/lsm.c 				      struct request_sock *req)
req               211 security/integrity/ima/ima_crypto.c 	struct ahash_request *req;
req               218 security/integrity/ima/ima_crypto.c 	req = ahash_request_alloc(tfm, GFP_KERNEL);
req               219 security/integrity/ima/ima_crypto.c 	if (!req)
req               223 security/integrity/ima/ima_crypto.c 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
req               227 security/integrity/ima/ima_crypto.c 	rc = ahash_wait(crypto_ahash_init(req), &wait);
req               293 security/integrity/ima/ima_crypto.c 		ahash_request_set_crypt(req, sg, NULL, rbuf_len);
req               295 security/integrity/ima/ima_crypto.c 		ahash_rc = crypto_ahash_update(req);
req               307 security/integrity/ima/ima_crypto.c 		ahash_request_set_crypt(req, NULL, hash->digest, 0);
req               308 security/integrity/ima/ima_crypto.c 		rc = ahash_wait(crypto_ahash_final(req), &wait);
req               311 security/integrity/ima/ima_crypto.c 	ahash_request_free(req);
req               535 security/integrity/ima/ima_crypto.c 	struct ahash_request *req;
req               542 security/integrity/ima/ima_crypto.c 	req = ahash_request_alloc(tfm, GFP_KERNEL);
req               543 security/integrity/ima/ima_crypto.c 	if (!req)
req               547 security/integrity/ima/ima_crypto.c 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
req               551 security/integrity/ima/ima_crypto.c 	rc = ahash_wait(crypto_ahash_init(req), &wait);
req               556 security/integrity/ima/ima_crypto.c 	ahash_request_set_crypt(req, &sg, NULL, len);
req               558 security/integrity/ima/ima_crypto.c 	ahash_rc = crypto_ahash_update(req);
req               563 security/integrity/ima/ima_crypto.c 		ahash_request_set_crypt(req, NULL, hash->digest, 0);
req               564 security/integrity/ima/ima_crypto.c 		rc = ahash_wait(crypto_ahash_final(req), &wait);
req               567 security/integrity/ima/ima_crypto.c 	ahash_request_free(req);
req                71 security/keys/dh.c static void dh_crypto_done(struct crypto_async_request *req, int err)
req                73 security/keys/dh.c 	struct dh_completion *compl = req->data;
req               240 security/keys/dh.c 	struct kpp_request *req;
req               350 security/keys/dh.c 	req = kpp_request_alloc(tfm, GFP_KERNEL);
req               351 security/keys/dh.c 	if (!req) {
req               356 security/keys/dh.c 	kpp_request_set_input(req, NULL, 0);
req               357 security/keys/dh.c 	kpp_request_set_output(req, &outsg, outlen);
req               359 security/keys/dh.c 	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
req               367 security/keys/dh.c 	ret = crypto_kpp_generate_public_key(req);
req               380 security/keys/dh.c 		if (copy_from_user(outbuf + req->dst_len, kdfcopy->otherinfo,
req               387 security/keys/dh.c 					    req->dst_len + kdfcopy->otherinfolen,
req               388 security/keys/dh.c 					    outlen - req->dst_len);
req               389 security/keys/dh.c 	} else if (copy_to_user(buffer, outbuf, req->dst_len) == 0) {
req               390 security/keys/dh.c 		ret = req->dst_len;
req               396 security/keys/dh.c 	kpp_request_free(req);
req               392 security/keys/encrypted-keys/encrypted.c 	struct skcipher_request *req;
req               410 security/keys/encrypted-keys/encrypted.c 	req = skcipher_request_alloc(tfm, GFP_KERNEL);
req               411 security/keys/encrypted-keys/encrypted.c 	if (!req) {
req               418 security/keys/encrypted-keys/encrypted.c 	skcipher_request_set_callback(req, 0, NULL, NULL);
req               419 security/keys/encrypted-keys/encrypted.c 	return req;
req               465 security/keys/encrypted-keys/encrypted.c 	struct skcipher_request *req;
req               472 security/keys/encrypted-keys/encrypted.c 	req = init_skcipher_req(derived_key, derived_keylen);
req               473 security/keys/encrypted-keys/encrypted.c 	ret = PTR_ERR(req);
req               474 security/keys/encrypted-keys/encrypted.c 	if (IS_ERR(req))
req               487 security/keys/encrypted-keys/encrypted.c 	skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
req               488 security/keys/encrypted-keys/encrypted.c 	ret = crypto_skcipher_encrypt(req);
req               489 security/keys/encrypted-keys/encrypted.c 	tfm = crypto_skcipher_reqtfm(req);
req               490 security/keys/encrypted-keys/encrypted.c 	skcipher_request_free(req);
req               567 security/keys/encrypted-keys/encrypted.c 	struct skcipher_request *req;
req               579 security/keys/encrypted-keys/encrypted.c 	req = init_skcipher_req(derived_key, derived_keylen);
req               580 security/keys/encrypted-keys/encrypted.c 	ret = PTR_ERR(req);
req               581 security/keys/encrypted-keys/encrypted.c 	if (IS_ERR(req))
req               593 security/keys/encrypted-keys/encrypted.c 	skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
req               594 security/keys/encrypted-keys/encrypted.c 	ret = crypto_skcipher_decrypt(req);
req               595 security/keys/encrypted-keys/encrypted.c 	tfm = crypto_skcipher_reqtfm(req);
req               596 security/keys/encrypted-keys/encrypted.c 	skcipher_request_free(req);
req              2089 security/security.c void security_req_classify_flow(const struct request_sock *req, struct flowi *fl)
req              2091 security/security.c 	call_void_hook(req_classify_flow, req, fl);
req              2102 security/security.c 			struct sk_buff *skb, struct request_sock *req)
req              2104 security/security.c 	return call_int_hook(inet_conn_request, 0, sk, skb, req);
req              2109 security/security.c 			const struct request_sock *req)
req              2111 security/security.c 	call_void_hook(inet_csk_clone, newsk, req);
req              5362 security/selinux/hooks.c 				     struct request_sock *req)
req              5366 security/selinux/hooks.c 	u16 family = req->rsk_ops->family;
req              5376 security/selinux/hooks.c 	req->secid = connsid;
req              5377 security/selinux/hooks.c 	req->peer_secid = peersid;
req              5379 security/selinux/hooks.c 	return selinux_netlbl_inet_conn_request(req, family);
req              5383 security/selinux/hooks.c 				   const struct request_sock *req)
req              5387 security/selinux/hooks.c 	newsksec->sid = req->secid;
req              5388 security/selinux/hooks.c 	newsksec->peer_sid = req->peer_secid;
req              5396 security/selinux/hooks.c 	selinux_netlbl_inet_csk_clone(newsk, req->rsk_ops->family);
req              5434 security/selinux/hooks.c static void selinux_req_classify_flow(const struct request_sock *req,
req              5437 security/selinux/hooks.c 	fl->flowi_secid = req->secid;
req                44 security/selinux/include/netlabel.h int selinux_netlbl_inet_conn_request(struct request_sock *req, u16 family);
req               112 security/selinux/include/netlabel.h static inline int selinux_netlbl_inet_conn_request(struct request_sock *req,
req               324 security/selinux/netlabel.c int selinux_netlbl_inet_conn_request(struct request_sock *req, u16 family)
req               333 security/selinux/netlabel.c 	rc = security_netlbl_sid_to_secattr(&selinux_state, req->secid,
req               337 security/selinux/netlabel.c 	rc = netlbl_req_setattr(req, &secattr);
req               688 security/selinux/selinuxfs.c 	char *req = NULL;
req               708 security/selinux/selinuxfs.c 	req = memdup_user_nul(buf, count);
req               709 security/selinux/selinuxfs.c 	if (IS_ERR(req)) {
req               710 security/selinux/selinuxfs.c 		rc = PTR_ERR(req);
req               711 security/selinux/selinuxfs.c 		req = NULL;
req               729 security/selinux/selinuxfs.c 	if (sscanf(req, "%s %s %hu %s", oldcon, newcon, &tclass, taskcon) != 4)
req               748 security/selinux/selinuxfs.c 	kfree(req);
req              4074 security/smack/smack_lsm.c 				   struct request_sock *req)
req              4146 security/smack/smack_lsm.c 	req->peer_secid = skp->smk_secid;
req              4160 security/smack/smack_lsm.c 		rc = netlbl_req_setattr(req, &skp->smk_netlabel);
req              4162 security/smack/smack_lsm.c 		netlbl_req_delattr(req);
req              4175 security/smack/smack_lsm.c 				 const struct request_sock *req)
req              4180 security/smack/smack_lsm.c 	if (req->peer_secid != 0) {
req              4181 security/smack/smack_lsm.c 		skp = smack_from_secid(req->peer_secid);
req               305 sound/pci/hda/patch_ca0132.c 	int req; /*effect module request*/
req               315 sound/pci/hda/patch_ca0132.c 	  .req = 8,
req               323 sound/pci/hda/patch_ca0132.c 	  .req = 45,
req               331 sound/pci/hda/patch_ca0132.c 	  .req = 11,
req               339 sound/pci/hda/patch_ca0132.c 	  .req = 12,
req               347 sound/pci/hda/patch_ca0132.c 	  .req = 13,
req               355 sound/pci/hda/patch_ca0132.c 	  .req = 14,
req               363 sound/pci/hda/patch_ca0132.c 	  .req = 15,
req               371 sound/pci/hda/patch_ca0132.c 	  .req = 16,
req               379 sound/pci/hda/patch_ca0132.c 	  .req = 17,
req               387 sound/pci/hda/patch_ca0132.c 	  .req = 18,
req               395 sound/pci/hda/patch_ca0132.c 	  .req = 19,
req               403 sound/pci/hda/patch_ca0132.c 	  .req = 20,
req              1757 sound/pci/hda/patch_ca0132.c 		unsigned int get_flag, unsigned int req,
req              1767 sound/pci/hda/patch_ca0132.c 	header |= (req & 0x7f) << 17;
req              1781 sound/pci/hda/patch_ca0132.c 		   unsigned int *get_flag, unsigned int *req,
req              1793 sound/pci/hda/patch_ca0132.c 	if (req)
req              1794 sound/pci/hda/patch_ca0132.c 		*req = (header >> 17) & 0x7f;
req              1937 sound/pci/hda/patch_ca0132.c 		int mod_id, int src_id, int req, int dir, const void *data,
req              1962 sound/pci/hda/patch_ca0132.c 	scp_send.hdr = make_scp_header(mod_id, src_id, (dir == SCP_GET), req,
req              2019 sound/pci/hda/patch_ca0132.c 			int src_id, int req, const void *data, unsigned int len)
req              2021 sound/pci/hda/patch_ca0132.c 	return dspio_scp(codec, mod_id, src_id, req, SCP_SET, data, len, NULL,
req              2026 sound/pci/hda/patch_ca0132.c 			int req, const unsigned int data)
req              2028 sound/pci/hda/patch_ca0132.c 	return dspio_set_param(codec, mod_id, 0x20, req, &data,
req              2033 sound/pci/hda/patch_ca0132.c 			int req, const unsigned int data)
req              2035 sound/pci/hda/patch_ca0132.c 	return dspio_set_param(codec, mod_id, 0x00, req, &data,
req              3845 sound/pci/hda/patch_ca0132.c 			ca0132_tuning_ctls[i].req,
req               256 sound/ppc/awacs.c 	struct adb_request req;
req               257 sound/ppc/awacs.c 	cuda_request(&req, NULL, 5, CUDA_PACKET, CUDA_GET_SET_IIC, 0x8a,
req               259 sound/ppc/awacs.c 	while (! req.complete)
req               402 sound/ppc/pmac.c 	unsigned short req, res ;
req               423 sound/ppc/pmac.c 	req = le16_to_cpu(cp->req_count);
req               426 sound/ppc/pmac.c 	phy += (req - res);
req              1213 sound/soc/codecs/ab8500-codec.c 	unsigned int req;
req              1220 sound/soc/codecs/ab8500-codec.c 	req = ucontrol->value.enumerated.item[0];
req              1221 sound/soc/codecs/ab8500-codec.c 	if (req >= ARRAY_SIZE(enum_anc_state)) {
req              1225 sound/soc/codecs/ab8500-codec.c 	if (req != ANC_APPLY_FIR_IIR && req != ANC_APPLY_FIR &&
req              1226 sound/soc/codecs/ab8500-codec.c 		req != ANC_APPLY_IIR) {
req              1228 sound/soc/codecs/ab8500-codec.c 			__func__, enum_anc_state[req]);
req              1232 sound/soc/codecs/ab8500-codec.c 	apply_fir = req == ANC_APPLY_FIR || req == ANC_APPLY_FIR_IIR;
req              1233 sound/soc/codecs/ab8500-codec.c 	apply_iir = req == ANC_APPLY_IIR || req == ANC_APPLY_FIR_IIR;
req              2504 sound/soc/codecs/wcd9335.c 				   int micb_num, int req, bool is_dapm)
req              2535 sound/soc/codecs/wcd9335.c 	switch (req) {
req               845 sound/soc/intel/haswell/sst-haswell-ipc.c 	struct sst_hsw_ipc_volume_req *req;
req               860 sound/soc/intel/haswell/sst-haswell-ipc.c 	req = &stream->vol_req;
req               861 sound/soc/intel/haswell/sst-haswell-ipc.c 	req->target_volume = volume;
req               869 sound/soc/intel/haswell/sst-haswell-ipc.c 			req->channel = 1;
req               871 sound/soc/intel/haswell/sst-haswell-ipc.c 			req->channel = 0;
req               873 sound/soc/intel/haswell/sst-haswell-ipc.c 			req->channel = SST_HSW_CHANNELS_ALL;
req               880 sound/soc/intel/haswell/sst-haswell-ipc.c 		req->channel = channel;
req               883 sound/soc/intel/haswell/sst-haswell-ipc.c 	request.data = req;
req               884 sound/soc/intel/haswell/sst-haswell-ipc.c 	request.size = sizeof(*req);
req               911 sound/soc/intel/haswell/sst-haswell-ipc.c 	struct sst_hsw_ipc_volume_req req;
req               926 sound/soc/intel/haswell/sst-haswell-ipc.c 			req.channel = 1;
req               928 sound/soc/intel/haswell/sst-haswell-ipc.c 			req.channel = 0;
req               930 sound/soc/intel/haswell/sst-haswell-ipc.c 			req.channel = SST_HSW_CHANNELS_ALL;
req               937 sound/soc/intel/haswell/sst-haswell-ipc.c 		req.channel = channel;
req               946 sound/soc/intel/haswell/sst-haswell-ipc.c 	req.curve_duration = hsw->curve_duration;
req               947 sound/soc/intel/haswell/sst-haswell-ipc.c 	req.curve_type = hsw->curve_type;
req               948 sound/soc/intel/haswell/sst-haswell-ipc.c 	req.target_volume = volume;
req               950 sound/soc/intel/haswell/sst-haswell-ipc.c 	request.data = &req;
req               951 sound/soc/intel/haswell/sst-haswell-ipc.c 	request.size = sizeof(req);
req                44 sound/usb/line6/midi.c 	int req, done;
req                47 sound/usb/line6/midi.c 		req = min(line6_midibuf_bytes_free(mb), line6->max_packet_size);
req                48 sound/usb/line6/midi.c 		done = snd_rawmidi_transmit_peek(substream, chunk, req);
req               572 sound/usb/mixer_scarlett_gen2.c 					  struct scarlett2_usb_packet *req,
req               578 sound/usb/mixer_scarlett_gen2.c 	req->cmd = cpu_to_le32(cmd);
req               579 sound/usb/mixer_scarlett_gen2.c 	req->size = cpu_to_le16(req_size);
req               580 sound/usb/mixer_scarlett_gen2.c 	req->seq = cpu_to_le16(seq);
req               581 sound/usb/mixer_scarlett_gen2.c 	req->error = 0;
req               582 sound/usb/mixer_scarlett_gen2.c 	req->pad = 0;
req               593 sound/usb/mixer_scarlett_gen2.c 	struct scarlett2_usb_packet *req = NULL, *resp = NULL;
req               596 sound/usb/mixer_scarlett_gen2.c 	req = kmalloc(req_buf_size, GFP_KERNEL);
req               597 sound/usb/mixer_scarlett_gen2.c 	if (!req) {
req               612 sound/usb/mixer_scarlett_gen2.c 	scarlett2_fill_request_header(private, req, cmd, req_size);
req               615 sound/usb/mixer_scarlett_gen2.c 		memcpy(req->data, req_data, req_size);
req               623 sound/usb/mixer_scarlett_gen2.c 			req,
req               657 sound/usb/mixer_scarlett_gen2.c 	if (resp->cmd != req->cmd ||
req               658 sound/usb/mixer_scarlett_gen2.c 	    resp->seq != req->seq ||
req               667 sound/usb/mixer_scarlett_gen2.c 			le32_to_cpu(req->cmd), le32_to_cpu(resp->cmd),
req               668 sound/usb/mixer_scarlett_gen2.c 			le16_to_cpu(req->seq), le16_to_cpu(resp->seq),
req               682 sound/usb/mixer_scarlett_gen2.c 	kfree(req);
req               690 sound/usb/mixer_scarlett_gen2.c 	__le32 req = cpu_to_le32(SCARLETT2_USB_CONFIG_SAVE);
req               693 sound/usb/mixer_scarlett_gen2.c 		      &req, sizeof(u32),
req               719 sound/usb/mixer_scarlett_gen2.c 	} __packed req;
req               728 sound/usb/mixer_scarlett_gen2.c 	req.offset = cpu_to_le32(config_item.offset + index * config_item.size);
req               729 sound/usb/mixer_scarlett_gen2.c 	req.bytes = cpu_to_le32(config_item.size);
req               730 sound/usb/mixer_scarlett_gen2.c 	req.value = cpu_to_le32(value);
req               732 sound/usb/mixer_scarlett_gen2.c 			    &req, sizeof(u32) * 2 + config_item.size,
req               758 sound/usb/mixer_scarlett_gen2.c 	} __packed req;
req               760 sound/usb/mixer_scarlett_gen2.c 	req.offset = cpu_to_le32(offset);
req               761 sound/usb/mixer_scarlett_gen2.c 	req.size = cpu_to_le32(size);
req               763 sound/usb/mixer_scarlett_gen2.c 			     &req, sizeof(req), buf, size);
req               799 sound/usb/mixer_scarlett_gen2.c 	} __packed req;
req               805 sound/usb/mixer_scarlett_gen2.c 	req.mix_num = cpu_to_le16(mix_num);
req               808 sound/usb/mixer_scarlett_gen2.c 		req.data[i] = cpu_to_le16(
req               813 sound/usb/mixer_scarlett_gen2.c 			     &req, (num_mixer_in + 1) * sizeof(u16),
req               856 sound/usb/mixer_scarlett_gen2.c 	} __packed req;
req               858 sound/usb/mixer_scarlett_gen2.c 	req.pad = 0;
req               866 sound/usb/mixer_scarlett_gen2.c 		req.num = cpu_to_le16(rate);
req               884 sound/usb/mixer_scarlett_gen2.c 				req.data[i] = !port_id
req               900 sound/usb/mixer_scarlett_gen2.c 				    &req, (i + 1) * sizeof(u32),
req               917 sound/usb/mixer_scarlett_gen2.c 	} __packed req;
req               921 sound/usb/mixer_scarlett_gen2.c 	req.pad = 0;
req               922 sound/usb/mixer_scarlett_gen2.c 	req.num_meters = cpu_to_le16(SCARLETT2_NUM_METERS);
req               923 sound/usb/mixer_scarlett_gen2.c 	req.magic = cpu_to_le32(SCARLETT2_USB_METER_LEVELS_GET_MAGIC);
req               925 sound/usb/mixer_scarlett_gen2.c 			    &req, sizeof(req), resp, sizeof(resp));
req                29 sound/xen/xen_snd_front.c 	struct xensnd_req *req;
req                31 sound/xen/xen_snd_front.c 	req = RING_GET_REQUEST(&evtchnl->u.req.ring,
req                32 sound/xen/xen_snd_front.c 			       evtchnl->u.req.ring.req_prod_pvt);
req                33 sound/xen/xen_snd_front.c 	req->operation = operation;
req                34 sound/xen/xen_snd_front.c 	req->id = evtchnl->evt_next_id++;
req                35 sound/xen/xen_snd_front.c 	evtchnl->evt_id = req->id;
req                36 sound/xen/xen_snd_front.c 	return req;
req                44 sound/xen/xen_snd_front.c 	reinit_completion(&evtchnl->u.req.completion);
req                51 sound/xen/xen_snd_front.c 	if (wait_for_completion_timeout(&evtchnl->u.req.completion,
req                55 sound/xen/xen_snd_front.c 	return evtchnl->u.req.resp_status;
req                62 sound/xen/xen_snd_front.c 	struct xensnd_req *req;
req                65 sound/xen/xen_snd_front.c 	mutex_lock(&evtchnl->u.req.req_io_lock);
req                68 sound/xen/xen_snd_front.c 	req = be_stream_prepare_req(evtchnl, XENSND_OP_HW_PARAM_QUERY);
req                69 sound/xen/xen_snd_front.c 	req->op.hw_param = *hw_param_req;
req                78 sound/xen/xen_snd_front.c 		*hw_param_resp = evtchnl->u.req.resp.hw_param;
req                80 sound/xen/xen_snd_front.c 	mutex_unlock(&evtchnl->u.req.req_io_lock);
req                90 sound/xen/xen_snd_front.c 	struct xensnd_req *req;
req                93 sound/xen/xen_snd_front.c 	mutex_lock(&evtchnl->u.req.req_io_lock);
req                96 sound/xen/xen_snd_front.c 	req = be_stream_prepare_req(evtchnl, XENSND_OP_OPEN);
req                97 sound/xen/xen_snd_front.c 	req->op.open.pcm_format = format;
req                98 sound/xen/xen_snd_front.c 	req->op.open.pcm_channels = channels;
req                99 sound/xen/xen_snd_front.c 	req->op.open.pcm_rate = rate;
req               100 sound/xen/xen_snd_front.c 	req->op.open.buffer_sz = buffer_sz;
req               101 sound/xen/xen_snd_front.c 	req->op.open.period_sz = period_sz;
req               102 sound/xen/xen_snd_front.c 	req->op.open.gref_directory =
req               111 sound/xen/xen_snd_front.c 	mutex_unlock(&evtchnl->u.req.req_io_lock);
req               117 sound/xen/xen_snd_front.c 	struct xensnd_req *req;
req               120 sound/xen/xen_snd_front.c 	mutex_lock(&evtchnl->u.req.req_io_lock);
req               123 sound/xen/xen_snd_front.c 	req = be_stream_prepare_req(evtchnl, XENSND_OP_CLOSE);
req               131 sound/xen/xen_snd_front.c 	mutex_unlock(&evtchnl->u.req.req_io_lock);
req               138 sound/xen/xen_snd_front.c 	struct xensnd_req *req;
req               141 sound/xen/xen_snd_front.c 	mutex_lock(&evtchnl->u.req.req_io_lock);
req               144 sound/xen/xen_snd_front.c 	req = be_stream_prepare_req(evtchnl, XENSND_OP_WRITE);
req               145 sound/xen/xen_snd_front.c 	req->op.rw.length = count;
req               146 sound/xen/xen_snd_front.c 	req->op.rw.offset = pos;
req               154 sound/xen/xen_snd_front.c 	mutex_unlock(&evtchnl->u.req.req_io_lock);
req               161 sound/xen/xen_snd_front.c 	struct xensnd_req *req;
req               164 sound/xen/xen_snd_front.c 	mutex_lock(&evtchnl->u.req.req_io_lock);
req               167 sound/xen/xen_snd_front.c 	req = be_stream_prepare_req(evtchnl, XENSND_OP_READ);
req               168 sound/xen/xen_snd_front.c 	req->op.rw.length = count;
req               169 sound/xen/xen_snd_front.c 	req->op.rw.offset = pos;
req               177 sound/xen/xen_snd_front.c 	mutex_unlock(&evtchnl->u.req.req_io_lock);
req               184 sound/xen/xen_snd_front.c 	struct xensnd_req *req;
req               187 sound/xen/xen_snd_front.c 	mutex_lock(&evtchnl->u.req.req_io_lock);
req               190 sound/xen/xen_snd_front.c 	req = be_stream_prepare_req(evtchnl, XENSND_OP_TRIGGER);
req               191 sound/xen/xen_snd_front.c 	req->op.trigger.type = type;
req               199 sound/xen/xen_snd_front.c 	mutex_unlock(&evtchnl->u.req.req_io_lock);
req               273 sound/xen/xen_snd_front_alsa.c 	struct xensnd_query_hw_param req;
req               282 sound/xen/xen_snd_front_alsa.c 	req.formats = to_sndif_formats_mask((u64)formats->bits[0] |
req               285 sound/xen/xen_snd_front_alsa.c 	req.rates.min = rates->min;
req               286 sound/xen/xen_snd_front_alsa.c 	req.rates.max = rates->max;
req               288 sound/xen/xen_snd_front_alsa.c 	req.channels.min = channels->min;
req               289 sound/xen/xen_snd_front_alsa.c 	req.channels.max = channels->max;
req               291 sound/xen/xen_snd_front_alsa.c 	req.buffer.min = buffer->min;
req               292 sound/xen/xen_snd_front_alsa.c 	req.buffer.max = buffer->max;
req               294 sound/xen/xen_snd_front_alsa.c 	req.period.min = period->min;
req               295 sound/xen/xen_snd_front_alsa.c 	req.period.max = period->max;
req               297 sound/xen/xen_snd_front_alsa.c 	ret = xen_snd_front_stream_query_hw_param(&stream->evt_pair->req,
req               298 sound/xen/xen_snd_front_alsa.c 						  &req, &resp);
req               507 sound/xen/xen_snd_front_alsa.c 	ret = xen_snd_front_stream_close(&stream->evt_pair->req);
req               530 sound/xen/xen_snd_front_alsa.c 		ret = xen_snd_front_stream_prepare(&stream->evt_pair->req,
req               572 sound/xen/xen_snd_front_alsa.c 	return xen_snd_front_stream_trigger(&stream->evt_pair->req, type);
req               617 sound/xen/xen_snd_front_alsa.c 	return xen_snd_front_stream_write(&stream->evt_pair->req, pos, count);
req               631 sound/xen/xen_snd_front_alsa.c 	return xen_snd_front_stream_write(&stream->evt_pair->req, pos, count);
req               644 sound/xen/xen_snd_front_alsa.c 	ret = xen_snd_front_stream_read(&stream->evt_pair->req, pos, count);
req               662 sound/xen/xen_snd_front_alsa.c 	ret = xen_snd_front_stream_read(&stream->evt_pair->req, pos, count);
req               682 sound/xen/xen_snd_front_alsa.c 	return xen_snd_front_stream_write(&stream->evt_pair->req, pos, count);
req                34 sound/xen/xen_snd_front_evtchnl.c 	rp = channel->u.req.ring.sring->rsp_prod;
req                43 sound/xen/xen_snd_front_evtchnl.c 	for (i = channel->u.req.ring.rsp_cons; i != rp; i++) {
req                44 sound/xen/xen_snd_front_evtchnl.c 		resp = RING_GET_RESPONSE(&channel->u.req.ring, i);
req                57 sound/xen/xen_snd_front_evtchnl.c 			channel->u.req.resp_status = resp->status;
req                58 sound/xen/xen_snd_front_evtchnl.c 			complete(&channel->u.req.completion);
req                61 sound/xen/xen_snd_front_evtchnl.c 			channel->u.req.resp_status = resp->status;
req                62 sound/xen/xen_snd_front_evtchnl.c 			channel->u.req.resp.hw_param =
req                64 sound/xen/xen_snd_front_evtchnl.c 			complete(&channel->u.req.completion);
req                75 sound/xen/xen_snd_front_evtchnl.c 	channel->u.req.ring.rsp_cons = i;
req                76 sound/xen/xen_snd_front_evtchnl.c 	if (i != channel->u.req.ring.req_prod_pvt) {
req                79 sound/xen/xen_snd_front_evtchnl.c 		RING_FINAL_CHECK_FOR_RESPONSES(&channel->u.req.ring,
req                84 sound/xen/xen_snd_front_evtchnl.c 		channel->u.req.ring.sring->rsp_event = i + 1;
req               141 sound/xen/xen_snd_front_evtchnl.c 	channel->u.req.ring.req_prod_pvt++;
req               142 sound/xen/xen_snd_front_evtchnl.c 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&channel->u.req.ring, notify);
req               153 sound/xen/xen_snd_front_evtchnl.c 		page = (unsigned long)channel->u.req.ring.sring;
req               163 sound/xen/xen_snd_front_evtchnl.c 		channel->u.req.resp_status = -EIO;
req               164 sound/xen/xen_snd_front_evtchnl.c 		complete_all(&channel->u.req.completion);
req               190 sound/xen/xen_snd_front_evtchnl.c 		evtchnl_free(front_info, &front_info->evt_pairs[i].req);
req               235 sound/xen/xen_snd_front_evtchnl.c 		init_completion(&channel->u.req.completion);
req               236 sound/xen/xen_snd_front_evtchnl.c 		mutex_init(&channel->u.req.req_io_lock);
req               238 sound/xen/xen_snd_front_evtchnl.c 		FRONT_RING_INIT(&channel->u.req.ring, sring, XEN_PAGE_SIZE);
req               242 sound/xen/xen_snd_front_evtchnl.c 			channel->u.req.ring.sring = NULL;
req               318 sound/xen/xen_snd_front_evtchnl.c 					    &front_info->evt_pairs[index].req,
req               338 sound/xen/xen_snd_front_evtchnl.c 					    &front_info->evt_pairs[index].req,
req               412 sound/xen/xen_snd_front_evtchnl.c 					      &front_info->evt_pairs[index].req,
req               432 sound/xen/xen_snd_front_evtchnl.c 					      &front_info->evt_pairs[index].req,
req               475 sound/xen/xen_snd_front_evtchnl.c 	mutex_lock(&evt_pair->req.ring_io_lock);
req               476 sound/xen/xen_snd_front_evtchnl.c 	evt_pair->req.state = state;
req               477 sound/xen/xen_snd_front_evtchnl.c 	mutex_unlock(&evt_pair->req.ring_io_lock);
req               486 sound/xen/xen_snd_front_evtchnl.c 	mutex_lock(&evt_pair->req.ring_io_lock);
req               487 sound/xen/xen_snd_front_evtchnl.c 	evt_pair->req.evt_next_id = 0;
req               488 sound/xen/xen_snd_front_evtchnl.c 	mutex_unlock(&evt_pair->req.ring_io_lock);
req                67 sound/xen/xen_snd_front_evtchnl.h 		} req;
req                77 sound/xen/xen_snd_front_evtchnl.h 	struct xen_snd_front_evtchnl req;
req                33 tools/gpio/gpio-event-mon.c 	struct gpioevent_request req;
req                51 tools/gpio/gpio-event-mon.c 	req.lineoffset = line;
req                52 tools/gpio/gpio-event-mon.c 	req.handleflags = handleflags;
req                53 tools/gpio/gpio-event-mon.c 	req.eventflags = eventflags;
req                54 tools/gpio/gpio-event-mon.c 	strcpy(req.consumer_label, "gpio-event-mon");
req                56 tools/gpio/gpio-event-mon.c 	ret = ioctl(fd, GPIO_GET_LINEEVENT_IOCTL, &req);
req                66 tools/gpio/gpio-event-mon.c 	ret = ioctl(req.fd, GPIOHANDLE_GET_LINE_VALUES_IOCTL, &data);
req                81 tools/gpio/gpio-event-mon.c 		ret = read(req.fd, &event, sizeof(event));
req                63 tools/gpio/gpio-utils.c 	struct gpiohandle_request req;
req                82 tools/gpio/gpio-utils.c 		req.lineoffsets[i] = lines[i];
req                84 tools/gpio/gpio-utils.c 	req.flags = flag;
req                85 tools/gpio/gpio-utils.c 	strcpy(req.consumer_label, consumer_label);
req                86 tools/gpio/gpio-utils.c 	req.lines = nlines;
req                88 tools/gpio/gpio-utils.c 		memcpy(req.default_values, data, sizeof(req.default_values));
req                90 tools/gpio/gpio-utils.c 	ret = ioctl(fd, GPIO_GET_LINEHANDLE_IOCTL, &req);
req               101 tools/gpio/gpio-utils.c 	return ret < 0 ? ret : req.fd;
req              1554 tools/include/nolibc/nolibc.h int sys_ioctl(int fd, unsigned long req, void *value)
req              1556 tools/include/nolibc/nolibc.h 	return my_syscall3(__NR_ioctl, fd, req, value);
req              1952 tools/include/nolibc/nolibc.h int ioctl(int fd, unsigned long req, void *value)
req              1954 tools/include/nolibc/nolibc.h 	int ret = sys_ioctl(fd, req, value);
req               139 tools/lib/bpf/netlink.c 	} req;
req               146 tools/lib/bpf/netlink.c 	memset(&req, 0, sizeof(req));
req               147 tools/lib/bpf/netlink.c 	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
req               148 tools/lib/bpf/netlink.c 	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
req               149 tools/lib/bpf/netlink.c 	req.nh.nlmsg_type = RTM_SETLINK;
req               150 tools/lib/bpf/netlink.c 	req.nh.nlmsg_pid = 0;
req               151 tools/lib/bpf/netlink.c 	req.nh.nlmsg_seq = ++seq;
req               152 tools/lib/bpf/netlink.c 	req.ifinfo.ifi_family = AF_UNSPEC;
req               153 tools/lib/bpf/netlink.c 	req.ifinfo.ifi_index = ifindex;
req               156 tools/lib/bpf/netlink.c 	nla = (struct nlattr *)(((char *)&req)
req               157 tools/lib/bpf/netlink.c 				+ NLMSG_ALIGN(req.nh.nlmsg_len));
req               177 tools/lib/bpf/netlink.c 	req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len);
req               179 tools/lib/bpf/netlink.c 	if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
req               290 tools/lib/bpf/netlink.c 	} req = {
req               298 tools/lib/bpf/netlink.c 	req.nlh.nlmsg_seq = seq;
req               299 tools/lib/bpf/netlink.c 	if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
req               328 tools/lib/bpf/netlink.c 	} req = {
req               337 tools/lib/bpf/netlink.c 	req.nlh.nlmsg_seq = seq;
req               338 tools/lib/bpf/netlink.c 	if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
req               367 tools/lib/bpf/netlink.c 	} req = {
req               376 tools/lib/bpf/netlink.c 	req.nlh.nlmsg_seq = seq;
req               377 tools/lib/bpf/netlink.c 	if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
req               406 tools/lib/bpf/netlink.c 	} req = {
req               416 tools/lib/bpf/netlink.c 	req.nlh.nlmsg_seq = seq;
req               417 tools/lib/bpf/netlink.c 	if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
req               154 tools/power/x86/intel-speed-select/isst-core.c 	unsigned int req, resp;
req               157 tools/power/x86/intel-speed-select/isst-core.c 	req = level | (avx_level << 16);
req               159 tools/power/x86/intel-speed-select/isst-core.c 				     CONFIG_TDP_GET_TURBO_LIMIT_RATIOS, 0, req,
req               166 tools/power/x86/intel-speed-select/isst-core.c 		cpu, req, resp);
req               173 tools/power/x86/intel-speed-select/isst-core.c 	req = level | BIT(8) | (avx_level << 16);
req               175 tools/power/x86/intel-speed-select/isst-core.c 				     CONFIG_TDP_GET_TURBO_LIMIT_RATIOS, 0, req,
req               181 tools/power/x86/intel-speed-select/isst-core.c 		     req, resp);
req               248 tools/power/x86/intel-speed-select/isst-core.c 	unsigned int req, resp;
req               274 tools/power/x86/intel-speed-select/isst-core.c 	req = level;
req               276 tools/power/x86/intel-speed-select/isst-core.c 				     CONFIG_TDP_PBF_GET_P1HI_P1LO_INFO, 0, req,
req               287 tools/power/x86/intel-speed-select/isst-core.c 	req = level;
req               289 tools/power/x86/intel-speed-select/isst-core.c 		cpu, CONFIG_TDP, CONFIG_TDP_PBF_GET_TDP_INFO, 0, req, &resp);
req               297 tools/power/x86/intel-speed-select/isst-core.c 	req = level;
req               299 tools/power/x86/intel-speed-select/isst-core.c 		cpu, CONFIG_TDP, CONFIG_TDP_PBF_GET_TJ_MAX_INFO, 0, req, &resp);
req               321 tools/power/x86/intel-speed-select/isst-core.c 	unsigned int req = 0, resp;
req               336 tools/power/x86/intel-speed-select/isst-core.c 			req = BIT(16);
req               339 tools/power/x86/intel-speed-select/isst-core.c 			req |= BIT(17);
req               341 tools/power/x86/intel-speed-select/isst-core.c 			req &= ~BIT(17);
req               344 tools/power/x86/intel-speed-select/isst-core.c 			req = BIT(17);
req               347 tools/power/x86/intel-speed-select/isst-core.c 			req |= BIT(16);
req               349 tools/power/x86/intel-speed-select/isst-core.c 			req &= ~BIT(16);
req               353 tools/power/x86/intel-speed-select/isst-core.c 				     CONFIG_TDP_SET_TDP_CONTROL, 0, req, &resp);
req               358 tools/power/x86/intel-speed-select/isst-core.c 		     cpu, pbf, req);
req               649 tools/power/x86/intel-speed-select/isst-core.c 	unsigned int req, resp;
req               659 tools/power/x86/intel-speed-select/isst-core.c 	req = resp;
req               662 tools/power/x86/intel-speed-select/isst-core.c 		req = req | BIT(1);
req               664 tools/power/x86/intel-speed-select/isst-core.c 		req = req & ~BIT(1);
req               667 tools/power/x86/intel-speed-select/isst-core.c 		req = req | BIT(2);
req               669 tools/power/x86/intel-speed-select/isst-core.c 		req = req & ~BIT(2);
req               672 tools/power/x86/intel-speed-select/isst-core.c 				     BIT(MBOX_CMD_WRITE_BIT), req, &resp);
req               677 tools/power/x86/intel-speed-select/isst-core.c 		     priority_type, req);
req               706 tools/power/x86/intel-speed-select/isst-core.c 	unsigned int req, resp;
req               710 tools/power/x86/intel-speed-select/isst-core.c 	req = clos_config->epp & 0x0f;
req               711 tools/power/x86/intel-speed-select/isst-core.c 	req |= (clos_config->clos_prop_prio & 0x0f) << 4;
req               712 tools/power/x86/intel-speed-select/isst-core.c 	req |= (clos_config->clos_min & 0xff) << 8;
req               713 tools/power/x86/intel-speed-select/isst-core.c 	req |= (clos_config->clos_max & 0xff) << 16;
req               714 tools/power/x86/intel-speed-select/isst-core.c 	req |= (clos_config->clos_desired & 0xff) << 24;
req               718 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_mbox_command(cpu, CONFIG_CLOS, CLOS_PM_CLOS, param, req,
req               723 tools/power/x86/intel-speed-select/isst-core.c 	debug_printf("cpu:%d CLOS_PM_CLOS param:%x req:%x\n", cpu, param, req);
req               751 tools/power/x86/intel-speed-select/isst-core.c 	unsigned int req, resp;
req               755 tools/power/x86/intel-speed-select/isst-core.c 	req = (clos_id & 0x03) << 16;
req               760 tools/power/x86/intel-speed-select/isst-core.c 				     req, &resp);
req               765 tools/power/x86/intel-speed-select/isst-core.c 		     req);
req               751 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	struct msr_hwp_request req;
req               763 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	read_hwp_request(cpu, &req, MSR_HWP_REQUEST);
req               764 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	print_hwp_request(cpu, &req, "");
req               774 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	struct msr_hwp_request req;
req               780 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	read_hwp_request(first_cpu_in_pkg[pkg], &req, MSR_HWP_REQUEST_PKG);
req               781 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	print_hwp_request_pkg(pkg, &req, "");
req               871 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c int verify_hwp_req_self_consistency(int cpu, struct msr_hwp_request *req)
req               874 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	if (req->hwp_min > req->hwp_max) {
req               876 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 			cpu, req->hwp_min, req->hwp_max);
req               880 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	if (req->hwp_desired && (req->hwp_desired > req->hwp_max)) {
req               882 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 			cpu, req->hwp_desired, req->hwp_max);
req               885 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	if (req->hwp_desired && (req->hwp_desired < req->hwp_min)) {
req               887 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 			cpu, req->hwp_desired, req->hwp_min);
req               893 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c int check_hwp_request_v_hwp_capabilities(int cpu, struct msr_hwp_request *req, struct msr_hwp_cap *cap)
req               896 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		if (req->hwp_max > cap->highest)
req               898 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				cpu, req->hwp_max, cap->highest);
req               899 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		if (req->hwp_max < cap->lowest)
req               901 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				cpu, req->hwp_max, cap->lowest);
req               905 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		if (req->hwp_min > cap->highest)
req               907 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				cpu, req->hwp_min, cap->highest);
req               908 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		if (req->hwp_min < cap->lowest)
req               910 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				cpu, req->hwp_min, cap->lowest);
req               913 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	if (update_hwp_min && update_hwp_max && (req->hwp_min > req->hwp_max))
req               915 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 			cpu, req->hwp_min, req->hwp_max);
req               917 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	if (update_hwp_desired && req->hwp_desired) {
req               918 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		if (req->hwp_desired > req->hwp_max)
req               920 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				cpu, req->hwp_desired, req->hwp_max);
req               921 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		if (req->hwp_desired < req->hwp_min)
req               923 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				cpu, req->hwp_desired, req->hwp_min);
req               924 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		if (req->hwp_desired < cap->lowest)
req               926 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				cpu, req->hwp_desired, cap->lowest);
req               927 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		if (req->hwp_desired > cap->highest)
req               929 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				cpu, req->hwp_desired, cap->highest);
req               937 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	struct msr_hwp_request req;
req               942 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	read_hwp_request(cpu, &req, msr_offset);
req               944 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		print_hwp_request(cpu, &req, "old: ");
req               947 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		req.hwp_min = req_update.hwp_min;
req               950 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		req.hwp_max = req_update.hwp_max;
req               953 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		req.hwp_desired = req_update.hwp_desired;
req               956 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		req.hwp_window = req_update.hwp_window;
req               959 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		req.hwp_epp = req_update.hwp_epp;
req               961 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	req.hwp_use_pkg = req_update.hwp_use_pkg;
req               968 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		check_hwp_request_v_hwp_capabilities(cpu, &req, &cap);
req               970 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	verify_hwp_req_self_consistency(cpu, &req);
req               972 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	write_hwp_request(cpu, &req, msr_offset);
req               975 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		read_hwp_request(cpu, &req, msr_offset);
req               976 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		print_hwp_request(cpu, &req, "new: ");
req               982 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	struct msr_hwp_request req;
req               988 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	read_hwp_request(cpu, &req, msr_offset);
req               990 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		print_hwp_request_pkg(pkg, &req, "old: ");
req               993 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		req.hwp_min = req_update.hwp_min;
req               996 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		req.hwp_max = req_update.hwp_max;
req               999 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		req.hwp_desired = req_update.hwp_desired;
req              1002 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		req.hwp_window = req_update.hwp_window;
req              1005 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		req.hwp_epp = req_update.hwp_epp;
req              1012 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		check_hwp_request_v_hwp_capabilities(cpu, &req, &cap);
req              1014 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	verify_hwp_req_self_consistency(cpu, &req);
req              1016 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	write_hwp_request(cpu, &req, msr_offset);
req              1019 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		read_hwp_request(cpu, &req, msr_offset);
req              1020 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		print_hwp_request_pkg(pkg, &req, "new: ");
req               249 tools/testing/nvdimm/test/iomap.c 			struct nfit_test_request *req;
req               259 tools/testing/nvdimm/test/iomap.c 			list_for_each_entry(req, &nfit_res->requests, list)
req               260 tools/testing/nvdimm/test/iomap.c 				if (req->res.start == start) {
req               261 tools/testing/nvdimm/test/iomap.c 					res = &req->res;
req               262 tools/testing/nvdimm/test/iomap.c 					list_del(&req->list);
req               271 tools/testing/nvdimm/test/iomap.c 				kfree(req);
req               287 tools/testing/nvdimm/test/iomap.c 			struct nfit_test_request *req;
req               299 tools/testing/nvdimm/test/iomap.c 			list_for_each_entry(req, &nfit_res->requests, list)
req               300 tools/testing/nvdimm/test/iomap.c 				if (start == req->res.start) {
req               301 tools/testing/nvdimm/test/iomap.c 					res = &req->res;
req               311 tools/testing/nvdimm/test/iomap.c 			req = kzalloc(sizeof(*req), GFP_KERNEL);
req               312 tools/testing/nvdimm/test/iomap.c 			if (!req)
req               314 tools/testing/nvdimm/test/iomap.c 			INIT_LIST_HEAD(&req->list);
req               315 tools/testing/nvdimm/test/iomap.c 			res = &req->res;
req               323 tools/testing/nvdimm/test/iomap.c 			list_add(&req->list, &nfit_res->requests);
req               149 tools/testing/selftests/net/ipv6_flowlabel.c 	struct in6_flowlabel_req req = {
req               157 tools/testing/selftests/net/ipv6_flowlabel.c 	req.flr_dst.s6_addr[0] = 0xfd;
req               158 tools/testing/selftests/net/ipv6_flowlabel.c 	req.flr_dst.s6_addr[15] = 0x1;
req               160 tools/testing/selftests/net/ipv6_flowlabel.c 	if (setsockopt(fd, SOL_IPV6, IPV6_FLOWLABEL_MGR, &req, sizeof(req)))
req                50 tools/testing/selftests/net/ipv6_flowlabel_mgr.c 	struct in6_flowlabel_req req = {
req                58 tools/testing/selftests/net/ipv6_flowlabel_mgr.c 	req.flr_dst.s6_addr[0] = 0xfd;
req                59 tools/testing/selftests/net/ipv6_flowlabel_mgr.c 	req.flr_dst.s6_addr[15] = 0x1;
req                61 tools/testing/selftests/net/ipv6_flowlabel_mgr.c 	return setsockopt(fd, SOL_IPV6, IPV6_FLOWLABEL_MGR, &req, sizeof(req));
req                66 tools/testing/selftests/net/ipv6_flowlabel_mgr.c 	struct in6_flowlabel_req req = {
req                71 tools/testing/selftests/net/ipv6_flowlabel_mgr.c 	return setsockopt(fd, SOL_IPV6, IPV6_FLOWLABEL_MGR, &req, sizeof(req));
req               178 tools/testing/selftests/net/psock_fanout.c 	struct tpacket_req req = {
req               192 tools/testing/selftests/net/psock_fanout.c 	if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, (void *) &req,
req               193 tools/testing/selftests/net/psock_fanout.c 		       sizeof(req))) {
req               198 tools/testing/selftests/net/psock_fanout.c 	ring = mmap(0, req.tp_block_size * req.tp_block_nr,
req                74 tools/testing/selftests/net/psock_tpacket.c 		struct tpacket_req  req;
req               632 tools/testing/selftests/net/psock_tpacket.c 	ring->req.tp_block_size = getpagesize() << 2;
req               633 tools/testing/selftests/net/psock_tpacket.c 	ring->req.tp_frame_size = TPACKET_ALIGNMENT << 7;
req               634 tools/testing/selftests/net/psock_tpacket.c 	ring->req.tp_block_nr = blocks;
req               636 tools/testing/selftests/net/psock_tpacket.c 	ring->req.tp_frame_nr = ring->req.tp_block_size /
req               637 tools/testing/selftests/net/psock_tpacket.c 				ring->req.tp_frame_size *
req               638 tools/testing/selftests/net/psock_tpacket.c 				ring->req.tp_block_nr;
req               640 tools/testing/selftests/net/psock_tpacket.c 	ring->mm_len = ring->req.tp_block_size * ring->req.tp_block_nr;
req               642 tools/testing/selftests/net/psock_tpacket.c 	ring->rd_num = ring->req.tp_frame_nr;
req               643 tools/testing/selftests/net/psock_tpacket.c 	ring->flen = ring->req.tp_frame_size;
req               681 tools/testing/selftests/net/psock_tpacket.c 		ret = setsockopt(sock, SOL_PACKET, type, &ring->req,
req               682 tools/testing/selftests/net/psock_tpacket.c 				 sizeof(ring->req));
req                89 tools/testing/selftests/net/txring_overwrite.c 	struct tpacket_req req = {};
req               105 tools/testing/selftests/net/txring_overwrite.c 	req.tp_block_size = getpagesize();
req               106 tools/testing/selftests/net/txring_overwrite.c 	req.tp_block_nr   = 1;
req               107 tools/testing/selftests/net/txring_overwrite.c 	req.tp_frame_size = getpagesize();
req               108 tools/testing/selftests/net/txring_overwrite.c 	req.tp_frame_nr   = 1;
req               111 tools/testing/selftests/net/txring_overwrite.c 		       (void *)&req, sizeof(req)))
req               114 tools/testing/selftests/net/txring_overwrite.c 	*ring = mmap(0, req.tp_block_size * req.tp_block_nr,
req              3087 tools/testing/selftests/seccomp/seccomp_bpf.c 	struct seccomp_notif req = {};
req              3151 tools/testing/selftests/seccomp/seccomp_bpf.c 	memset(&req, 0, sizeof(req));
req              3152 tools/testing/selftests/seccomp/seccomp_bpf.c 	req.pid = -1;
req              3154 tools/testing/selftests/seccomp/seccomp_bpf.c 	ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req);
req              3159 tools/testing/selftests/seccomp/seccomp_bpf.c 		req.pid = 0;
req              3160 tools/testing/selftests/seccomp/seccomp_bpf.c 		EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
req              3169 tools/testing/selftests/seccomp/seccomp_bpf.c 	EXPECT_EQ(req.data.nr,  __NR_getppid);
req              3171 tools/testing/selftests/seccomp/seccomp_bpf.c 	resp.id = req.id;
req              3193 tools/testing/selftests/seccomp/seccomp_bpf.c 	struct seccomp_notif req = {};
req              3217 tools/testing/selftests/seccomp/seccomp_bpf.c 	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
req              3218 tools/testing/selftests/seccomp/seccomp_bpf.c 	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req.id), 0);
req              3223 tools/testing/selftests/seccomp/seccomp_bpf.c 	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req.id), -1);
req              3225 tools/testing/selftests/seccomp/seccomp_bpf.c 	resp.id = req.id;
req              3244 tools/testing/selftests/seccomp/seccomp_bpf.c 	struct seccomp_notif req = {};
req              3281 tools/testing/selftests/seccomp/seccomp_bpf.c 	memset(&req, 0, sizeof(req));
req              3282 tools/testing/selftests/seccomp/seccomp_bpf.c 	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
req              3293 tools/testing/selftests/seccomp/seccomp_bpf.c 	resp.id = req.id;
req              3300 tools/testing/selftests/seccomp/seccomp_bpf.c 	memset(&req, 0, sizeof(req));
req              3301 tools/testing/selftests/seccomp/seccomp_bpf.c 	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
req              3303 tools/testing/selftests/seccomp/seccomp_bpf.c 	resp.id = req.id;
req              3354 tools/testing/selftests/seccomp/seccomp_bpf.c 	struct seccomp_notif req = {};
req              3369 tools/testing/selftests/seccomp/seccomp_bpf.c 	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
req              3370 tools/testing/selftests/seccomp/seccomp_bpf.c 	EXPECT_EQ(req.pid, pid);
req              3372 tools/testing/selftests/seccomp/seccomp_bpf.c 	resp.id = req.id;
req              3392 tools/testing/selftests/seccomp/seccomp_bpf.c 	struct seccomp_notif req = {};
req              3429 tools/testing/selftests/seccomp/seccomp_bpf.c 		ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
req              3434 tools/testing/selftests/seccomp/seccomp_bpf.c 		EXPECT_EQ(req.pid, 0);
req              3436 tools/testing/selftests/seccomp/seccomp_bpf.c 		resp.id = req.id;
req              3459 tools/testing/selftests/seccomp/seccomp_bpf.c 	struct seccomp_notif req = {};
req              3479 tools/testing/selftests/seccomp/seccomp_bpf.c 	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
req              3480 tools/testing/selftests/seccomp/seccomp_bpf.c 	EXPECT_EQ(req.pid, pid);
req              3482 tools/testing/selftests/seccomp/seccomp_bpf.c 	resp.id = req.id;
req               617 tools/testing/selftests/x86/sigreturn.c 		greg_t req = requested_regs[i], res = resulting_regs[i];
req               638 tools/testing/selftests/x86/sigreturn.c 			if (res == req)
req               641 tools/testing/selftests/x86/sigreturn.c 			if (cs_bits != 64 && ((res ^ req) & 0xFFFFFFFF) == 0) {
req               643 tools/testing/selftests/x86/sigreturn.c 				       (unsigned long long)req,
req               682 tools/testing/selftests/x86/sigreturn.c 		if (i == REG_CX && req != res) {
req               684 tools/testing/selftests/x86/sigreturn.c 			       (unsigned long long)req,
req               690 tools/testing/selftests/x86/sigreturn.c 		if (req != res && !ignore_reg) {
req               692 tools/testing/selftests/x86/sigreturn.c 			       i, (unsigned long long)req,
req               182 tools/testing/vsock/vsock_diag_test.c 	} req = {
req               184 tools/testing/vsock/vsock_diag_test.c 			.nlmsg_len = sizeof(req),
req               194 tools/testing/vsock/vsock_diag_test.c 		.iov_base = &req,
req               195 tools/testing/vsock/vsock_diag_test.c 		.iov_len = sizeof(req),
req                93 tools/usb/usbip/src/usbipd.c 	struct op_import_request req;
req               101 tools/usb/usbip/src/usbipd.c 	memset(&req, 0, sizeof(req));
req               103 tools/usb/usbip/src/usbipd.c 	rc = usbip_net_recv(sockfd, &req, sizeof(req));
req               108 tools/usb/usbip/src/usbipd.c 	PACK_OP_IMPORT_REQUEST(0, &req);
req               112 tools/usb/usbip/src/usbipd.c 		if (!strncmp(req.busid, edev->udev.busid, SYSFS_BUS_ID_SIZE)) {
req               113 tools/usb/usbip/src/usbipd.c 			info("found requested device: %s", req.busid);
req               128 tools/usb/usbip/src/usbipd.c 		info("requested device not found: %s", req.busid);
req               139 tools/usb/usbip/src/usbipd.c 		dbg("import request busid %s: failed", req.busid);
req               152 tools/usb/usbip/src/usbipd.c 	dbg("import request busid %s: complete", req.busid);
req               231 tools/usb/usbip/src/usbipd.c 	struct op_devlist_request req;
req               234 tools/usb/usbip/src/usbipd.c 	memset(&req, 0, sizeof(req));
req               236 tools/usb/usbip/src/usbipd.c 	rc = usbip_net_recv(connfd, &req, sizeof(req));
req               215 virt/kvm/kvm_main.c static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
req               223 virt/kvm/kvm_main.c 	if (req & KVM_REQUEST_WAIT)
req               248 virt/kvm/kvm_main.c bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
req               261 virt/kvm/kvm_main.c 		kvm_make_request(req, vcpu);
req               264 virt/kvm/kvm_main.c 		if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
req               268 virt/kvm/kvm_main.c 		    kvm_request_needs_ipi(vcpu, req))
req               272 virt/kvm/kvm_main.c 	called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
req               278 virt/kvm/kvm_main.c bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
req               285 virt/kvm/kvm_main.c 	called = kvm_make_vcpus_request_mask(kvm, req, NULL, cpus);