/linux-4.1.27/arch/arm64/crypto/ |
D | aes-ce.S |
    20   .macro load_round_keys, rounds, rk
    21   cmp \rounds, #12
    32   .macro enc_prepare, rounds, rk, ignore
    33   load_round_keys \rounds, \rk
    37   .macro enc_switch_key, rounds, rk, ignore
    38   load_round_keys \rounds, \rk
    42   .macro dec_prepare, rounds, rk, ignore
    43   load_round_keys \rounds, \rk
    91   .macro do_block_Nx, enc, rounds, i0, i1, i2, i3
    92   cmp \rounds, #12
    [all …]
|
D | aes-glue.c |
    58   int rounds, int blocks, int first);
    60   int rounds, int blocks, int first);
    63   int rounds, int blocks, u8 iv[], int first);
    65   int rounds, int blocks, u8 iv[], int first);
    68   int rounds, int blocks, u8 ctr[], int first);
    71   int rounds, int blocks, u8 const rk2[], u8 iv[],
    74   int rounds, int blocks, u8 const rk2[], u8 iv[],
    103  int err, first, rounds = 6 + ctx->key_length / 4; in ecb_encrypt() local
    114  (u8 *)ctx->key_enc, rounds, blocks, first); in ecb_encrypt()
    125  int err, first, rounds = 6 + ctx->key_length / 4; in ecb_decrypt() local
    [all …]
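
A note on the `rounds = 6 + ctx->key_length / 4` hits above: with key_length in bytes, this is the standard AES round count (10/12/14 for 128/192/256-bit keys). A minimal sketch of the same arithmetic; the helper name is ours, not the kernel's:

    #include <stdio.h>

    /* Hypothetical helper mirroring the arithmetic in aes-glue.c:
     * key_length is in bytes (16, 24 or 32). */
    static int aes_rounds_from_keylen(int key_length)
    {
        return 6 + key_length / 4;
    }

    int main(void)
    {
        /* Expected: AES-128 -> 10, AES-192 -> 12, AES-256 -> 14 */
        printf("%d %d %d\n",
               aes_rounds_from_keylen(16),
               aes_rounds_from_keylen(24),
               aes_rounds_from_keylen(32));
        return 0;
    }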
|
D | aes-neon.S |
    85   .macro do_block, enc, in, rounds, rk, rkp, i
    88   mov \i, \rounds
    104  .macro encrypt_block, in, rounds, rk, rkp, i
    105  do_block 1, \in, \rounds, \rk, \rkp, \i
    108  .macro decrypt_block, in, rounds, rk, rkp, i
    109  do_block 0, \in, \rounds, \rk, \rkp, \i
    231  .macro do_block_2x, enc, in0, in1 rounds, rk, rkp, i
    234  mov \i, \rounds
    256  .macro do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i
    259  mov \i, \rounds
    [all …]
|
D | aes-ce-ccm-glue.c |
    34   u32 *macp, u32 const rk[], u32 rounds);
    37   u32 const rk[], u32 rounds, u8 mac[],
    41   u32 const rk[], u32 rounds, u8 mac[],
    45   u32 rounds);
|
D | aes-ce-cipher.c | 75 [rounds] "=r"(dummy1) in aes_cipher_encrypt() 120 [rounds] "=r"(dummy1) in aes_cipher_decrypt()
|
/linux-4.1.27/arch/powerpc/crypto/ |
D | aes-spe-glue.c |
    45   u32 rounds; member
    52   u32 rounds; member
    55   extern void ppc_encrypt_aes(u8 *out, const u8 *in, u32 *key_enc, u32 rounds);
    56   extern void ppc_decrypt_aes(u8 *out, const u8 *in, u32 *key_dec, u32 rounds);
    57   extern void ppc_encrypt_ecb(u8 *out, const u8 *in, u32 *key_enc, u32 rounds,
    59   extern void ppc_decrypt_ecb(u8 *out, const u8 *in, u32 *key_dec, u32 rounds,
    61   extern void ppc_encrypt_cbc(u8 *out, const u8 *in, u32 *key_enc, u32 rounds,
    63   extern void ppc_decrypt_cbc(u8 *out, const u8 *in, u32 *key_dec, u32 rounds,
    65   extern void ppc_crypt_ctr (u8 *out, const u8 *in, u32 *key_enc, u32 rounds,
    67   extern void ppc_encrypt_xts(u8 *out, const u8 *in, u32 *key_enc, u32 rounds,
    [all …]
|
/linux-4.1.27/arch/x86/crypto/ |
D | sha1_ssse3_glue.c |
    38   unsigned int rounds);
    41   unsigned int rounds);
    47   unsigned int rounds);
    96   unsigned int rounds) in sha1_apply_transform_avx2() argument
    99   if (rounds >= SHA1_AVX2_BLOCK_OPTSIZE) in sha1_apply_transform_avx2()
    100  sha1_transform_avx2(digest, data, rounds); in sha1_apply_transform_avx2()
    102  sha1_transform_avx(digest, data, rounds); in sha1_apply_transform_avx2()
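
The sha1_apply_transform_avx2() hits above show a runtime dispatch: the AVX2 routine only pays off for a batch of at least SHA1_AVX2_BLOCK_OPTSIZE blocks, otherwise the plain AVX routine runs. A stand-alone sketch of that pattern; the threshold value and the empty transform stubs are illustrative, not the kernel's:

    #include <stdint.h>

    /* Illustrative threshold: the wider code path only wins for a batch of
     * several blocks (the kernel gates this on SHA1_AVX2_BLOCK_OPTSIZE). */
    #define BLOCK_OPTSIZE 4

    typedef void (*sha1_block_fn)(uint32_t *digest, const char *data,
                                  unsigned int blocks);

    /* Stand-ins for the two SIMD back ends. */
    static void transform_avx(uint32_t *d, const char *p, unsigned int n)
    {
        (void)d; (void)p; (void)n;            /* SIMD body omitted */
    }

    static void transform_avx2(uint32_t *d, const char *p, unsigned int n)
    {
        (void)d; (void)p; (void)n;            /* SIMD body omitted */
    }

    /* Pick the wider implementation only when the batch is large enough to
     * amortise its higher fixed cost, as sha1_apply_transform_avx2() does. */
    static void apply_transform(uint32_t *digest, const char *data,
                                unsigned int blocks)
    {
        sha1_block_fn fn = (blocks >= BLOCK_OPTSIZE) ? transform_avx2
                                                     : transform_avx;
        fn(digest, data, blocks);
    }

    int main(void)
    {
        uint32_t digest[5] = { 0 };
        char buf[8 * 64] = { 0 };

        apply_transform(digest, buf, 8);      /* big batch -> AVX2 path */
        apply_transform(digest, buf, 1);      /* single block -> AVX path */
        return 0;
    }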
|
D | sha256_ssse3_glue.c | 46 u64 rounds); 49 u64 rounds); 53 u64 rounds);
|
D | sha512_ssse3_glue.c | 45 u64 rounds); 48 u64 rounds); 52 u64 rounds);
|
D | sha512-ssse3-asm.S |
    155  # Compute rounds t-2 and t-1
    158  # Two rounds are computed based on the values for K[t-2]+W[t-2] and
    167  # The computation of the message schedule and the rounds are tightly
    169  # For clarity, integer instructions (for the rounds calculation) are indented
    264  movdqa %xmm0, WK_2(idx) # Store W[t]+K[t] for next rounds
    310  # (80 rounds) / (2 rounds/iteration) + (1 iteration)
    319  movdqa %xmm0, WK_2(t) # Store into WK for rounds
|
D | sha512-avx-asm.S |
    161  # Compute rounds t-2 and t-1
    164  # Two rounds are computed based on the values for K[t-2]+W[t-2] and
    173  # The computation of the message schedule and the rounds are tightly
    255  vmovdqa %xmm0, WK_2(idx) # Store W[t]+K[t] for next rounds
    311  # (80 rounds) / (2 rounds/iteration) + (1 iteration)
    320  vmovdqa %xmm0, WK_2(t) # Store into WK for rounds
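
The "(80 rounds) / (2 rounds/iteration) + (1 iteration)" comments in the two SHA-512 files above describe the main-loop trip count: SHA-512 runs 80 rounds per block, the code retires two rounds per loop pass, and one extra pass is scheduled, so the loop runs 80/2 + 1 = 41 times. The same arithmetic as a one-line check:

    #include <assert.h>

    int main(void)
    {
        int iterations = 80 / 2 + 1;        /* 80 rounds, 2 per iteration, +1 */

        assert(iterations == 41);
        return 0;
    }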
|
D | sha256-avx2-asm.S | 118 _XFER_SIZE = 2*64*4 # 2 blocks, 64 rounds, 4 bytes/round 595 ## schedule 48 input dwords, by doing 3 rounds of 12 each 621 ## Do last 16 rounds with no scheduling
|
D | sha1_ssse3_asm.S | 377 # vector iteration / 4 scalar rounds
|
D | aesni-intel_asm.S |
    363  .irpc index, 1234 # do 4 rounds
    376  .irpc index, 56789 # do next 5 rounds
    584  .irpc index, 1234 # do 4 rounds
    597  .irpc index, 56789 # do next 5 rounds
|
D | sha256-avx-asm.S | 392 ## schedule 48 input dwords, by doing 3 rounds of 16 each
|
D | sha256-ssse3-asm.S | 399 ## schedule 48 input dwords, by doing 3 rounds of 16 each
|
D | sha512-avx2-asm.S | 614 ## schedule 64 input dwords, by doing 12 rounds of 4 each
|
D | aesni-intel_avx-x86_64.S | 530 .rep 9 # do 9 rounds 1790 .rep 9 # do 9 rounds
|
/linux-4.1.27/tools/power/cpupower/bench/ |
D | benchmark.c |
    50   unsigned int rounds = 0; in calculate_timespace() local
    66   rounds = (unsigned int)(load * estimated / timed); in calculate_timespace()
    67   dprintf("calibrating with %u rounds\n", rounds); in calculate_timespace()
    69   ROUNDS(rounds); in calculate_timespace()
    73   estimated = rounds; in calculate_timespace()
    104  for (_round = 1; _round <= config->rounds; _round++) in start_benchmark()
    108  for (_round = 0; _round < config->rounds; _round++) { in start_benchmark()
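
The calculate_timespace() hits above are cpupower-bench's calibration: it times an estimated number of ROUNDS() of busy work, then rescales that estimate by load / timed to hit the requested load duration. A simplified user-space sketch of that step; the busy-work unit and clock helper below are stand-ins, not the tool's own code:

    #include <stdio.h>
    #include <time.h>

    static volatile unsigned long sink;

    static void do_round(void)
    {
        sink++;                               /* one unit of busy work */
    }

    static unsigned long long get_time_us(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (unsigned long long)ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000;
    }

    /* Scale an initial guess so that the returned round count takes roughly
     * load_us microseconds, mirroring the rescaling in calculate_timespace(). */
    static unsigned int calibrate_rounds(unsigned int load_us)
    {
        unsigned int estimated = 1000000;     /* initial guess */
        unsigned long long start, stop, timed;
        unsigned int i;

        start = get_time_us();
        for (i = 0; i < estimated; i++)
            do_round();
        stop = get_time_us();
        timed = stop - start;
        if (timed == 0)
            timed = 1;

        return (unsigned int)((unsigned long long)load_us * estimated / timed);
    }

    int main(void)
    {
        printf("calibrated: %u rounds for 50000 us\n", calibrate_rounds(50000));
        return 0;
    }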
|
D | main.c | 139 sscanf(optarg, "%u", &config->rounds); in main() 187 config->rounds, in main()
|
D | README-BENCH | 47 rounds=5 66 This shows expected results of the first two test run rounds from 117 -r, --rounds<int> load/sleep rounds
|
D | example.cfg | 9 rounds = 40
|
D | parse.c | 134 config->rounds = 50; in prepare_default_config() 202 sscanf(val, "%u", &config->rounds); in prepare_config()
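
prepare_default_config() and prepare_config() above illustrate the usual default-then-override pattern: every field gets a default (rounds = 50), and a config-file entry, when present, overwrites it via sscanf(). A minimal sketch; the struct and helper names here are illustrative:

    #include <stdio.h>
    #include <string.h>

    struct bench_cfg {
        unsigned int rounds;
    };

    static void prepare_default(struct bench_cfg *cfg)
    {
        cfg->rounds = 50;                     /* default, as in parse.c */
    }

    /* Overwrite a default only when the key appears in the config file. */
    static void apply_option(struct bench_cfg *cfg, const char *key, const char *val)
    {
        if (strcmp(key, "rounds") == 0)
            sscanf(val, "%u", &cfg->rounds);
    }

    int main(void)
    {
        struct bench_cfg cfg;

        prepare_default(&cfg);
        apply_option(&cfg, "rounds", "40");   /* e.g. the example.cfg value */
        printf("rounds = %u\n", cfg.rounds);
        return 0;
    }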
|
D | parse.h | 30 unsigned int rounds; /* calculation rounds with iterated sleep/load time */ member
|
D | system.c | 144 for (round = 0; round < config->rounds; round++) { in prepare_user()
|
/linux-4.1.27/arch/arm/crypto/ |
D | bsaes-armv7.pl |
    57    my ($key,$rounds,$const)=("r4","r5","r6");
    759   sub $rounds,$rounds,#1
    768   subs $rounds,$rounds,#1
    856   sub $rounds,$rounds,#1
    865   subs $rounds,$rounds,#1
    896   my ($out,$inp,$rounds,$const)=("r12","r4","r5","r6");
    943   sub $rounds,$rounds,#1
    970   subs $rounds,$rounds,#1
    1084  my ($inp,$out,$len,$key, $ivp,$fp,$rounds)=map("r$_",(0..3,8..10));
    1116  ldr $rounds, [$key, #240] @ get # of rounds
    [all …]
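
The `ldr $rounds, [$key, #240]` hit above depends on the expanded-key layout these routines assume: up to 15 round keys of 16 bytes each (15 * 16 = 240), with the round count stored in the word that follows, i.e. at byte offset 240. A sketch of a matching C-side structure (OpenSSL-style AES_KEY shape, shown for illustration rather than copied from the kernel headers):

    #include <stddef.h>
    #include <stdio.h>

    #define AES_MAXNR 14            /* AES-256 uses 14 rounds, i.e. 15 round keys */

    /* Illustrative expanded-key layout:
     * 15 round keys of four 32-bit words, then the round count. */
    struct aes_key {
        unsigned int rd_key[4 * (AES_MAXNR + 1)];
        int rounds;
    };

    int main(void)
    {
        /* 4 * 15 * 4 bytes = 240, so `rounds` lives at byte offset 240,
         * which is what `ldr $rounds, [$key, #240]` reads. */
        printf("offsetof(rounds) = %zu\n", offsetof(struct aes_key, rounds));
        return 0;
    }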
|
D | aes-ce-glue.c |
    28   int rounds, int blocks);
    30   int rounds, int blocks);
    33   int rounds, int blocks, u8 iv[]);
    35   int rounds, int blocks, u8 iv[]);
    38   int rounds, int blocks, u8 ctr[]);
    41   int rounds, int blocks, u8 iv[],
    44   int rounds, int blocks, u8 iv[],
    307  int err, first, rounds = num_rounds(&ctx->key1); in xts_encrypt() local
    318  (u8 *)ctx->key1.key_enc, rounds, blocks, in xts_encrypt()
    332  int err, first, rounds = num_rounds(&ctx->key1); in xts_decrypt() local
    [all …]
|
D | sha512_neon_glue.c |
    113  const unsigned int rounds = (len - done) / SHA512_BLOCK_SIZE; in __sha512_neon_update() local
    116  rounds); in __sha512_neon_update()
    118  done += rounds * SHA512_BLOCK_SIZE; in __sha512_neon_update()
|
D | aes_glue.h | 6 int rounds; member
|
D | sha1_glue.c | 31 const unsigned char *data, unsigned int rounds);
|
D | sha1_neon_glue.c | 35 unsigned int rounds);
|
D | aesbs-core.S_shipped |
    1081  ldr r10, [r3, #240] @ get # of rounds
    1089  mov r5, r10 @ pass # of rounds
    1104  mov r5, r10 @ pass # of rounds
    1337  ldr r10, [r3, #240] @ get # of rounds
    1345  mov r5, r10 @ pass # of rounds
    1362  mov r5, r10 @ pass # of rounds
    1407  mov r5, r10 @ pass rounds
    1569  ldr r1, [r10, #240] @ get # of rounds
    1579  mov r5, r1 @ pass # of rounds
    1592  mov r5, r1 @ pass # of rounds
    [all …]
|
D | aes-ce-core.S |
    102  blo 0f @ AES-128: 10 rounds
    104  beq 1f @ AES-192: 12 rounds
    154  .macro prepare_key, rk, rounds
    155  add ip, \rk, \rounds, lsl #4
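
In prepare_key above, `add ip, \rk, \rounds, lsl #4` computes rk + rounds * 16: with 16 bytes per round key and rounds + 1 keys in the schedule, that is the address of the final round key. The same pointer arithmetic as a C sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Each AES round key is 16 bytes; a schedule for `rounds` rounds holds
     * rounds + 1 of them. rk + rounds * 16 is therefore the address of the
     * final round key, which is what `\rk + (\rounds << 4)` yields. */
    static const uint8_t *last_round_key(const uint8_t *rk, unsigned int rounds)
    {
        return rk + (rounds << 4);            /* rounds * 16 */
    }

    int main(void)
    {
        uint8_t sched[(14 + 1) * 16] = { 0 }; /* room for AES-256 */

        printf("offset for 10 rounds: %td\n",
               last_round_key(sched, 10) - sched);   /* 160 */
        return 0;
    }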
|
D | aes-armv4.S | 703 mov r12,r12,lsl#2 @ (rounds-1)*4
|
/linux-4.1.27/drivers/crypto/vmx/ |
D | aesp8-ppc.pl |
    61   my ($inp,$bits,$out,$ptr,$cnt,$rounds)=map("r$_",(3..8));
    202  li $rounds,10
    276  li $rounds,12
    284  li $rounds,14
    338  stw $rounds,0($out)
    358  slwi $cnt,$rounds,4
    360  srwi $rounds,$rounds,1
    362  mtctr $rounds
    400  my ($inp,$out,$key,$rounds,$idx)=map("r$_",(3..7));
    404  lwz $rounds,240($key)
    [all …]
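
Lines 358-362 above set up a loop of rounds/2 iterations over 16-byte entries (cnt = rounds * 16 is the byte offset of the last round key). One common use of exactly that shape is reversing a key schedule in place, e.g. when deriving a decryption schedule; the sketch below is our reading of that pattern, not a transcription of the Perl:

    #include <stdint.h>
    #include <string.h>

    /* Reverse the order of the rounds + 1 round keys of 16 bytes each.
     * rounds/2 swaps suffice; the middle key (if any) stays put, which
     * matches `srwi $rounds,$rounds,1; mtctr $rounds`. */
    static void reverse_round_keys(uint8_t *rk, unsigned int rounds)
    {
        unsigned int i;
        uint8_t tmp[16];

        for (i = 0; i < rounds / 2; i++) {
            uint8_t *lo = rk + 16 * i;
            uint8_t *hi = rk + 16 * (rounds - i);

            memcpy(tmp, lo, 16);
            memcpy(lo, hi, 16);
            memcpy(hi, tmp, 16);
        }
    }

    int main(void)
    {
        uint8_t sched[(10 + 1) * 16];         /* AES-128: 11 round keys */
        unsigned int i;

        for (i = 0; i < sizeof(sched); i++)
            sched[i] = (uint8_t)(i / 16);     /* tag each key with its index */
        reverse_round_keys(sched, 10);
        return sched[0] == 10 ? 0 : 1;        /* first key is now the old last one */
    }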
|
D | aesp8-ppc.h | 8 int rounds; member
|
/linux-4.1.27/arch/sparc/crypto/ |
D | md5_glue.c |
    30   unsigned int rounds);
    57   const unsigned int rounds = (len - done) / MD5_HMAC_BLOCK_SIZE; in __md5_sparc64_update() local
    59   md5_sparc64_transform(sctx->hash, data + done, rounds); in __md5_sparc64_update()
    60   done += rounds * MD5_HMAC_BLOCK_SIZE; in __md5_sparc64_update()
|
D | sha1_glue.c |
    27   unsigned int rounds);
    52   const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE; in __sha1_sparc64_update() local
    54   sha1_sparc64_transform(sctx->state, data + done, rounds); in __sha1_sparc64_update()
    55   done += rounds * SHA1_BLOCK_SIZE; in __sha1_sparc64_update()
|
D | sha256_glue.c |
    27   unsigned int rounds);
    73   const unsigned int rounds = (len - done) / SHA256_BLOCK_SIZE; in __sha256_sparc64_update() local
    75   sha256_sparc64_transform(sctx->state, data + done, rounds); in __sha256_sparc64_update()
    76   done += rounds * SHA256_BLOCK_SIZE; in __sha256_sparc64_update()
|
D | sha512_glue.c |
    26   unsigned int rounds);
    73   const unsigned int rounds = (len - done) / SHA512_BLOCK_SIZE; in __sha512_sparc64_update() local
    75   sha512_sparc64_transform(sctx->state, data + done, rounds); in __sha512_sparc64_update()
    76   done += rounds * SHA512_BLOCK_SIZE; in __sha512_sparc64_update()
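
The four sparc64 glue files above (md5, sha1, sha256, sha512) share one idiom: despite the name, `rounds` here counts whole blocks remaining in the input, the assembly transform consumes that many blocks in a single call, and `done` advances by rounds * block size. A generic sketch of that update step; the transform below is a placeholder, not any of the sparc64 routines:

    #include <stddef.h>
    #include <stdint.h>

    #define BLOCK_SIZE 64                     /* e.g. MD5/SHA-1/SHA-256 block size */

    /* Placeholder for a per-arch multi-block transform. */
    static void transform(uint32_t *state, const uint8_t *data, unsigned int blocks)
    {
        (void)state; (void)data; (void)blocks; /* SIMD/asm body omitted */
    }

    /* Feed all complete blocks to the transform in one call; the caller keeps
     * any trailing partial block. Mirrors the
     * `rounds = (len - done) / BLOCK_SIZE` pattern in the glue code. */
    static size_t update_blocks(uint32_t *state, const uint8_t *data, size_t len)
    {
        size_t done = 0;

        if (len - done >= BLOCK_SIZE) {
            const unsigned int rounds = (unsigned int)((len - done) / BLOCK_SIZE);

            transform(state, data + done, rounds);
            done += (size_t)rounds * BLOCK_SIZE;
        }
        return done;                          /* bytes consumed */
    }

    int main(void)
    {
        uint32_t state[8] = { 0 };
        uint8_t buf[200] = { 0 };

        /* 200 bytes = 3 whole blocks consumed, 8 bytes left for the caller */
        return update_blocks(state, buf, sizeof(buf)) == 192 ? 0 : 1;
    }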
|
/linux-4.1.27/fs/reiserfs/ |
D | hashes.c | 24 #define TEACORE(rounds) \ argument 27 int n = rounds; \
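
TEACORE(rounds) in reiserfs' hashes.c expands to `rounds` iterations of a TEA-style mixing step over two 32-bit hash halves. For orientation only, here is the classic TEA round structure that this kind of macro is built on; this is generic TEA, not a transcription of the reiserfs variant:

    #include <stdint.h>

    #define TEA_DELTA 0x9E3779B9u

    /* One call = `rounds` iterations of the classic TEA mixing step over the
     * pair (b0, b1) with key words a, b, c, d. */
    static void tea_core(uint32_t *b0, uint32_t *b1,
                         uint32_t a, uint32_t b, uint32_t c, uint32_t d,
                         int rounds)
    {
        uint32_t sum = 0;
        uint32_t x = *b0, y = *b1;

        while (rounds-- > 0) {
            sum += TEA_DELTA;
            x += ((y << 4) + a) ^ (y + sum) ^ ((y >> 5) + b);
            y += ((x << 4) + c) ^ (x + sum) ^ ((x >> 5) + d);
        }
        *b0 = x;
        *b1 = y;
    }

    int main(void)
    {
        uint32_t h0 = 0x01234567u, h1 = 0x89abcdefu;

        tea_core(&h0, &h1, 0xdeadbeefu, 0xcafebabeu, 0x8badf00du, 0x0badc0deu, 32);
        return 0;
    }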
|
/linux-4.1.27/drivers/crypto/ |
D | padlock-aes.c |
    42   rounds:4, member
    134  ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4; in aes_set_key()
    135  ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds; in aes_set_key()
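
The `rounds:4, member` hit above is a 4-bit bitfield inside the PadLock control word; four bits are enough for the largest AES round count (14), and aes_set_key() fills it with 10 + (key_len - 16) / 4, the byte-length form of the usual 10/12/14 mapping. A small illustration of packing the round count into such a field; the struct here is a fragment for illustration, not the driver's full cword layout:

    #include <stdio.h>

    /* Illustrative control-word fragment: 4 bits comfortably hold 10/12/14. */
    struct cword_bits {
        unsigned int rounds:4;
        unsigned int reserved:28;
    };

    int main(void)
    {
        struct cword_bits cw = { 0 };
        unsigned int key_len = 32;            /* AES-256 key, in bytes */

        cw.rounds = 10 + (key_len - 16) / 4;  /* 16 -> 10, 24 -> 12, 32 -> 14 */
        printf("rounds = %u\n", (unsigned int)cw.rounds);   /* 14, fits in 4 bits */
        return 0;
    }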
|
/linux-4.1.27/arch/x86/crypto/sha-mb/ |
D | sha1_x8_avx2.S |
    370  ## do rounds 0...15
    378  ## do rounds 16...19
    387  ## do rounds 20...39
    395  ## do rounds 40...59
    403  ## do rounds 60...79
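
The "do rounds 0...15 / 16...19 / 20...39 / 40...59 / 60...79" markers above follow SHA-1's round structure: the first 16 rounds consume the message words directly, later rounds use the expanded schedule, and each 20-round group has its own boolean function and constant. For reference, the per-round selection as defined in FIPS 180, in plain C:

    #include <stdint.h>

    /* Round-dependent boolean function for SHA-1 (FIPS 180-4). */
    static uint32_t sha1_f(int t, uint32_t b, uint32_t c, uint32_t d)
    {
        if (t < 20)
            return (b & c) | (~b & d);          /* Ch     */
        if (t < 40)
            return b ^ c ^ d;                   /* Parity */
        if (t < 60)
            return (b & c) | (b & d) | (c & d); /* Maj    */
        return b ^ c ^ d;                       /* Parity */
    }

    /* Round-dependent additive constant for SHA-1. */
    static uint32_t sha1_k(int t)
    {
        if (t < 20)
            return 0x5A827999u;
        if (t < 40)
            return 0x6ED9EBA1u;
        if (t < 60)
            return 0x8F1BBCDCu;
        return 0xCA62C1D6u;
    }

    int main(void)
    {
        /* e.g. round 30 uses Parity and K = 0x6ED9EBA1 */
        return (sha1_f(30, 1, 2, 4) == (1u ^ 2u ^ 4u) &&
                sha1_k(30) == 0x6ED9EBA1u) ? 0 : 1;
    }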
|
/linux-4.1.27/Documentation/ |
D | nommu-mmap.txt | 283 NOMMU mmap automatically rounds up to the nearest power-of-2 number of pages
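
A worked example of the rounding described in that line: with 4 KiB pages, a 20 KiB (5-page) request is backed by 8 pages on NOMMU. A sketch of the arithmetic only, not the kernel's actual allocation path:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Smallest power of two >= n (n > 0). */
    static unsigned long roundup_pow2(unsigned long n)
    {
        unsigned long p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        unsigned long len = 5 * PAGE_SIZE;                   /* 20 KiB request */
        unsigned long pages = (len + PAGE_SIZE - 1) / PAGE_SIZE;

        printf("%lu pages requested -> %lu pages allocated\n",
               pages, roundup_pow2(pages));                  /* 5 -> 8 */
        return 0;
    }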
|
/linux-4.1.27/crypto/ |
D | Kconfig | 1271 many rounds for security. It is very fast and uses
|