Searched refs:sctx (Results 1 - 52 of 52) sorted by relevance

/linux-4.1.27/include/crypto/
sha256_base.h
23 struct sha256_state *sctx = shash_desc_ctx(desc); sha224_base_init() local
25 sctx->state[0] = SHA224_H0; sha224_base_init()
26 sctx->state[1] = SHA224_H1; sha224_base_init()
27 sctx->state[2] = SHA224_H2; sha224_base_init()
28 sctx->state[3] = SHA224_H3; sha224_base_init()
29 sctx->state[4] = SHA224_H4; sha224_base_init()
30 sctx->state[5] = SHA224_H5; sha224_base_init()
31 sctx->state[6] = SHA224_H6; sha224_base_init()
32 sctx->state[7] = SHA224_H7; sha224_base_init()
33 sctx->count = 0; sha224_base_init()
40 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_base_init() local
42 sctx->state[0] = SHA256_H0; sha256_base_init()
43 sctx->state[1] = SHA256_H1; sha256_base_init()
44 sctx->state[2] = SHA256_H2; sha256_base_init()
45 sctx->state[3] = SHA256_H3; sha256_base_init()
46 sctx->state[4] = SHA256_H4; sha256_base_init()
47 sctx->state[5] = SHA256_H5; sha256_base_init()
48 sctx->state[6] = SHA256_H6; sha256_base_init()
49 sctx->state[7] = SHA256_H7; sha256_base_init()
50 sctx->count = 0; sha256_base_init()
60 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_base_do_update() local
61 unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; sha256_base_do_update()
63 sctx->count += len; sha256_base_do_update()
71 memcpy(sctx->buf + partial, data, p); sha256_base_do_update()
75 block_fn(sctx, sctx->buf, 1); sha256_base_do_update()
82 block_fn(sctx, data, blocks); sha256_base_do_update()
88 memcpy(sctx->buf + partial, data, len); sha256_base_do_update()
97 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_base_do_finalize() local
98 __be64 *bits = (__be64 *)(sctx->buf + bit_offset); sha256_base_do_finalize()
99 unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; sha256_base_do_finalize()
101 sctx->buf[partial++] = 0x80; sha256_base_do_finalize()
103 memset(sctx->buf + partial, 0x0, SHA256_BLOCK_SIZE - partial); sha256_base_do_finalize()
106 block_fn(sctx, sctx->buf, 1); sha256_base_do_finalize()
109 memset(sctx->buf + partial, 0x0, bit_offset - partial); sha256_base_do_finalize()
110 *bits = cpu_to_be64(sctx->count << 3); sha256_base_do_finalize()
111 block_fn(sctx, sctx->buf, 1); sha256_base_do_finalize()
119 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_base_finish() local
124 put_unaligned_be32(sctx->state[i], digest++); sha256_base_finish()
126 *sctx = (struct sha256_state){}; sha256_base_finish()
sha512_base.h
23 struct sha512_state *sctx = shash_desc_ctx(desc); sha384_base_init() local
25 sctx->state[0] = SHA384_H0; sha384_base_init()
26 sctx->state[1] = SHA384_H1; sha384_base_init()
27 sctx->state[2] = SHA384_H2; sha384_base_init()
28 sctx->state[3] = SHA384_H3; sha384_base_init()
29 sctx->state[4] = SHA384_H4; sha384_base_init()
30 sctx->state[5] = SHA384_H5; sha384_base_init()
31 sctx->state[6] = SHA384_H6; sha384_base_init()
32 sctx->state[7] = SHA384_H7; sha384_base_init()
33 sctx->count[0] = sctx->count[1] = 0; sha384_base_init()
40 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_base_init() local
42 sctx->state[0] = SHA512_H0; sha512_base_init()
43 sctx->state[1] = SHA512_H1; sha512_base_init()
44 sctx->state[2] = SHA512_H2; sha512_base_init()
45 sctx->state[3] = SHA512_H3; sha512_base_init()
46 sctx->state[4] = SHA512_H4; sha512_base_init()
47 sctx->state[5] = SHA512_H5; sha512_base_init()
48 sctx->state[6] = SHA512_H6; sha512_base_init()
49 sctx->state[7] = SHA512_H7; sha512_base_init()
50 sctx->count[0] = sctx->count[1] = 0; sha512_base_init()
60 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_base_do_update() local
61 unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; sha512_base_do_update()
63 sctx->count[0] += len; sha512_base_do_update()
64 if (sctx->count[0] < len) sha512_base_do_update()
65 sctx->count[1]++; sha512_base_do_update()
73 memcpy(sctx->buf + partial, data, p); sha512_base_do_update()
77 block_fn(sctx, sctx->buf, 1); sha512_base_do_update()
84 block_fn(sctx, data, blocks); sha512_base_do_update()
90 memcpy(sctx->buf + partial, data, len); sha512_base_do_update()
99 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_base_do_finalize() local
100 __be64 *bits = (__be64 *)(sctx->buf + bit_offset); sha512_base_do_finalize()
101 unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; sha512_base_do_finalize()
103 sctx->buf[partial++] = 0x80; sha512_base_do_finalize()
105 memset(sctx->buf + partial, 0x0, SHA512_BLOCK_SIZE - partial); sha512_base_do_finalize()
108 block_fn(sctx, sctx->buf, 1); sha512_base_do_finalize()
111 memset(sctx->buf + partial, 0x0, bit_offset - partial); sha512_base_do_finalize()
112 bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); sha512_base_do_finalize()
113 bits[1] = cpu_to_be64(sctx->count[0] << 3); sha512_base_do_finalize()
114 block_fn(sctx, sctx->buf, 1); sha512_base_do_finalize()
122 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_base_finish() local
127 put_unaligned_be64(sctx->state[i], digest++); sha512_base_finish()
129 *sctx = (struct sha512_state){}; sha512_base_finish()
sha1_base.h
22 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_base_init() local
24 sctx->state[0] = SHA1_H0; sha1_base_init()
25 sctx->state[1] = SHA1_H1; sha1_base_init()
26 sctx->state[2] = SHA1_H2; sha1_base_init()
27 sctx->state[3] = SHA1_H3; sha1_base_init()
28 sctx->state[4] = SHA1_H4; sha1_base_init()
29 sctx->count = 0; sha1_base_init()
39 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_base_do_update() local
40 unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; sha1_base_do_update()
42 sctx->count += len; sha1_base_do_update()
50 memcpy(sctx->buffer + partial, data, p); sha1_base_do_update()
54 block_fn(sctx, sctx->buffer, 1); sha1_base_do_update()
61 block_fn(sctx, data, blocks); sha1_base_do_update()
67 memcpy(sctx->buffer + partial, data, len); sha1_base_do_update()
76 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_base_do_finalize() local
77 __be64 *bits = (__be64 *)(sctx->buffer + bit_offset); sha1_base_do_finalize()
78 unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; sha1_base_do_finalize()
80 sctx->buffer[partial++] = 0x80; sha1_base_do_finalize()
82 memset(sctx->buffer + partial, 0x0, SHA1_BLOCK_SIZE - partial); sha1_base_do_finalize()
85 block_fn(sctx, sctx->buffer, 1); sha1_base_do_finalize()
88 memset(sctx->buffer + partial, 0x0, bit_offset - partial); sha1_base_do_finalize()
89 *bits = cpu_to_be64(sctx->count << 3); sha1_base_do_finalize()
90 block_fn(sctx, sctx->buffer, 1); sha1_base_do_finalize()
97 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_base_finish() local
102 put_unaligned_be32(sctx->state[i], digest++); sha1_base_finish()
104 *sctx = (struct sha1_state){}; sha1_base_finish()
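
The three base headers above implement the shared block buffering once: each architecture driver keeps only a byte count and a partial-block buffer, and supplies its compression routine as block_fn. Below is a minimal userspace sketch of that do_update flow; the names and the stub block function are hypothetical illustrations, not the kernel API.

/* Sketch of the sha*_base.h update pattern: buffer a partial block,
 * flush it when it fills, push whole blocks straight through, stash
 * the remainder. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define BLOCK_SIZE 64          /* SHA-1/SHA-256 block size in bytes */

struct demo_state {
    uint64_t count;            /* total bytes hashed so far */
    uint8_t  buf[BLOCK_SIZE];  /* partial block awaiting more data */
};

/* Stand-in for the per-arch compression function passed as block_fn. */
static void demo_block_fn(struct demo_state *st, const uint8_t *data, int blocks)
{
    printf("compress %d block(s)\n", blocks);
    (void)st; (void)data;
}

static void demo_do_update(struct demo_state *st, const uint8_t *data, size_t len)
{
    unsigned int partial = st->count % BLOCK_SIZE;

    st->count += len;
    if (partial && partial + len >= BLOCK_SIZE) {
        unsigned int p = BLOCK_SIZE - partial;

        memcpy(st->buf + partial, data, p);        /* top up the partial block */
        demo_block_fn(st, st->buf, 1);
        data += p;
        len -= p;
        partial = 0;
    }
    if (len >= BLOCK_SIZE) {                       /* bulk full blocks */
        demo_block_fn(st, data, (int)(len / BLOCK_SIZE));
        data += len - (len % BLOCK_SIZE);
        len %= BLOCK_SIZE;
    }
    if (len)
        memcpy(st->buf + partial, data, len);      /* stash the tail */
}

int main(void)
{
    struct demo_state st = { 0 };
    uint8_t msg[200] = { 0 };

    demo_do_update(&st, msg, 10);   /* buffered only */
    demo_do_update(&st, msg, 190);  /* flushes 3 full blocks, keeps 8 bytes */
    return 0;
}
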
/linux-4.1.27/arch/powerpc/crypto/
md5-glue.c
26 static inline void ppc_md5_clear_context(struct md5_state *sctx) ppc_md5_clear_context() argument
29 u32 *ptr = (u32 *)sctx; ppc_md5_clear_context()
38 struct md5_state *sctx = shash_desc_ctx(desc); ppc_md5_init() local
40 sctx->hash[0] = 0x67452301; ppc_md5_init()
41 sctx->hash[1] = 0xefcdab89; ppc_md5_init()
42 sctx->hash[2] = 0x98badcfe; ppc_md5_init()
43 sctx->hash[3] = 0x10325476; ppc_md5_init()
44 sctx->byte_count = 0; ppc_md5_init()
52 struct md5_state *sctx = shash_desc_ctx(desc); ppc_md5_update() local
53 const unsigned int offset = sctx->byte_count & 0x3f; ppc_md5_update()
57 sctx->byte_count += len; ppc_md5_update()
60 memcpy((char *)sctx->block + offset, src, len); ppc_md5_update()
65 memcpy((char *)sctx->block + offset, src, avail); ppc_md5_update()
66 ppc_md5_transform(sctx->hash, (const u8 *)sctx->block, 1); ppc_md5_update()
72 ppc_md5_transform(sctx->hash, src, len >> 6); ppc_md5_update()
77 memcpy((char *)sctx->block, src, len); ppc_md5_update()
83 struct md5_state *sctx = shash_desc_ctx(desc); ppc_md5_final() local
84 const unsigned int offset = sctx->byte_count & 0x3f; ppc_md5_final()
85 const u8 *src = (const u8 *)sctx->block; ppc_md5_final()
88 __le64 *pbits = (__le64 *)((char *)sctx->block + 56); ppc_md5_final()
95 ppc_md5_transform(sctx->hash, src, 1); ppc_md5_final()
96 p = (char *)sctx->block; ppc_md5_final()
101 *pbits = cpu_to_le64(sctx->byte_count << 3); ppc_md5_final()
102 ppc_md5_transform(sctx->hash, src, 1); ppc_md5_final()
104 dst[0] = cpu_to_le32(sctx->hash[0]); ppc_md5_final()
105 dst[1] = cpu_to_le32(sctx->hash[1]); ppc_md5_final()
106 dst[2] = cpu_to_le32(sctx->hash[2]); ppc_md5_final()
107 dst[3] = cpu_to_le32(sctx->hash[3]); ppc_md5_final()
109 ppc_md5_clear_context(sctx); ppc_md5_final()
115 struct md5_state *sctx = shash_desc_ctx(desc); ppc_md5_export() local
117 memcpy(out, sctx, sizeof(*sctx)); ppc_md5_export()
123 struct md5_state *sctx = shash_desc_ctx(desc); ppc_md5_import() local
125 memcpy(sctx, in, sizeof(*sctx)); ppc_md5_import()
sha1-spe-glue.c
53 static inline void ppc_sha1_clear_context(struct sha1_state *sctx) ppc_sha1_clear_context() argument
56 u32 *ptr = (u32 *)sctx; ppc_sha1_clear_context()
65 struct sha1_state *sctx = shash_desc_ctx(desc); ppc_spe_sha1_init() local
67 sctx->state[0] = SHA1_H0; ppc_spe_sha1_init()
68 sctx->state[1] = SHA1_H1; ppc_spe_sha1_init()
69 sctx->state[2] = SHA1_H2; ppc_spe_sha1_init()
70 sctx->state[3] = SHA1_H3; ppc_spe_sha1_init()
71 sctx->state[4] = SHA1_H4; ppc_spe_sha1_init()
72 sctx->count = 0; ppc_spe_sha1_init()
80 struct sha1_state *sctx = shash_desc_ctx(desc); ppc_spe_sha1_update() local
81 const unsigned int offset = sctx->count & 0x3f; ppc_spe_sha1_update()
87 sctx->count += len; ppc_spe_sha1_update()
88 memcpy((char *)sctx->buffer + offset, src, len); ppc_spe_sha1_update()
92 sctx->count += len; ppc_spe_sha1_update()
95 memcpy((char *)sctx->buffer + offset, src, avail); ppc_spe_sha1_update()
98 ppc_spe_sha1_transform(sctx->state, (const u8 *)sctx->buffer, 1); ppc_spe_sha1_update()
110 ppc_spe_sha1_transform(sctx->state, src, bytes >> 6); ppc_spe_sha1_update()
117 memcpy((char *)sctx->buffer, src, len); ppc_spe_sha1_update()
123 struct sha1_state *sctx = shash_desc_ctx(desc); ppc_spe_sha1_final() local
124 const unsigned int offset = sctx->count & 0x3f; ppc_spe_sha1_final()
125 char *p = (char *)sctx->buffer + offset; ppc_spe_sha1_final()
127 __be64 *pbits = (__be64 *)(((char *)&sctx->buffer) + 56); ppc_spe_sha1_final()
137 ppc_spe_sha1_transform(sctx->state, sctx->buffer, 1); ppc_spe_sha1_final()
138 p = (char *)sctx->buffer; ppc_spe_sha1_final()
143 *pbits = cpu_to_be64(sctx->count << 3); ppc_spe_sha1_final()
144 ppc_spe_sha1_transform(sctx->state, sctx->buffer, 1); ppc_spe_sha1_final()
148 dst[0] = cpu_to_be32(sctx->state[0]); ppc_spe_sha1_final()
149 dst[1] = cpu_to_be32(sctx->state[1]); ppc_spe_sha1_final()
150 dst[2] = cpu_to_be32(sctx->state[2]); ppc_spe_sha1_final()
151 dst[3] = cpu_to_be32(sctx->state[3]); ppc_spe_sha1_final()
152 dst[4] = cpu_to_be32(sctx->state[4]); ppc_spe_sha1_final()
154 ppc_sha1_clear_context(sctx); ppc_spe_sha1_final()
160 struct sha1_state *sctx = shash_desc_ctx(desc); ppc_spe_sha1_export() local
162 memcpy(out, sctx, sizeof(*sctx)); ppc_spe_sha1_export()
168 struct sha1_state *sctx = shash_desc_ctx(desc); ppc_spe_sha1_import() local
170 memcpy(sctx, in, sizeof(*sctx)); ppc_spe_sha1_import()
sha256-spe-glue.c
54 static inline void ppc_sha256_clear_context(struct sha256_state *sctx) ppc_sha256_clear_context() argument
57 u32 *ptr = (u32 *)sctx; ppc_sha256_clear_context()
66 struct sha256_state *sctx = shash_desc_ctx(desc); ppc_spe_sha256_init() local
68 sctx->state[0] = SHA256_H0; ppc_spe_sha256_init()
69 sctx->state[1] = SHA256_H1; ppc_spe_sha256_init()
70 sctx->state[2] = SHA256_H2; ppc_spe_sha256_init()
71 sctx->state[3] = SHA256_H3; ppc_spe_sha256_init()
72 sctx->state[4] = SHA256_H4; ppc_spe_sha256_init()
73 sctx->state[5] = SHA256_H5; ppc_spe_sha256_init()
74 sctx->state[6] = SHA256_H6; ppc_spe_sha256_init()
75 sctx->state[7] = SHA256_H7; ppc_spe_sha256_init()
76 sctx->count = 0; ppc_spe_sha256_init()
83 struct sha256_state *sctx = shash_desc_ctx(desc); ppc_spe_sha224_init() local
85 sctx->state[0] = SHA224_H0; ppc_spe_sha224_init()
86 sctx->state[1] = SHA224_H1; ppc_spe_sha224_init()
87 sctx->state[2] = SHA224_H2; ppc_spe_sha224_init()
88 sctx->state[3] = SHA224_H3; ppc_spe_sha224_init()
89 sctx->state[4] = SHA224_H4; ppc_spe_sha224_init()
90 sctx->state[5] = SHA224_H5; ppc_spe_sha224_init()
91 sctx->state[6] = SHA224_H6; ppc_spe_sha224_init()
92 sctx->state[7] = SHA224_H7; ppc_spe_sha224_init()
93 sctx->count = 0; ppc_spe_sha224_init()
101 struct sha256_state *sctx = shash_desc_ctx(desc); ppc_spe_sha256_update() local
102 const unsigned int offset = sctx->count & 0x3f; ppc_spe_sha256_update()
108 sctx->count += len; ppc_spe_sha256_update()
109 memcpy((char *)sctx->buf + offset, src, len); ppc_spe_sha256_update()
113 sctx->count += len; ppc_spe_sha256_update()
116 memcpy((char *)sctx->buf + offset, src, avail); ppc_spe_sha256_update()
119 ppc_spe_sha256_transform(sctx->state, (const u8 *)sctx->buf, 1); ppc_spe_sha256_update()
132 ppc_spe_sha256_transform(sctx->state, src, bytes >> 6); ppc_spe_sha256_update()
139 memcpy((char *)sctx->buf, src, len); ppc_spe_sha256_update()
145 struct sha256_state *sctx = shash_desc_ctx(desc); ppc_spe_sha256_final() local
146 const unsigned int offset = sctx->count & 0x3f; ppc_spe_sha256_final()
147 char *p = (char *)sctx->buf + offset; ppc_spe_sha256_final()
149 __be64 *pbits = (__be64 *)(((char *)&sctx->buf) + 56); ppc_spe_sha256_final()
159 ppc_spe_sha256_transform(sctx->state, sctx->buf, 1); ppc_spe_sha256_final()
160 p = (char *)sctx->buf; ppc_spe_sha256_final()
165 *pbits = cpu_to_be64(sctx->count << 3); ppc_spe_sha256_final()
166 ppc_spe_sha256_transform(sctx->state, sctx->buf, 1); ppc_spe_sha256_final()
170 dst[0] = cpu_to_be32(sctx->state[0]); ppc_spe_sha256_final()
171 dst[1] = cpu_to_be32(sctx->state[1]); ppc_spe_sha256_final()
172 dst[2] = cpu_to_be32(sctx->state[2]); ppc_spe_sha256_final()
173 dst[3] = cpu_to_be32(sctx->state[3]); ppc_spe_sha256_final()
174 dst[4] = cpu_to_be32(sctx->state[4]); ppc_spe_sha256_final()
175 dst[5] = cpu_to_be32(sctx->state[5]); ppc_spe_sha256_final()
176 dst[6] = cpu_to_be32(sctx->state[6]); ppc_spe_sha256_final()
177 dst[7] = cpu_to_be32(sctx->state[7]); ppc_spe_sha256_final()
179 ppc_sha256_clear_context(sctx); ppc_spe_sha256_final()
206 struct sha256_state *sctx = shash_desc_ctx(desc); ppc_spe_sha256_export() local
208 memcpy(out, sctx, sizeof(*sctx)); ppc_spe_sha256_export()
214 struct sha256_state *sctx = shash_desc_ctx(desc); ppc_spe_sha256_import() local
216 memcpy(sctx, in, sizeof(*sctx)); ppc_spe_sha256_import()
sha1.c
33 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_init() local
35 *sctx = (struct sha1_state){ sha1_init()
45 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_update() local
49 partial = sctx->count & 0x3f; sha1_update()
50 sctx->count += len; sha1_update()
59 memcpy(sctx->buffer + partial, data, done + 64); sha1_update()
60 src = sctx->buffer; sha1_update()
64 powerpc_sha_transform(sctx->state, src, temp); sha1_update()
72 memcpy(sctx->buffer + partial, src, len - done); sha1_update()
81 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_final() local
87 bits = cpu_to_be64(sctx->count << 3); sha1_final()
90 index = sctx->count & 0x3f; sha1_final()
99 dst[i] = cpu_to_be32(sctx->state[i]); sha1_final()
102 memset(sctx, 0, sizeof *sctx); sha1_final()
109 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_export() local
111 memcpy(out, sctx, sizeof(*sctx)); sha1_export()
117 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_import() local
119 memcpy(sctx, in, sizeof(*sctx)); sha1_import()
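
The *_final() routines above all share the same padding arithmetic: append 0x80, zero-fill up to byte 56 of the last 64-byte block (spilling into a second block when fewer than 9 bytes remain), then store the message length in bits as cpu_to_be64(count << 3). A small standalone sketch of that arithmetic, assuming a 64-byte block size:

#include <stdint.h>
#include <stdio.h>

/* Pad length (including the 0x80 byte) needed before the 8-byte length. */
static unsigned int pad_len_for(uint64_t count)
{
    unsigned int index = count & 0x3f;          /* bytes already in the block */

    /* Room for 0x80 plus the 8-byte length?  Pad to 56, otherwise pad
     * into the next block (to 120 bytes total). */
    return (index < 56) ? (56 - index) : (120 - index);
}

int main(void)
{
    /* 3-byte message: 0x80 + 52 zero bytes, then 8 length bytes -> 64 total. */
    printf("count=3   padlen=%u\n", pad_len_for(3));    /* 53 */
    /* 60-byte message: no room for the length, spill into a second block. */
    printf("count=60  padlen=%u\n", pad_len_for(60));   /* 60 */
    printf("bit length of 3-byte msg = %llu\n",
           (unsigned long long)(3ULL << 3));            /* 24 */
    return 0;
}
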
/linux-4.1.27/arch/sparc/crypto/
sha512_glue.c
30 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_sparc64_init() local
31 sctx->state[0] = SHA512_H0; sha512_sparc64_init()
32 sctx->state[1] = SHA512_H1; sha512_sparc64_init()
33 sctx->state[2] = SHA512_H2; sha512_sparc64_init()
34 sctx->state[3] = SHA512_H3; sha512_sparc64_init()
35 sctx->state[4] = SHA512_H4; sha512_sparc64_init()
36 sctx->state[5] = SHA512_H5; sha512_sparc64_init()
37 sctx->state[6] = SHA512_H6; sha512_sparc64_init()
38 sctx->state[7] = SHA512_H7; sha512_sparc64_init()
39 sctx->count[0] = sctx->count[1] = 0; sha512_sparc64_init()
46 struct sha512_state *sctx = shash_desc_ctx(desc); sha384_sparc64_init() local
47 sctx->state[0] = SHA384_H0; sha384_sparc64_init()
48 sctx->state[1] = SHA384_H1; sha384_sparc64_init()
49 sctx->state[2] = SHA384_H2; sha384_sparc64_init()
50 sctx->state[3] = SHA384_H3; sha384_sparc64_init()
51 sctx->state[4] = SHA384_H4; sha384_sparc64_init()
52 sctx->state[5] = SHA384_H5; sha384_sparc64_init()
53 sctx->state[6] = SHA384_H6; sha384_sparc64_init()
54 sctx->state[7] = SHA384_H7; sha384_sparc64_init()
55 sctx->count[0] = sctx->count[1] = 0; sha384_sparc64_init()
60 static void __sha512_sparc64_update(struct sha512_state *sctx, const u8 *data, __sha512_sparc64_update() argument
65 if ((sctx->count[0] += len) < len) __sha512_sparc64_update()
66 sctx->count[1]++; __sha512_sparc64_update()
69 memcpy(sctx->buf + partial, data, done); __sha512_sparc64_update()
70 sha512_sparc64_transform(sctx->state, sctx->buf, 1); __sha512_sparc64_update()
75 sha512_sparc64_transform(sctx->state, data + done, rounds); __sha512_sparc64_update()
79 memcpy(sctx->buf, data + done, len - done); __sha512_sparc64_update()
85 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_sparc64_update() local
86 unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; sha512_sparc64_update()
90 if ((sctx->count[0] += len) < len) sha512_sparc64_update()
91 sctx->count[1]++; sha512_sparc64_update()
92 memcpy(sctx->buf + partial, data, len); sha512_sparc64_update()
94 __sha512_sparc64_update(sctx, data, len, partial); sha512_sparc64_update()
101 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_sparc64_final() local
108 bits[1] = cpu_to_be64(sctx->count[0] << 3); sha512_sparc64_final()
109 bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); sha512_sparc64_final()
112 index = sctx->count[0] % SHA512_BLOCK_SIZE; sha512_sparc64_final()
117 if ((sctx->count[0] += padlen) < padlen) sha512_sparc64_final()
118 sctx->count[1]++; sha512_sparc64_final()
119 memcpy(sctx->buf + index, padding, padlen); sha512_sparc64_final()
121 __sha512_sparc64_update(sctx, padding, padlen, index); sha512_sparc64_final()
123 __sha512_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 112); sha512_sparc64_final()
127 dst[i] = cpu_to_be64(sctx->state[i]); sha512_sparc64_final()
130 memset(sctx, 0, sizeof(*sctx)); sha512_sparc64_final()
sha256_glue.c
31 struct sha256_state *sctx = shash_desc_ctx(desc); sha224_sparc64_init() local
32 sctx->state[0] = SHA224_H0; sha224_sparc64_init()
33 sctx->state[1] = SHA224_H1; sha224_sparc64_init()
34 sctx->state[2] = SHA224_H2; sha224_sparc64_init()
35 sctx->state[3] = SHA224_H3; sha224_sparc64_init()
36 sctx->state[4] = SHA224_H4; sha224_sparc64_init()
37 sctx->state[5] = SHA224_H5; sha224_sparc64_init()
38 sctx->state[6] = SHA224_H6; sha224_sparc64_init()
39 sctx->state[7] = SHA224_H7; sha224_sparc64_init()
40 sctx->count = 0; sha224_sparc64_init()
47 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_sparc64_init() local
48 sctx->state[0] = SHA256_H0; sha256_sparc64_init()
49 sctx->state[1] = SHA256_H1; sha256_sparc64_init()
50 sctx->state[2] = SHA256_H2; sha256_sparc64_init()
51 sctx->state[3] = SHA256_H3; sha256_sparc64_init()
52 sctx->state[4] = SHA256_H4; sha256_sparc64_init()
53 sctx->state[5] = SHA256_H5; sha256_sparc64_init()
54 sctx->state[6] = SHA256_H6; sha256_sparc64_init()
55 sctx->state[7] = SHA256_H7; sha256_sparc64_init()
56 sctx->count = 0; sha256_sparc64_init()
61 static void __sha256_sparc64_update(struct sha256_state *sctx, const u8 *data, __sha256_sparc64_update() argument
66 sctx->count += len; __sha256_sparc64_update()
69 memcpy(sctx->buf + partial, data, done); __sha256_sparc64_update()
70 sha256_sparc64_transform(sctx->state, sctx->buf, 1); __sha256_sparc64_update()
75 sha256_sparc64_transform(sctx->state, data + done, rounds); __sha256_sparc64_update()
79 memcpy(sctx->buf, data + done, len - done); __sha256_sparc64_update()
85 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_sparc64_update() local
86 unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; sha256_sparc64_update()
90 sctx->count += len; sha256_sparc64_update()
91 memcpy(sctx->buf + partial, data, len); sha256_sparc64_update()
93 __sha256_sparc64_update(sctx, data, len, partial); sha256_sparc64_update()
100 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_sparc64_final() local
106 bits = cpu_to_be64(sctx->count << 3); sha256_sparc64_final()
109 index = sctx->count % SHA256_BLOCK_SIZE; sha256_sparc64_final()
114 sctx->count += padlen; sha256_sparc64_final()
115 memcpy(sctx->buf + index, padding, padlen); sha256_sparc64_final()
117 __sha256_sparc64_update(sctx, padding, padlen, index); sha256_sparc64_final()
119 __sha256_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56); sha256_sparc64_final()
123 dst[i] = cpu_to_be32(sctx->state[i]); sha256_sparc64_final()
126 memset(sctx, 0, sizeof(*sctx)); sha256_sparc64_final()
145 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_sparc64_export() local
147 memcpy(out, sctx, sizeof(*sctx)); sha256_sparc64_export()
153 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_sparc64_import() local
155 memcpy(sctx, in, sizeof(*sctx)); sha256_sparc64_import()
sha1_glue.c
31 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_sparc64_init() local
33 *sctx = (struct sha1_state){ sha1_sparc64_init()
40 static void __sha1_sparc64_update(struct sha1_state *sctx, const u8 *data, __sha1_sparc64_update() argument
45 sctx->count += len; __sha1_sparc64_update()
48 memcpy(sctx->buffer + partial, data, done); __sha1_sparc64_update()
49 sha1_sparc64_transform(sctx->state, sctx->buffer, 1); __sha1_sparc64_update()
54 sha1_sparc64_transform(sctx->state, data + done, rounds); __sha1_sparc64_update()
58 memcpy(sctx->buffer, data + done, len - done); __sha1_sparc64_update()
64 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_sparc64_update() local
65 unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; sha1_sparc64_update()
69 sctx->count += len; sha1_sparc64_update()
70 memcpy(sctx->buffer + partial, data, len); sha1_sparc64_update()
72 __sha1_sparc64_update(sctx, data, len, partial); sha1_sparc64_update()
80 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_sparc64_final() local
86 bits = cpu_to_be64(sctx->count << 3); sha1_sparc64_final()
89 index = sctx->count % SHA1_BLOCK_SIZE; sha1_sparc64_final()
94 sctx->count += padlen; sha1_sparc64_final()
95 memcpy(sctx->buffer + index, padding, padlen); sha1_sparc64_final()
97 __sha1_sparc64_update(sctx, padding, padlen, index); sha1_sparc64_final()
99 __sha1_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56); sha1_sparc64_final()
103 dst[i] = cpu_to_be32(sctx->state[i]); sha1_sparc64_final()
106 memset(sctx, 0, sizeof(*sctx)); sha1_sparc64_final()
113 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_sparc64_export() local
115 memcpy(out, sctx, sizeof(*sctx)); sha1_sparc64_export()
122 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_sparc64_import() local
124 memcpy(sctx, in, sizeof(*sctx)); sha1_sparc64_import()
md5_glue.c
45 static void __md5_sparc64_update(struct md5_state *sctx, const u8 *data, __md5_sparc64_update() argument
50 sctx->byte_count += len; __md5_sparc64_update()
53 memcpy((u8 *)sctx->block + partial, data, done); __md5_sparc64_update()
54 md5_sparc64_transform(sctx->hash, (u8 *)sctx->block, 1); __md5_sparc64_update()
59 md5_sparc64_transform(sctx->hash, data + done, rounds); __md5_sparc64_update()
63 memcpy(sctx->block, data + done, len - done); __md5_sparc64_update()
69 struct md5_state *sctx = shash_desc_ctx(desc); md5_sparc64_update() local
70 unsigned int partial = sctx->byte_count % MD5_HMAC_BLOCK_SIZE; md5_sparc64_update()
74 sctx->byte_count += len; md5_sparc64_update()
75 memcpy((u8 *)sctx->block + partial, data, len); md5_sparc64_update()
77 __md5_sparc64_update(sctx, data, len, partial); md5_sparc64_update()
85 struct md5_state *sctx = shash_desc_ctx(desc); md5_sparc64_final() local
91 bits = cpu_to_le64(sctx->byte_count << 3); md5_sparc64_final()
94 index = sctx->byte_count % MD5_HMAC_BLOCK_SIZE; md5_sparc64_final()
99 sctx->byte_count += padlen; md5_sparc64_final()
100 memcpy((u8 *)sctx->block + index, padding, padlen); md5_sparc64_final()
102 __md5_sparc64_update(sctx, padding, padlen, index); md5_sparc64_final()
104 __md5_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56); md5_sparc64_final()
108 dst[i] = sctx->hash[i]; md5_sparc64_final()
111 memset(sctx, 0, sizeof(*sctx)); md5_sparc64_final()
118 struct md5_state *sctx = shash_desc_ctx(desc); md5_sparc64_export() local
120 memcpy(out, sctx, sizeof(*sctx)); md5_sparc64_export()
127 struct md5_state *sctx = shash_desc_ctx(desc); md5_sparc64_import() local
129 memcpy(sctx, in, sizeof(*sctx)); md5_sparc64_import()
/linux-4.1.27/arch/s390/crypto/
sha256_s390.c
26 struct s390_sha_ctx *sctx = shash_desc_ctx(desc); sha256_init() local
28 sctx->state[0] = SHA256_H0; sha256_init()
29 sctx->state[1] = SHA256_H1; sha256_init()
30 sctx->state[2] = SHA256_H2; sha256_init()
31 sctx->state[3] = SHA256_H3; sha256_init()
32 sctx->state[4] = SHA256_H4; sha256_init()
33 sctx->state[5] = SHA256_H5; sha256_init()
34 sctx->state[6] = SHA256_H6; sha256_init()
35 sctx->state[7] = SHA256_H7; sha256_init()
36 sctx->count = 0; sha256_init()
37 sctx->func = KIMD_SHA_256; sha256_init()
44 struct s390_sha_ctx *sctx = shash_desc_ctx(desc); sha256_export() local
47 octx->count = sctx->count; sha256_export()
48 memcpy(octx->state, sctx->state, sizeof(octx->state)); sha256_export()
49 memcpy(octx->buf, sctx->buf, sizeof(octx->buf)); sha256_export()
55 struct s390_sha_ctx *sctx = shash_desc_ctx(desc); sha256_import() local
58 sctx->count = ictx->count; sha256_import()
59 memcpy(sctx->state, ictx->state, sizeof(ictx->state)); sha256_import()
60 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); sha256_import()
61 sctx->func = KIMD_SHA_256; sha256_import()
86 struct s390_sha_ctx *sctx = shash_desc_ctx(desc); sha224_init() local
88 sctx->state[0] = SHA224_H0; sha224_init()
89 sctx->state[1] = SHA224_H1; sha224_init()
90 sctx->state[2] = SHA224_H2; sha224_init()
91 sctx->state[3] = SHA224_H3; sha224_init()
92 sctx->state[4] = SHA224_H4; sha224_init()
93 sctx->state[5] = SHA224_H5; sha224_init()
94 sctx->state[6] = SHA224_H6; sha224_init()
95 sctx->state[7] = SHA224_H7; sha224_init()
96 sctx->count = 0; sha224_init()
97 sctx->func = KIMD_SHA_256; sha224_init()
sha1_s390.c
36 struct s390_sha_ctx *sctx = shash_desc_ctx(desc); sha1_init() local
38 sctx->state[0] = SHA1_H0; sha1_init()
39 sctx->state[1] = SHA1_H1; sha1_init()
40 sctx->state[2] = SHA1_H2; sha1_init()
41 sctx->state[3] = SHA1_H3; sha1_init()
42 sctx->state[4] = SHA1_H4; sha1_init()
43 sctx->count = 0; sha1_init()
44 sctx->func = KIMD_SHA_1; sha1_init()
51 struct s390_sha_ctx *sctx = shash_desc_ctx(desc); sha1_export() local
54 octx->count = sctx->count; sha1_export()
55 memcpy(octx->state, sctx->state, sizeof(octx->state)); sha1_export()
56 memcpy(octx->buffer, sctx->buf, sizeof(octx->buffer)); sha1_export()
62 struct s390_sha_ctx *sctx = shash_desc_ctx(desc); sha1_import() local
65 sctx->count = ictx->count; sha1_import()
66 memcpy(sctx->state, ictx->state, sizeof(ictx->state)); sha1_import()
67 memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer)); sha1_import()
68 sctx->func = KIMD_SHA_1; sha1_import()
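
The s390 export/import hooks above serialize the driver's private state into the generic state layout so a partially hashed stream can be saved and resumed. A simplified userspace sketch of that round trip; the structures and the facility-code constant are placeholders, not the kernel definitions:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define DEMO_KIMD_SHA_256 0x02            /* placeholder for KIMD_SHA_256 */

struct demo_sha256_state {                /* stand-in for struct sha256_state */
    uint32_t state[8];
    uint64_t count;
    uint8_t  buf[64];
};

struct demo_s390_ctx {                    /* stand-in for struct s390_sha_ctx */
    uint64_t count;
    uint32_t state[8];
    uint8_t  buf[64];
    int      func;
};

static void demo_export(const struct demo_s390_ctx *sctx, struct demo_sha256_state *octx)
{
    octx->count = sctx->count;
    memcpy(octx->state, sctx->state, sizeof(octx->state));
    memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
}

static void demo_import(struct demo_s390_ctx *sctx, const struct demo_sha256_state *ictx)
{
    sctx->count = ictx->count;
    memcpy(sctx->state, ictx->state, sizeof(sctx->state));
    memcpy(sctx->buf, ictx->buf, sizeof(sctx->buf));
    sctx->func = DEMO_KIMD_SHA_256;       /* re-select the hash facility on import */
}

int main(void)
{
    struct demo_s390_ctx a = { .count = 72, .func = DEMO_KIMD_SHA_256 };
    struct demo_sha256_state snapshot;
    struct demo_s390_ctx b;

    demo_export(&a, &snapshot);           /* suspend: hand the state to the caller */
    demo_import(&b, &snapshot);           /* resume in a fresh descriptor */
    printf("resumed count = %llu\n", (unsigned long long)b.count);
    return 0;
}
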
aes_s390.c
97 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); setkey_fallback_cip() local
100 sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; setkey_fallback_cip()
101 sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags & setkey_fallback_cip()
104 ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len); setkey_fallback_cip()
107 tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags & setkey_fallback_cip()
116 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); aes_set_key() local
126 sctx->key_len = key_len; aes_set_key()
128 memcpy(sctx->key, in_key, key_len); aes_set_key()
137 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); aes_encrypt() local
139 if (unlikely(need_fallback(sctx->key_len))) { aes_encrypt()
140 crypto_cipher_encrypt_one(sctx->fallback.cip, out, in); aes_encrypt()
144 switch (sctx->key_len) { aes_encrypt()
146 crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in, aes_encrypt()
150 crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in, aes_encrypt()
154 crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in, aes_encrypt()
162 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); aes_decrypt() local
164 if (unlikely(need_fallback(sctx->key_len))) { aes_decrypt()
165 crypto_cipher_decrypt_one(sctx->fallback.cip, out, in); aes_decrypt()
169 switch (sctx->key_len) { aes_decrypt()
171 crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in, aes_decrypt()
175 crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in, aes_decrypt()
179 crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in, aes_decrypt()
188 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); fallback_init_cip() local
190 sctx->fallback.cip = crypto_alloc_cipher(name, 0, fallback_init_cip()
193 if (IS_ERR(sctx->fallback.cip)) { fallback_init_cip()
196 return PTR_ERR(sctx->fallback.cip); fallback_init_cip()
204 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); fallback_exit_cip() local
206 crypto_free_cipher(sctx->fallback.cip); fallback_exit_cip()
207 sctx->fallback.cip = NULL; fallback_exit_cip()
235 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); setkey_fallback_blk() local
238 sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; setkey_fallback_blk()
239 sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags & setkey_fallback_blk()
242 ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len); setkey_fallback_blk()
245 tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags & setkey_fallback_blk()
257 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); fallback_blk_dec() local
260 desc->tfm = sctx->fallback.blk; fallback_blk_dec()
274 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); fallback_blk_enc() local
277 desc->tfm = sctx->fallback.blk; fallback_blk_enc()
288 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); ecb_aes_set_key() local
293 sctx->key_len = key_len; ecb_aes_set_key()
299 sctx->enc = KM_AES_128_ENCRYPT; ecb_aes_set_key()
300 sctx->dec = KM_AES_128_DECRYPT; ecb_aes_set_key()
303 sctx->enc = KM_AES_192_ENCRYPT; ecb_aes_set_key()
304 sctx->dec = KM_AES_192_DECRYPT; ecb_aes_set_key()
307 sctx->enc = KM_AES_256_ENCRYPT; ecb_aes_set_key()
308 sctx->dec = KM_AES_256_DECRYPT; ecb_aes_set_key()
342 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); ecb_aes_encrypt() local
345 if (unlikely(need_fallback(sctx->key_len))) ecb_aes_encrypt()
349 return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk); ecb_aes_encrypt()
356 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); ecb_aes_decrypt() local
359 if (unlikely(need_fallback(sctx->key_len))) ecb_aes_decrypt()
363 return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk); ecb_aes_decrypt()
369 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); fallback_init_blk() local
371 sctx->fallback.blk = crypto_alloc_blkcipher(name, 0, fallback_init_blk()
374 if (IS_ERR(sctx->fallback.blk)) { fallback_init_blk()
377 return PTR_ERR(sctx->fallback.blk); fallback_init_blk()
385 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); fallback_exit_blk() local
387 crypto_free_blkcipher(sctx->fallback.blk); fallback_exit_blk()
388 sctx->fallback.blk = NULL; fallback_exit_blk()
417 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); cbc_aes_set_key() local
422 sctx->key_len = key_len; cbc_aes_set_key()
428 sctx->enc = KMC_AES_128_ENCRYPT; cbc_aes_set_key()
429 sctx->dec = KMC_AES_128_DECRYPT; cbc_aes_set_key()
432 sctx->enc = KMC_AES_192_ENCRYPT; cbc_aes_set_key()
433 sctx->dec = KMC_AES_192_DECRYPT; cbc_aes_set_key()
436 sctx->enc = KMC_AES_256_ENCRYPT; cbc_aes_set_key()
437 sctx->dec = KMC_AES_256_DECRYPT; cbc_aes_set_key()
447 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); cbc_aes_crypt() local
459 memcpy(param.key, sctx->key, sctx->key_len); cbc_aes_crypt()
483 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); cbc_aes_encrypt() local
486 if (unlikely(need_fallback(sctx->key_len))) cbc_aes_encrypt()
490 return cbc_aes_crypt(desc, sctx->enc, &walk); cbc_aes_encrypt()
497 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); cbc_aes_decrypt() local
500 if (unlikely(need_fallback(sctx->key_len))) cbc_aes_decrypt()
504 return cbc_aes_crypt(desc, sctx->dec, &walk); cbc_aes_decrypt()
743 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); ctr_aes_set_key() local
747 sctx->enc = KMCTR_AES_128_ENCRYPT; ctr_aes_set_key()
748 sctx->dec = KMCTR_AES_128_DECRYPT; ctr_aes_set_key()
751 sctx->enc = KMCTR_AES_192_ENCRYPT; ctr_aes_set_key()
752 sctx->dec = KMCTR_AES_192_DECRYPT; ctr_aes_set_key()
755 sctx->enc = KMCTR_AES_256_ENCRYPT; ctr_aes_set_key()
756 sctx->dec = KMCTR_AES_256_DECRYPT; ctr_aes_set_key()
778 struct s390_aes_ctx *sctx, struct blkcipher_walk *walk) ctr_aes_crypt()
800 ret = crypt_s390_kmctr(func, sctx->key, out, in, ctr_aes_crypt()
833 ret = crypt_s390_kmctr(func, sctx->key, buf, in, ctr_aes_crypt()
850 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); ctr_aes_encrypt() local
854 return ctr_aes_crypt(desc, sctx->enc, sctx, &walk); ctr_aes_encrypt()
861 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); ctr_aes_decrypt() local
865 return ctr_aes_crypt(desc, sctx->dec, sctx, &walk); ctr_aes_decrypt()
777 ctr_aes_crypt(struct blkcipher_desc *desc, long func, struct s390_aes_ctx *sctx, struct blkcipher_walk *walk) ctr_aes_crypt() argument
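
aes_set_key()/aes_encrypt() above store the key length once and later dispatch on it to pick the matching machine function code, handing the request to the software fallback cipher when it cannot be done in hardware. A hedged sketch of that dispatch shape with hypothetical constants (the real driver also consults need_fallback() for facility support):

#include <stdio.h>

enum demo_func { DEMO_AES_128, DEMO_AES_192, DEMO_AES_256, DEMO_FALLBACK };

/* Map the stored key length to a function code, as aes_encrypt() does with
 * its switch on sctx->key_len.  In the driver the fallback path is also
 * taken when the machine lacks the facility for a given length. */
static enum demo_func pick_func(unsigned int key_len)
{
    switch (key_len) {
    case 16: return DEMO_AES_128;   /* KM_AES_128_* function codes */
    case 24: return DEMO_AES_192;   /* KM_AES_192_* */
    case 32: return DEMO_AES_256;   /* KM_AES_256_* */
    default: return DEMO_FALLBACK;  /* hand off to the software cipher */
    }
}

int main(void)
{
    printf("16-byte key -> func %d\n", pick_func(16));
    printf("32-byte key -> func %d\n", pick_func(32));
    return 0;
}
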
sha512_s390.c
45 struct s390_sha_ctx *sctx = shash_desc_ctx(desc); sha512_export() local
48 octx->count[0] = sctx->count; sha512_export()
50 memcpy(octx->state, sctx->state, sizeof(octx->state)); sha512_export()
51 memcpy(octx->buf, sctx->buf, sizeof(octx->buf)); sha512_export()
57 struct s390_sha_ctx *sctx = shash_desc_ctx(desc); sha512_import() local
62 sctx->count = ictx->count[0]; sha512_import()
64 memcpy(sctx->state, ictx->state, sizeof(ictx->state)); sha512_import()
65 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); sha512_import()
66 sctx->func = KIMD_SHA_512; sha512_import()
/linux-4.1.27/arch/mips/cavium-octeon/crypto/
octeon-sha512.c
35 static void octeon_sha512_store_hash(struct sha512_state *sctx) octeon_sha512_store_hash() argument
37 write_octeon_64bit_hash_sha512(sctx->state[0], 0); octeon_sha512_store_hash()
38 write_octeon_64bit_hash_sha512(sctx->state[1], 1); octeon_sha512_store_hash()
39 write_octeon_64bit_hash_sha512(sctx->state[2], 2); octeon_sha512_store_hash()
40 write_octeon_64bit_hash_sha512(sctx->state[3], 3); octeon_sha512_store_hash()
41 write_octeon_64bit_hash_sha512(sctx->state[4], 4); octeon_sha512_store_hash()
42 write_octeon_64bit_hash_sha512(sctx->state[5], 5); octeon_sha512_store_hash()
43 write_octeon_64bit_hash_sha512(sctx->state[6], 6); octeon_sha512_store_hash()
44 write_octeon_64bit_hash_sha512(sctx->state[7], 7); octeon_sha512_store_hash()
47 static void octeon_sha512_read_hash(struct sha512_state *sctx) octeon_sha512_read_hash() argument
49 sctx->state[0] = read_octeon_64bit_hash_sha512(0); octeon_sha512_read_hash()
50 sctx->state[1] = read_octeon_64bit_hash_sha512(1); octeon_sha512_read_hash()
51 sctx->state[2] = read_octeon_64bit_hash_sha512(2); octeon_sha512_read_hash()
52 sctx->state[3] = read_octeon_64bit_hash_sha512(3); octeon_sha512_read_hash()
53 sctx->state[4] = read_octeon_64bit_hash_sha512(4); octeon_sha512_read_hash()
54 sctx->state[5] = read_octeon_64bit_hash_sha512(5); octeon_sha512_read_hash()
55 sctx->state[6] = read_octeon_64bit_hash_sha512(6); octeon_sha512_read_hash()
56 sctx->state[7] = read_octeon_64bit_hash_sha512(7); octeon_sha512_read_hash()
83 struct sha512_state *sctx = shash_desc_ctx(desc); octeon_sha512_init() local
85 sctx->state[0] = SHA512_H0; octeon_sha512_init()
86 sctx->state[1] = SHA512_H1; octeon_sha512_init()
87 sctx->state[2] = SHA512_H2; octeon_sha512_init()
88 sctx->state[3] = SHA512_H3; octeon_sha512_init()
89 sctx->state[4] = SHA512_H4; octeon_sha512_init()
90 sctx->state[5] = SHA512_H5; octeon_sha512_init()
91 sctx->state[6] = SHA512_H6; octeon_sha512_init()
92 sctx->state[7] = SHA512_H7; octeon_sha512_init()
93 sctx->count[0] = sctx->count[1] = 0; octeon_sha512_init()
100 struct sha512_state *sctx = shash_desc_ctx(desc); octeon_sha384_init() local
102 sctx->state[0] = SHA384_H0; octeon_sha384_init()
103 sctx->state[1] = SHA384_H1; octeon_sha384_init()
104 sctx->state[2] = SHA384_H2; octeon_sha384_init()
105 sctx->state[3] = SHA384_H3; octeon_sha384_init()
106 sctx->state[4] = SHA384_H4; octeon_sha384_init()
107 sctx->state[5] = SHA384_H5; octeon_sha384_init()
108 sctx->state[6] = SHA384_H6; octeon_sha384_init()
109 sctx->state[7] = SHA384_H7; octeon_sha384_init()
110 sctx->count[0] = sctx->count[1] = 0; octeon_sha384_init()
115 static void __octeon_sha512_update(struct sha512_state *sctx, const u8 *data, __octeon_sha512_update() argument
123 index = sctx->count[0] % SHA512_BLOCK_SIZE; __octeon_sha512_update()
126 if ((sctx->count[0] += len) < len) __octeon_sha512_update()
127 sctx->count[1]++; __octeon_sha512_update()
133 memcpy(&sctx->buf[index], data, part_len); __octeon_sha512_update()
134 octeon_sha512_transform(sctx->buf); __octeon_sha512_update()
146 memcpy(&sctx->buf[index], &data[i], len - i); __octeon_sha512_update()
152 struct sha512_state *sctx = shash_desc_ctx(desc); octeon_sha512_update() local
161 if ((sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE) octeon_sha512_update()
165 octeon_sha512_store_hash(sctx); octeon_sha512_update()
167 __octeon_sha512_update(sctx, data, len); octeon_sha512_update()
169 octeon_sha512_read_hash(sctx); octeon_sha512_update()
177 struct sha512_state *sctx = shash_desc_ctx(desc); octeon_sha512_final() local
188 bits[1] = cpu_to_be64(sctx->count[0] << 3); octeon_sha512_final()
189 bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); octeon_sha512_final()
192 index = sctx->count[0] & 0x7f; octeon_sha512_final()
196 octeon_sha512_store_hash(sctx); octeon_sha512_final()
198 __octeon_sha512_update(sctx, padding, pad_len); octeon_sha512_final()
201 __octeon_sha512_update(sctx, (const u8 *)bits, sizeof(bits)); octeon_sha512_final()
203 octeon_sha512_read_hash(sctx); octeon_sha512_final()
208 dst[i] = cpu_to_be64(sctx->state[i]); octeon_sha512_final()
211 memset(sctx, 0, sizeof(struct sha512_state)); octeon_sha512_final()
octeon-sha1.c
35 static void octeon_sha1_store_hash(struct sha1_state *sctx) octeon_sha1_store_hash() argument
37 u64 *hash = (u64 *)sctx->state; octeon_sha1_store_hash()
41 } hash_tail = { { sctx->state[4], } }; octeon_sha1_store_hash()
49 static void octeon_sha1_read_hash(struct sha1_state *sctx) octeon_sha1_read_hash() argument
51 u64 *hash = (u64 *)sctx->state; octeon_sha1_read_hash()
60 sctx->state[4] = hash_tail.word[0]; octeon_sha1_read_hash()
80 struct sha1_state *sctx = shash_desc_ctx(desc); octeon_sha1_init() local
82 sctx->state[0] = SHA1_H0; octeon_sha1_init()
83 sctx->state[1] = SHA1_H1; octeon_sha1_init()
84 sctx->state[2] = SHA1_H2; octeon_sha1_init()
85 sctx->state[3] = SHA1_H3; octeon_sha1_init()
86 sctx->state[4] = SHA1_H4; octeon_sha1_init()
87 sctx->count = 0; octeon_sha1_init()
92 static void __octeon_sha1_update(struct sha1_state *sctx, const u8 *data, __octeon_sha1_update() argument
99 partial = sctx->count % SHA1_BLOCK_SIZE; __octeon_sha1_update()
100 sctx->count += len; __octeon_sha1_update()
107 memcpy(sctx->buffer + partial, data, __octeon_sha1_update()
109 src = sctx->buffer; __octeon_sha1_update()
120 memcpy(sctx->buffer + partial, src, len - done); __octeon_sha1_update()
126 struct sha1_state *sctx = shash_desc_ctx(desc); octeon_sha1_update() local
135 if ((sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE) octeon_sha1_update()
139 octeon_sha1_store_hash(sctx); octeon_sha1_update()
141 __octeon_sha1_update(sctx, data, len); octeon_sha1_update()
143 octeon_sha1_read_hash(sctx); octeon_sha1_update()
151 struct sha1_state *sctx = shash_desc_ctx(desc); octeon_sha1_final() local
162 bits = cpu_to_be64(sctx->count << 3); octeon_sha1_final()
165 index = sctx->count & 0x3f; octeon_sha1_final()
169 octeon_sha1_store_hash(sctx); octeon_sha1_final()
171 __octeon_sha1_update(sctx, padding, pad_len); octeon_sha1_final()
174 __octeon_sha1_update(sctx, (const u8 *)&bits, sizeof(bits)); octeon_sha1_final()
176 octeon_sha1_read_hash(sctx); octeon_sha1_final()
181 dst[i] = cpu_to_be32(sctx->state[i]); octeon_sha1_final()
184 memset(sctx, 0, sizeof(*sctx)); octeon_sha1_final()
191 struct sha1_state *sctx = shash_desc_ctx(desc); octeon_sha1_export() local
193 memcpy(out, sctx, sizeof(*sctx)); octeon_sha1_export()
199 struct sha1_state *sctx = shash_desc_ctx(desc); octeon_sha1_import() local
201 memcpy(sctx, in, sizeof(*sctx)); octeon_sha1_import()
octeon-sha256.c
36 static void octeon_sha256_store_hash(struct sha256_state *sctx) octeon_sha256_store_hash() argument
38 u64 *hash = (u64 *)sctx->state; octeon_sha256_store_hash()
46 static void octeon_sha256_read_hash(struct sha256_state *sctx) octeon_sha256_read_hash() argument
48 u64 *hash = (u64 *)sctx->state; octeon_sha256_read_hash()
72 struct sha256_state *sctx = shash_desc_ctx(desc); octeon_sha224_init() local
74 sctx->state[0] = SHA224_H0; octeon_sha224_init()
75 sctx->state[1] = SHA224_H1; octeon_sha224_init()
76 sctx->state[2] = SHA224_H2; octeon_sha224_init()
77 sctx->state[3] = SHA224_H3; octeon_sha224_init()
78 sctx->state[4] = SHA224_H4; octeon_sha224_init()
79 sctx->state[5] = SHA224_H5; octeon_sha224_init()
80 sctx->state[6] = SHA224_H6; octeon_sha224_init()
81 sctx->state[7] = SHA224_H7; octeon_sha224_init()
82 sctx->count = 0; octeon_sha224_init()
89 struct sha256_state *sctx = shash_desc_ctx(desc); octeon_sha256_init() local
91 sctx->state[0] = SHA256_H0; octeon_sha256_init()
92 sctx->state[1] = SHA256_H1; octeon_sha256_init()
93 sctx->state[2] = SHA256_H2; octeon_sha256_init()
94 sctx->state[3] = SHA256_H3; octeon_sha256_init()
95 sctx->state[4] = SHA256_H4; octeon_sha256_init()
96 sctx->state[5] = SHA256_H5; octeon_sha256_init()
97 sctx->state[6] = SHA256_H6; octeon_sha256_init()
98 sctx->state[7] = SHA256_H7; octeon_sha256_init()
99 sctx->count = 0; octeon_sha256_init()
104 static void __octeon_sha256_update(struct sha256_state *sctx, const u8 *data, __octeon_sha256_update() argument
111 partial = sctx->count % SHA256_BLOCK_SIZE; __octeon_sha256_update()
112 sctx->count += len; __octeon_sha256_update()
119 memcpy(sctx->buf + partial, data, __octeon_sha256_update()
121 src = sctx->buf; __octeon_sha256_update()
132 memcpy(sctx->buf + partial, src, len - done); __octeon_sha256_update()
138 struct sha256_state *sctx = shash_desc_ctx(desc); octeon_sha256_update() local
147 if ((sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE) octeon_sha256_update()
151 octeon_sha256_store_hash(sctx); octeon_sha256_update()
153 __octeon_sha256_update(sctx, data, len); octeon_sha256_update()
155 octeon_sha256_read_hash(sctx); octeon_sha256_update()
163 struct sha256_state *sctx = shash_desc_ctx(desc); octeon_sha256_final() local
174 bits = cpu_to_be64(sctx->count << 3); octeon_sha256_final()
177 index = sctx->count & 0x3f; octeon_sha256_final()
181 octeon_sha256_store_hash(sctx); octeon_sha256_final()
183 __octeon_sha256_update(sctx, padding, pad_len); octeon_sha256_final()
186 __octeon_sha256_update(sctx, (const u8 *)&bits, sizeof(bits)); octeon_sha256_final()
188 octeon_sha256_read_hash(sctx); octeon_sha256_final()
193 dst[i] = cpu_to_be32(sctx->state[i]); octeon_sha256_final()
196 memset(sctx, 0, sizeof(*sctx)); octeon_sha256_final()
215 struct sha256_state *sctx = shash_desc_ctx(desc); octeon_sha256_export() local
217 memcpy(out, sctx, sizeof(*sctx)); octeon_sha256_export()
223 struct sha256_state *sctx = shash_desc_ctx(desc); octeon_sha256_import() local
225 memcpy(sctx, in, sizeof(*sctx)); octeon_sha256_import()
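
The Octeon glue above wraps every update in a store/compute/read sequence: the software digest is written into the coprocessor hash registers, the blocks are run through the unit, and the registers are read back into the state. A minimal sketch of that wrapper with stubbed accessors standing in for the write/read_octeon_64bit_hash* helpers:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint64_t fake_hw_regs[4];     /* stand-in for the coprocessor hash unit */

static void demo_store_hash(const uint64_t state[4])
{
    /* like write_octeon_64bit_hash_sha512(sctx->state[i], i) */
    memcpy(fake_hw_regs, state, sizeof(fake_hw_regs));
}

static void demo_read_hash(uint64_t state[4])
{
    /* like sctx->state[i] = read_octeon_64bit_hash_sha512(i) */
    memcpy(state, fake_hw_regs, sizeof(fake_hw_regs));
}

static void demo_update(uint64_t state[4], const uint8_t *data, size_t len)
{
    demo_store_hash(state);          /* load the current digest into the unit */
    /* ... feed full blocks to the hardware here ... */
    (void)data; (void)len;
    demo_read_hash(state);           /* pull the updated digest back out */
}

int main(void)
{
    uint64_t state[4] = { 1, 2, 3, 4 };
    uint8_t data[128] = { 0 };

    demo_update(state, data, sizeof(data));
    printf("state[0] = %llu\n", (unsigned long long)state[0]);
    return 0;
}
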
/linux-4.1.27/drivers/md/
dm-switch.c
62 struct switch_ctx *sctx; alloc_switch_ctx() local
64 sctx = kzalloc(sizeof(struct switch_ctx) + nr_paths * sizeof(struct switch_path), alloc_switch_ctx()
66 if (!sctx) alloc_switch_ctx()
69 sctx->ti = ti; alloc_switch_ctx()
70 sctx->region_size = region_size; alloc_switch_ctx()
72 ti->private = sctx; alloc_switch_ctx()
74 return sctx; alloc_switch_ctx()
79 struct switch_ctx *sctx = ti->private; alloc_region_table() local
83 if (!(sctx->region_size & (sctx->region_size - 1))) alloc_region_table()
84 sctx->region_size_bits = __ffs(sctx->region_size); alloc_region_table()
86 sctx->region_size_bits = -1; alloc_region_table()
88 sctx->region_table_entry_bits = 1; alloc_region_table()
89 while (sctx->region_table_entry_bits < sizeof(region_table_slot_t) * 8 && alloc_region_table()
90 (region_table_slot_t)1 << sctx->region_table_entry_bits < nr_paths) alloc_region_table()
91 sctx->region_table_entry_bits++; alloc_region_table()
93 sctx->region_entries_per_slot = (sizeof(region_table_slot_t) * 8) / sctx->region_table_entry_bits; alloc_region_table()
94 if (!(sctx->region_entries_per_slot & (sctx->region_entries_per_slot - 1))) alloc_region_table()
95 sctx->region_entries_per_slot_bits = __ffs(sctx->region_entries_per_slot); alloc_region_table()
97 sctx->region_entries_per_slot_bits = -1; alloc_region_table()
99 if (sector_div(nr_regions, sctx->region_size)) alloc_region_table()
102 sctx->nr_regions = nr_regions; alloc_region_table()
103 if (sctx->nr_regions != nr_regions || sctx->nr_regions >= ULONG_MAX) { alloc_region_table()
109 if (sector_div(nr_slots, sctx->region_entries_per_slot)) alloc_region_table()
117 sctx->region_table = vmalloc(nr_slots * sizeof(region_table_slot_t)); alloc_region_table()
118 if (!sctx->region_table) { alloc_region_table()
126 static void switch_get_position(struct switch_ctx *sctx, unsigned long region_nr, switch_get_position() argument
129 if (sctx->region_entries_per_slot_bits >= 0) { switch_get_position()
130 *region_index = region_nr >> sctx->region_entries_per_slot_bits; switch_get_position()
131 *bit = region_nr & (sctx->region_entries_per_slot - 1); switch_get_position()
133 *region_index = region_nr / sctx->region_entries_per_slot; switch_get_position()
134 *bit = region_nr % sctx->region_entries_per_slot; switch_get_position()
137 *bit *= sctx->region_table_entry_bits; switch_get_position()
140 static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long region_nr) switch_region_table_read() argument
145 switch_get_position(sctx, region_nr, &region_index, &bit); switch_region_table_read()
147 return (ACCESS_ONCE(sctx->region_table[region_index]) >> bit) & switch_region_table_read()
148 ((1 << sctx->region_table_entry_bits) - 1); switch_region_table_read()
154 static unsigned switch_get_path_nr(struct switch_ctx *sctx, sector_t offset) switch_get_path_nr() argument
160 if (sctx->region_size_bits >= 0) switch_get_path_nr()
161 p >>= sctx->region_size_bits; switch_get_path_nr()
163 sector_div(p, sctx->region_size); switch_get_path_nr()
165 path_nr = switch_region_table_read(sctx, p); switch_get_path_nr()
168 if (unlikely(path_nr >= sctx->nr_paths)) switch_get_path_nr()
174 static void switch_region_table_write(struct switch_ctx *sctx, unsigned long region_nr, switch_region_table_write() argument
181 switch_get_position(sctx, region_nr, &region_index, &bit); switch_region_table_write()
183 pte = sctx->region_table[region_index]; switch_region_table_write()
184 pte &= ~((((region_table_slot_t)1 << sctx->region_table_entry_bits) - 1) << bit); switch_region_table_write()
186 sctx->region_table[region_index] = pte; switch_region_table_write()
192 static void initialise_region_table(struct switch_ctx *sctx) initialise_region_table() argument
197 for (region_nr = 0; region_nr < sctx->nr_regions; region_nr++) { initialise_region_table()
198 switch_region_table_write(sctx, region_nr, path_nr); initialise_region_table()
199 if (++path_nr >= sctx->nr_paths) initialise_region_table()
206 struct switch_ctx *sctx = ti->private; parse_path() local
211 &sctx->path_list[sctx->nr_paths].dmdev); parse_path()
219 dm_put_device(ti, sctx->path_list[sctx->nr_paths].dmdev); parse_path()
223 sctx->path_list[sctx->nr_paths].start = start; parse_path()
225 sctx->nr_paths++; parse_path()
235 struct switch_ctx *sctx = ti->private; switch_dtr() local
237 while (sctx->nr_paths--) switch_dtr()
238 dm_put_device(ti, sctx->path_list[sctx->nr_paths].dmdev); switch_dtr()
240 vfree(sctx->region_table); switch_dtr()
241 kfree(sctx); switch_dtr()
260 struct switch_ctx *sctx; switch_ctr() local
286 sctx = alloc_switch_ctx(ti, nr_paths, region_size); switch_ctr()
287 if (!sctx) { switch_ctr()
306 initialise_region_table(sctx); switch_ctr()
321 struct switch_ctx *sctx = ti->private; switch_map() local
323 unsigned path_nr = switch_get_path_nr(sctx, offset); switch_map()
325 bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev; switch_map()
326 bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset; switch_map()
373 static int process_set_region_mappings(struct switch_ctx *sctx, process_set_region_mappings() argument
413 unlikely(region_index + num_write >= sctx->nr_regions)) { process_set_region_mappings()
415 region_index, num_write, sctx->nr_regions); process_set_region_mappings()
421 path_nr = switch_region_table_read(sctx, region_index - cycle_length); process_set_region_mappings()
422 switch_region_table_write(sctx, region_index, path_nr); process_set_region_mappings()
449 if (unlikely(region_index >= sctx->nr_regions)) { process_set_region_mappings()
450 DMWARN("invalid set_region_mappings region number: %lu >= %lu", region_index, sctx->nr_regions); process_set_region_mappings()
453 if (unlikely(path_nr >= sctx->nr_paths)) { process_set_region_mappings()
454 DMWARN("invalid set_region_mappings device: %lu >= %u", path_nr, sctx->nr_paths); process_set_region_mappings()
458 switch_region_table_write(sctx, region_index, path_nr); process_set_region_mappings()
473 struct switch_ctx *sctx = ti->private; switch_message() local
479 r = process_set_region_mappings(sctx, argc, argv); switch_message()
491 struct switch_ctx *sctx = ti->private; switch_status() local
501 DMEMIT("%u %u 0", sctx->nr_paths, sctx->region_size); switch_status()
502 for (path_nr = 0; path_nr < sctx->nr_paths; path_nr++) switch_status()
503 DMEMIT(" %s %llu", sctx->path_list[path_nr].dmdev->name, switch_status()
504 (unsigned long long)sctx->path_list[path_nr].start); switch_status()
517 struct switch_ctx *sctx = ti->private; switch_ioctl() local
523 path_nr = switch_get_path_nr(sctx, 0); switch_ioctl()
525 bdev = sctx->path_list[path_nr].dmdev->bdev; switch_ioctl()
526 mode = sctx->path_list[path_nr].dmdev->mode; switch_ioctl()
531 if (ti->len + sctx->path_list[path_nr].start != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) switch_ioctl()
540 struct switch_ctx *sctx = ti->private; switch_iterate_devices() local
544 for (path_nr = 0; path_nr < sctx->nr_paths; path_nr++) { switch_iterate_devices()
545 r = fn(ti, sctx->path_list[path_nr].dmdev, switch_iterate_devices()
546 sctx->path_list[path_nr].start, ti->len, data); switch_iterate_devices()
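
switch_get_position() and switch_region_table_read()/write() above pack several small path-number entries into each region-table slot and locate an entry by slot index plus bit offset. A standalone sketch of the same packing arithmetic, with hypothetical sizes (4 bits per entry, 64-bit slots):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t slot_t;

#define ENTRY_BITS       4    /* bits per region entry (enough for <= 16 paths) */
#define ENTRIES_PER_SLOT (64 / ENTRY_BITS)

static unsigned read_entry(const slot_t *table, unsigned long region_nr)
{
    unsigned long slot = region_nr / ENTRIES_PER_SLOT;
    unsigned bit = (region_nr % ENTRIES_PER_SLOT) * ENTRY_BITS;

    return (table[slot] >> bit) & ((1u << ENTRY_BITS) - 1);
}

static void write_entry(slot_t *table, unsigned long region_nr, unsigned value)
{
    unsigned long slot = region_nr / ENTRIES_PER_SLOT;
    unsigned bit = (region_nr % ENTRIES_PER_SLOT) * ENTRY_BITS;
    slot_t pte = table[slot];

    pte &= ~(((slot_t)((1u << ENTRY_BITS) - 1)) << bit);  /* clear the old entry */
    pte |= (slot_t)value << bit;                           /* store the new path nr */
    table[slot] = pte;
}

int main(void)
{
    slot_t table[2] = { 0 };

    write_entry(table, 0, 3);
    write_entry(table, 17, 5);                 /* lands in slot 1, bit offset 4 */
    printf("region 0  -> path %u\n", read_entry(table, 0));
    printf("region 17 -> path %u\n", read_entry(table, 17));
    return 0;
}
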
/linux-4.1.27/arch/arm/crypto/
sha512_neon_glue.c
81 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_neon_init() local
83 sctx->state[0] = SHA512_H0; sha512_neon_init()
84 sctx->state[1] = SHA512_H1; sha512_neon_init()
85 sctx->state[2] = SHA512_H2; sha512_neon_init()
86 sctx->state[3] = SHA512_H3; sha512_neon_init()
87 sctx->state[4] = SHA512_H4; sha512_neon_init()
88 sctx->state[5] = SHA512_H5; sha512_neon_init()
89 sctx->state[6] = SHA512_H6; sha512_neon_init()
90 sctx->state[7] = SHA512_H7; sha512_neon_init()
91 sctx->count[0] = sctx->count[1] = 0; sha512_neon_init()
99 struct sha512_state *sctx = shash_desc_ctx(desc); __sha512_neon_update() local
102 sctx->count[0] += len; __sha512_neon_update()
103 if (sctx->count[0] < len) __sha512_neon_update()
104 sctx->count[1]++; __sha512_neon_update()
108 memcpy(sctx->buf + partial, data, done); __sha512_neon_update()
109 sha512_transform_neon(sctx->state, sctx->buf, sha512_k, 1); __sha512_neon_update()
115 sha512_transform_neon(sctx->state, data + done, sha512_k, __sha512_neon_update()
121 memcpy(sctx->buf, data + done, len - done); __sha512_neon_update()
129 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_neon_update() local
130 unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; sha512_neon_update()
135 sctx->count[0] += len; sha512_neon_update()
136 if (sctx->count[0] < len) sha512_neon_update()
137 sctx->count[1]++; sha512_neon_update()
138 memcpy(sctx->buf + partial, data, len); sha512_neon_update()
158 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_neon_final() local
165 bits[1] = cpu_to_be64(sctx->count[0] << 3); sha512_neon_final()
166 bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); sha512_neon_final()
169 index = sctx->count[0] & 0x7f; sha512_neon_final()
179 sctx->count[0] += padlen; sha512_neon_final()
180 if (sctx->count[0] < padlen) sha512_neon_final()
181 sctx->count[1]++; sha512_neon_final()
182 memcpy(sctx->buf + index, padding, padlen); sha512_neon_final()
193 dst[i] = cpu_to_be64(sctx->state[i]); sha512_neon_final()
196 memset(sctx, 0, sizeof(*sctx)); sha512_neon_final()
203 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_neon_export() local
205 memcpy(out, sctx, sizeof(*sctx)); sha512_neon_export()
212 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_neon_import() local
214 memcpy(sctx, in, sizeof(*sctx)); sha512_neon_import()
221 struct sha512_state *sctx = shash_desc_ctx(desc); sha384_neon_init() local
223 sctx->state[0] = SHA384_H0; sha384_neon_init()
224 sctx->state[1] = SHA384_H1; sha384_neon_init()
225 sctx->state[2] = SHA384_H2; sha384_neon_init()
226 sctx->state[3] = SHA384_H3; sha384_neon_init()
227 sctx->state[4] = SHA384_H4; sha384_neon_init()
228 sctx->state[5] = SHA384_H5; sha384_neon_init()
229 sctx->state[6] = SHA384_H6; sha384_neon_init()
230 sctx->state[7] = SHA384_H7; sha384_neon_init()
232 sctx->count[0] = sctx->count[1] = 0; sha384_neon_init()
sha1-ce-glue.c
33 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_ce_update() local
36 (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE) sha1_ce_update()
sha1_neon_glue.c
40 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_neon_update() local
43 (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE) sha1_neon_update()
sha2-ce-glue.c
34 struct sha256_state *sctx = shash_desc_ctx(desc); sha2_ce_update() local
37 (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE) sha2_ce_update()
sha256_neon_glue.c
35 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_update() local
38 (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE) sha256_update()
/linux-4.1.27/fs/btrfs/
send.c
275 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
278 get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
280 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);
282 static int need_send_hole(struct send_ctx *sctx) need_send_hole() argument
284 return (sctx->parent_root && !sctx->cur_inode_new && need_send_hole()
285 !sctx->cur_inode_new_gen && !sctx->cur_inode_deleted && need_send_hole()
286 S_ISREG(sctx->cur_inode_mode)); need_send_hole()
541 static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len) tlv_put() argument
545 int left = sctx->send_max_size - sctx->send_size; tlv_put()
550 hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size); tlv_put()
554 sctx->send_size += total_len; tlv_put()
560 static int tlv_put_u##bits(struct send_ctx *sctx, \
564 return tlv_put(sctx, attr, &__tmp, sizeof(__tmp)); \
569 static int tlv_put_string(struct send_ctx *sctx, u16 attr, tlv_put_string() argument
574 return tlv_put(sctx, attr, str, len); tlv_put_string()
577 static int tlv_put_uuid(struct send_ctx *sctx, u16 attr, tlv_put_uuid() argument
580 return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE); tlv_put_uuid()
583 static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr, tlv_put_btrfs_timespec() argument
589 return tlv_put(sctx, attr, &bts, sizeof(bts)); tlv_put_btrfs_timespec()
593 #define TLV_PUT(sctx, attrtype, attrlen, data) \
595 ret = tlv_put(sctx, attrtype, attrlen, data); \
600 #define TLV_PUT_INT(sctx, attrtype, bits, value) \
602 ret = tlv_put_u##bits(sctx, attrtype, value); \
607 #define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
608 #define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
609 #define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
610 #define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
611 #define TLV_PUT_STRING(sctx, attrtype, str, len) \
613 ret = tlv_put_string(sctx, attrtype, str, len); \
617 #define TLV_PUT_PATH(sctx, attrtype, p) \
619 ret = tlv_put_string(sctx, attrtype, p->start, \
624 #define TLV_PUT_UUID(sctx, attrtype, uuid) \
626 ret = tlv_put_uuid(sctx, attrtype, uuid); \
630 #define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
632 ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
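
tlv_put() (line 541) appends one attribute as a small type/length header plus payload into sctx->send_buf, failing with -EOVERFLOW when the remaining space would be exceeded, and the TLV_PUT_* macros above are thin per-type wrappers around it; send_cmd() (line 667) later patches the command header's length and crc32c before the buffer is written out. A user-space sketch of the attribute framing follows; the 16-bit field widths are an assumption for illustration, and the little-endian conversion done by the kernel is omitted.

#include <errno.h>
#include <stdint.h>
#include <string.h>

/* Illustrative stand-in for struct btrfs_tlv_header (field widths assumed). */
struct tlv_header_sketch {
	uint16_t tlv_type;
	uint16_t tlv_len;
};

struct send_buf_sketch {
	uint8_t  *buf;		/* sctx->send_buf */
	uint32_t  size;		/* sctx->send_size: bytes used so far */
	uint32_t  max_size;	/* sctx->send_max_size */
};

/* mirrors tlv_put(): append header + payload, or fail if it will not fit */
static int tlv_put_sketch(struct send_buf_sketch *s, uint16_t attr,
			  const void *data, uint16_t len)
{
	uint32_t total = (uint32_t)sizeof(struct tlv_header_sketch) + len;
	struct tlv_header_sketch hdr = { .tlv_type = attr, .tlv_len = len };

	if (s->max_size - s->size < total)
		return -EOVERFLOW;

	memcpy(s->buf + s->size, &hdr, sizeof(hdr));
	memcpy(s->buf + s->size + sizeof(hdr), data, len);
	s->size += total;
	return 0;
}
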
637 static int send_header(struct send_ctx *sctx) send_header() argument
644 return write_buf(sctx->send_filp, &hdr, sizeof(hdr), send_header()
645 &sctx->send_off); send_header()
651 static int begin_cmd(struct send_ctx *sctx, int cmd) begin_cmd() argument
655 if (WARN_ON(!sctx->send_buf)) begin_cmd()
658 BUG_ON(sctx->send_size); begin_cmd()
660 sctx->send_size += sizeof(*hdr); begin_cmd()
661 hdr = (struct btrfs_cmd_header *)sctx->send_buf; begin_cmd()
667 static int send_cmd(struct send_ctx *sctx) send_cmd() argument
673 hdr = (struct btrfs_cmd_header *)sctx->send_buf; send_cmd()
674 hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr)); send_cmd()
677 crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size); send_cmd()
680 ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size, send_cmd()
681 &sctx->send_off); send_cmd()
683 sctx->total_send_size += sctx->send_size; send_cmd()
684 sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size; send_cmd()
685 sctx->send_size = 0; send_cmd()
693 static int send_rename(struct send_ctx *sctx, send_rename() argument
700 ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME); send_rename()
704 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from); send_rename()
705 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to); send_rename()
707 ret = send_cmd(sctx); send_rename()
717 static int send_link(struct send_ctx *sctx, send_link() argument
724 ret = begin_cmd(sctx, BTRFS_SEND_C_LINK); send_link()
728 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); send_link()
729 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk); send_link()
731 ret = send_cmd(sctx); send_link()
741 static int send_unlink(struct send_ctx *sctx, struct fs_path *path) send_unlink() argument
747 ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK); send_unlink()
751 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); send_unlink()
753 ret = send_cmd(sctx); send_unlink()
763 static int send_rmdir(struct send_ctx *sctx, struct fs_path *path) send_rmdir() argument
769 ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR); send_rmdir()
773 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); send_rmdir()
775 ret = send_cmd(sctx); send_rmdir()
1145 struct send_ctx *sctx; member in struct:backref_ctx
1191 * Results are collected in sctx->clone_roots->ino/offset/found_refs
1201 found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots, __iterate_backrefs()
1202 bctx->sctx->clone_roots_cnt, __iterate_backrefs()
1208 if (found->root == bctx->sctx->send_root && __iterate_backrefs()
1231 if (found->root == bctx->sctx->send_root) { __iterate_backrefs()
1273 static int find_extent_clone(struct send_ctx *sctx, find_extent_clone() argument
1337 down_read(&sctx->send_root->fs_info->commit_root_sem); find_extent_clone()
1338 ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path, find_extent_clone()
1340 up_read(&sctx->send_root->fs_info->commit_root_sem); find_extent_clone()
1353 for (i = 0; i < sctx->clone_roots_cnt; i++) { find_extent_clone()
1354 cur_clone_root = sctx->clone_roots + i; find_extent_clone()
1360 backref_ctx->sctx = sctx; find_extent_clone()
1382 ret = iterate_extent_inodes(sctx->send_root->fs_info, find_extent_clone()
1392 btrfs_err(sctx->send_root->fs_info, "did not find backref in " find_extent_clone()
1408 for (i = 0; i < sctx->clone_roots_cnt; i++) { find_extent_clone()
1409 if (sctx->clone_roots[i].found_refs) { find_extent_clone()
1411 cur_clone_root = sctx->clone_roots + i; find_extent_clone()
1412 else if (sctx->clone_roots[i].root == sctx->send_root) find_extent_clone()
1414 cur_clone_root = sctx->clone_roots + i; find_extent_clone()
1502 static int gen_unique_name(struct send_ctx *sctx, gen_unique_name() argument
1522 di = btrfs_lookup_dir_item(NULL, sctx->send_root, gen_unique_name()
1536 if (!sctx->parent_root) { gen_unique_name()
1542 di = btrfs_lookup_dir_item(NULL, sctx->parent_root, gen_unique_name()
1574 static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen) get_cur_inode_state() argument
1582 ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL, get_cur_inode_state()
1588 if (!sctx->parent_root) { get_cur_inode_state()
1591 ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen, get_cur_inode_state()
1602 if (ino < sctx->send_progress) get_cur_inode_state()
1607 if (ino < sctx->send_progress) get_cur_inode_state()
1616 if (ino < sctx->send_progress) get_cur_inode_state()
1625 if (ino < sctx->send_progress) get_cur_inode_state()
1640 static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen) is_inode_existent() argument
1644 ret = get_cur_inode_state(sctx, ino, gen); is_inode_existent()
1808 static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen, will_overwrite_ref() argument
1817 if (!sctx->parent_root) will_overwrite_ref()
1820 ret = is_inode_existent(sctx, dir, dir_gen); will_overwrite_ref()
1829 if (sctx->parent_root) { will_overwrite_ref()
1830 ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL, will_overwrite_ref()
1842 ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len, will_overwrite_ref()
1856 if (other_inode > sctx->send_progress) { will_overwrite_ref()
1857 ret = get_inode_info(sctx->parent_root, other_inode, NULL, will_overwrite_ref()
1879 static int did_overwrite_ref(struct send_ctx *sctx, did_overwrite_ref() argument
1889 if (!sctx->parent_root) did_overwrite_ref()
1892 ret = is_inode_existent(sctx, dir, dir_gen); did_overwrite_ref()
1897 ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len, did_overwrite_ref()
1907 ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL, did_overwrite_ref()
1918 if (ow_inode < sctx->send_progress) did_overwrite_ref()
1932 static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen) did_overwrite_first_ref() argument
1939 if (!sctx->parent_root) did_overwrite_first_ref()
1946 ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name); did_overwrite_first_ref()
1950 ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen, did_overwrite_first_ref()
1964 static int name_cache_insert(struct send_ctx *sctx, name_cache_insert() argument
1970 nce_head = radix_tree_lookup(&sctx->name_cache, name_cache_insert()
1980 ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head); name_cache_insert()
1988 list_add_tail(&nce->list, &sctx->name_cache_list); name_cache_insert()
1989 sctx->name_cache_size++; name_cache_insert()
1994 static void name_cache_delete(struct send_ctx *sctx, name_cache_delete() argument
1999 nce_head = radix_tree_lookup(&sctx->name_cache, name_cache_delete()
2002 btrfs_err(sctx->send_root->fs_info, name_cache_delete()
2004 nce->ino, sctx->name_cache_size); name_cache_delete()
2009 sctx->name_cache_size--; name_cache_delete()
2015 radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino); name_cache_delete()
2020 static struct name_cache_entry *name_cache_search(struct send_ctx *sctx, name_cache_search() argument
2026 nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino); name_cache_search()
2041 static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce) name_cache_used() argument
2044 list_add_tail(&nce->list, &sctx->name_cache_list); name_cache_used()
2050 static void name_cache_clean_unused(struct send_ctx *sctx) name_cache_clean_unused() argument
2054 if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE) name_cache_clean_unused()
2057 while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) { name_cache_clean_unused()
2058 nce = list_entry(sctx->name_cache_list.next, name_cache_clean_unused()
2060 name_cache_delete(sctx, nce); name_cache_clean_unused()
2065 static void name_cache_free(struct send_ctx *sctx) name_cache_free() argument
2069 while (!list_empty(&sctx->name_cache_list)) { name_cache_free()
2070 nce = list_entry(sctx->name_cache_list.next, name_cache_free()
2072 name_cache_delete(sctx, nce); name_cache_free()
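
name_cache_insert(), name_cache_used() and name_cache_clean_unused() (lines 1964-2060) keep resolved names in a radix tree keyed by inode number and additionally thread every entry onto sctx->name_cache_list in order of last use; once the cache grows past a threshold, the oldest entries are evicted from the head of that list. The radix-tree half is omitted in the sketch below, and the types and limits are illustrative assumptions rather than the kernel's definitions.

/* Illustrative bounded cache: a use-ordered circular list with a sentinel head. */
struct nce_sketch {
	struct nce_sketch *prev, *next;
	unsigned long ino;
};

struct name_cache_sketch {
	struct nce_sketch head;		/* head.next is the least recently used entry */
	unsigned int size;
	unsigned int clean_threshold;	/* start evicting above this ... */
	unsigned int max_size;		/* ... until back down to this */
};

static void cache_init_sketch(struct name_cache_sketch *c,
			      unsigned int clean_threshold, unsigned int max_size)
{
	c->head.prev = c->head.next = &c->head;
	c->size = 0;
	c->clean_threshold = clean_threshold;
	c->max_size = max_size;
}

static void nce_unlink_sketch(struct nce_sketch *nce)
{
	nce->prev->next = nce->next;
	nce->next->prev = nce->prev;
}

static void nce_add_tail_sketch(struct name_cache_sketch *c, struct nce_sketch *nce)
{
	nce->prev = c->head.prev;
	nce->next = &c->head;
	c->head.prev->next = nce;
	c->head.prev = nce;
}

/* mirrors name_cache_insert(): new entries start as most recently used */
static void cache_insert_sketch(struct name_cache_sketch *c, struct nce_sketch *nce)
{
	nce_add_tail_sketch(c, nce);
	c->size++;
}

/* mirrors name_cache_used(): move an entry to the most-recently-used end */
static void cache_used_sketch(struct name_cache_sketch *c, struct nce_sketch *nce)
{
	nce_unlink_sketch(nce);
	nce_add_tail_sketch(c, nce);
}

/* mirrors name_cache_clean_unused(): drop oldest entries past the limit */
static void cache_clean_unused_sketch(struct name_cache_sketch *c,
				      void (*drop)(struct nce_sketch *))
{
	if (c->size < c->clean_threshold)
		return;
	while (c->size > c->max_size && c->head.next != &c->head) {
		struct nce_sketch *oldest = c->head.next;

		nce_unlink_sketch(oldest);
		c->size--;
		drop(oldest);	/* send.c also deletes the radix-tree entry here */
	}
}
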
2085 static int __get_cur_name_and_parent(struct send_ctx *sctx, __get_cur_name_and_parent() argument
2100 nce = name_cache_search(sctx, ino, gen); __get_cur_name_and_parent()
2102 if (ino < sctx->send_progress && nce->need_later_update) { __get_cur_name_and_parent()
2103 name_cache_delete(sctx, nce); __get_cur_name_and_parent()
2107 name_cache_used(sctx, nce); __get_cur_name_and_parent()
2123 ret = is_inode_existent(sctx, ino, gen); __get_cur_name_and_parent()
2128 ret = gen_unique_name(sctx, ino, gen, dest); __get_cur_name_and_parent()
2139 if (ino < sctx->send_progress) __get_cur_name_and_parent()
2140 ret = get_first_ref(sctx->send_root, ino, __get_cur_name_and_parent()
2143 ret = get_first_ref(sctx->parent_root, ino, __get_cur_name_and_parent()
2152 ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen, __get_cur_name_and_parent()
2158 ret = gen_unique_name(sctx, ino, gen, dest); __get_cur_name_and_parent()
2182 if (ino < sctx->send_progress) __get_cur_name_and_parent()
2187 nce_ret = name_cache_insert(sctx, nce); __get_cur_name_and_parent()
2190 name_cache_clean_unused(sctx); __get_cur_name_and_parent()
2218 * sctx->send_progress tells this function at which point in time receiving
2221 static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen, get_cur_path() argument
2242 if (is_waiting_for_rm(sctx, ino)) { get_cur_path()
2243 ret = gen_unique_name(sctx, ino, gen, name); get_cur_path()
2250 if (is_waiting_for_move(sctx, ino)) { get_cur_path()
2251 ret = get_first_ref(sctx->parent_root, ino, get_cur_path()
2254 ret = __get_cur_name_and_parent(sctx, ino, gen, get_cur_path()
2282 static int send_subvol_begin(struct send_ctx *sctx) send_subvol_begin() argument
2285 struct btrfs_root *send_root = sctx->send_root; send_subvol_begin()
2286 struct btrfs_root *parent_root = sctx->parent_root; send_subvol_begin()
2330 ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT); send_subvol_begin()
2334 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL); send_subvol_begin()
2339 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen); send_subvol_begin()
2340 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID, send_subvol_begin()
2341 sctx->send_root->root_item.uuid); send_subvol_begin()
2342 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID, send_subvol_begin()
2343 le64_to_cpu(sctx->send_root->root_item.ctransid)); send_subvol_begin()
2345 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, send_subvol_begin()
2346 sctx->parent_root->root_item.uuid); send_subvol_begin()
2347 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, send_subvol_begin()
2348 le64_to_cpu(sctx->parent_root->root_item.ctransid)); send_subvol_begin()
2351 ret = send_cmd(sctx); send_subvol_begin()
2360 static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size) send_truncate() argument
2371 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE); send_truncate()
2375 ret = get_cur_path(sctx, ino, gen, p); send_truncate()
2378 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); send_truncate()
2379 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size); send_truncate()
2381 ret = send_cmd(sctx); send_truncate()
2389 static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode) send_chmod() argument
2400 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD); send_chmod()
2404 ret = get_cur_path(sctx, ino, gen, p); send_chmod()
2407 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); send_chmod()
2408 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777); send_chmod()
2410 ret = send_cmd(sctx); send_chmod()
2418 static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid) send_chown() argument
2429 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN); send_chown()
2433 ret = get_cur_path(sctx, ino, gen, p); send_chown()
2436 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); send_chown()
2437 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid); send_chown()
2438 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid); send_chown()
2440 ret = send_cmd(sctx); send_chown()
2448 static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen) send_utimes() argument
2473 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); send_utimes()
2481 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES); send_utimes()
2485 ret = get_cur_path(sctx, ino, gen, p); send_utimes()
2488 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); send_utimes()
2489 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime); send_utimes()
2490 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime); send_utimes()
2491 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime); send_utimes()
2494 ret = send_cmd(sctx); send_utimes()
2508 static int send_create_inode(struct send_ctx *sctx, u64 ino) send_create_inode() argument
2523 if (ino != sctx->cur_ino) { send_create_inode()
2524 ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode, send_create_inode()
2529 gen = sctx->cur_inode_gen; send_create_inode()
2530 mode = sctx->cur_inode_mode; send_create_inode()
2531 rdev = sctx->cur_inode_rdev; send_create_inode()
2553 ret = begin_cmd(sctx, cmd); send_create_inode()
2557 ret = gen_unique_name(sctx, ino, gen, p); send_create_inode()
2561 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); send_create_inode()
2562 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino); send_create_inode()
2566 ret = read_symlink(sctx->send_root, ino, p); send_create_inode()
2569 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p); send_create_inode()
2572 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev)); send_create_inode()
2573 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode); send_create_inode()
2576 ret = send_cmd(sctx); send_create_inode()
2592 static int did_create_dir(struct send_ctx *sctx, u64 dir) did_create_dir() argument
2612 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); did_create_dir()
2620 ret = btrfs_next_leaf(sctx->send_root, path); did_create_dir()
2641 di_key.objectid < sctx->send_progress) { did_create_dir()
2660 static int send_create_inode_if_needed(struct send_ctx *sctx) send_create_inode_if_needed() argument
2664 if (S_ISDIR(sctx->cur_inode_mode)) { send_create_inode_if_needed()
2665 ret = did_create_dir(sctx, sctx->cur_ino); send_create_inode_if_needed()
2674 ret = send_create_inode(sctx, sctx->cur_ino); send_create_inode_if_needed()
2752 static void free_recorded_refs(struct send_ctx *sctx) free_recorded_refs() argument
2754 __free_recorded_refs(&sctx->new_refs); free_recorded_refs()
2755 __free_recorded_refs(&sctx->deleted_refs); free_recorded_refs()
2763 static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen, orphanize_inode() argument
2773 ret = gen_unique_name(sctx, ino, gen, orphan); orphanize_inode()
2777 ret = send_rename(sctx, path, orphan); orphanize_inode()
2785 add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino) add_orphan_dir_info() argument
2787 struct rb_node **p = &sctx->orphan_dirs.rb_node; add_orphan_dir_info()
2811 rb_insert_color(&odi->node, &sctx->orphan_dirs); add_orphan_dir_info()
2816 get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino) get_orphan_dir_info() argument
2818 struct rb_node *n = sctx->orphan_dirs.rb_node; get_orphan_dir_info()
2833 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino) is_waiting_for_rm() argument
2835 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino); is_waiting_for_rm()
2840 static void free_orphan_dir_info(struct send_ctx *sctx, free_orphan_dir_info() argument
2845 rb_erase(&odi->node, &sctx->orphan_dirs); free_orphan_dir_info()
2854 static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen, can_rmdir() argument
2858 struct btrfs_root *root = sctx->parent_root; can_rmdir()
2903 dm = get_waiting_dir_move(sctx, loc.objectid); can_rmdir()
2907 odi = add_orphan_dir_info(sctx, dir); can_rmdir()
2933 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino) is_waiting_for_move() argument
2935 struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino); is_waiting_for_move()
2940 static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino) add_waiting_dir_move() argument
2942 struct rb_node **p = &sctx->waiting_dir_moves.rb_node; add_waiting_dir_move()
2966 rb_insert_color(&dm->node, &sctx->waiting_dir_moves); add_waiting_dir_move()
2971 get_waiting_dir_move(struct send_ctx *sctx, u64 ino) get_waiting_dir_move() argument
2973 struct rb_node *n = sctx->waiting_dir_moves.rb_node; get_waiting_dir_move()
2988 static void free_waiting_dir_move(struct send_ctx *sctx, free_waiting_dir_move() argument
2993 rb_erase(&dm->node, &sctx->waiting_dir_moves); free_waiting_dir_move()
2997 static int add_pending_dir_move(struct send_ctx *sctx, add_pending_dir_move() argument
3005 struct rb_node **p = &sctx->pending_dir_moves.rb_node; add_pending_dir_move()
3047 ret = add_waiting_dir_move(sctx, pm->ino);
3055 rb_insert_color(&pm->node, &sctx->pending_dir_moves);
3066 static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx, get_pending_dir_moves() argument
3069 struct rb_node *n = sctx->pending_dir_moves.rb_node; get_pending_dir_moves()
3084 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) apply_dir_move() argument
3089 u64 orig_progress = sctx->send_progress; apply_dir_move()
3103 dm = get_waiting_dir_move(sctx, pm->ino); apply_dir_move()
3106 free_waiting_dir_move(sctx, dm); apply_dir_move()
3109 ret = gen_unique_name(sctx, pm->ino, apply_dir_move()
3112 ret = get_first_ref(sctx->parent_root, pm->ino, apply_dir_move()
3116 ret = get_cur_path(sctx, parent_ino, parent_gen, apply_dir_move()
3125 sctx->send_progress = sctx->cur_ino + 1; apply_dir_move()
3129 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path); apply_dir_move()
3133 ret = send_rename(sctx, from_path, to_path); apply_dir_move()
3140 odi = get_orphan_dir_info(sctx, rmdir_ino); apply_dir_move()
3145 ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino + 1); apply_dir_move()
3156 ret = get_cur_path(sctx, rmdir_ino, odi->gen, name); apply_dir_move()
3159 ret = send_rmdir(sctx, name); apply_dir_move()
3162 free_orphan_dir_info(sctx, odi); apply_dir_move()
3166 ret = send_utimes(sctx, pm->ino, pm->gen); apply_dir_move()
3177 ret = send_utimes(sctx, cur->dir, cur->dir_gen); apply_dir_move()
3186 sctx->send_progress = orig_progress; apply_dir_move()
3191 static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m) free_pending_move() argument
3196 rb_erase(&m->node, &sctx->pending_dir_moves); free_pending_move()
3214 static int apply_children_dir_moves(struct send_ctx *sctx) apply_children_dir_moves() argument
3218 u64 parent_ino = sctx->cur_ino; apply_children_dir_moves()
3221 pm = get_pending_dir_moves(sctx, parent_ino); apply_children_dir_moves()
3231 ret = apply_dir_move(sctx, pm); apply_children_dir_moves()
3232 free_pending_move(sctx, pm); apply_children_dir_moves()
3235 pm = get_pending_dir_moves(sctx, parent_ino); apply_children_dir_moves()
3244 free_pending_move(sctx, pm); apply_children_dir_moves()
3251 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
3282 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
3285 static int wait_for_dest_dir_move(struct send_ctx *sctx, wait_for_dest_dir_move() argument
3297 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) wait_for_dest_dir_move()
3308 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0); wait_for_dest_dir_move()
3316 di = btrfs_match_dir_item_name(sctx->parent_root, path, wait_for_dest_dir_move()
3324 * parent directory with the same name that sctx->cur_ino is being wait_for_dest_dir_move()
3327 * if it is, we need to delay the rename of sctx->cur_ino as well, so wait_for_dest_dir_move()
3336 ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL, wait_for_dest_dir_move()
3340 ret = get_inode_info(sctx->send_root, di_key.objectid, NULL, wait_for_dest_dir_move()
3348 /* Different inode, no need to delay the rename of sctx->cur_ino */ wait_for_dest_dir_move()
3354 if (is_waiting_for_move(sctx, di_key.objectid)) { wait_for_dest_dir_move()
3355 ret = add_pending_dir_move(sctx, wait_for_dest_dir_move()
3356 sctx->cur_ino, wait_for_dest_dir_move()
3357 sctx->cur_inode_gen, wait_for_dest_dir_move()
3359 &sctx->new_refs, wait_for_dest_dir_move()
3360 &sctx->deleted_refs, wait_for_dest_dir_move()
3370 static int wait_for_parent_move(struct send_ctx *sctx, wait_for_parent_move() argument
3394 if (is_waiting_for_move(sctx, ino)) { wait_for_parent_move()
3402 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after, wait_for_parent_move()
3406 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before, wait_for_parent_move()
3417 if (ino > sctx->cur_ino && wait_for_parent_move()
3431 ret = add_pending_dir_move(sctx, wait_for_parent_move()
3432 sctx->cur_ino, wait_for_parent_move()
3433 sctx->cur_inode_gen, wait_for_parent_move()
3435 &sctx->new_refs, wait_for_parent_move()
3436 &sctx->deleted_refs, wait_for_parent_move()
3448 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) process_recorded_refs() argument
3462 verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); process_recorded_refs()
3468 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID); process_recorded_refs()
3488 if (!sctx->cur_inode_new) { process_recorded_refs()
3489 ret = did_overwrite_first_ref(sctx, sctx->cur_ino, process_recorded_refs()
3490 sctx->cur_inode_gen); process_recorded_refs()
3496 if (sctx->cur_inode_new || did_overwrite) { process_recorded_refs()
3497 ret = gen_unique_name(sctx, sctx->cur_ino, process_recorded_refs()
3498 sctx->cur_inode_gen, valid_path); process_recorded_refs()
3503 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, process_recorded_refs()
3509 list_for_each_entry(cur, &sctx->new_refs, list) { process_recorded_refs()
3517 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen); process_recorded_refs()
3526 list_for_each_entry(cur2, &sctx->new_refs, list) { process_recorded_refs()
3540 ret = did_create_dir(sctx, cur->dir); process_recorded_refs()
3544 ret = send_create_inode(sctx, cur->dir); process_recorded_refs()
3556 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen, process_recorded_refs()
3562 ret = is_first_ref(sctx->parent_root, process_recorded_refs()
3570 ret = orphanize_inode(sctx, ow_inode, ow_gen, process_recorded_refs()
3580 * sctx->send_progress. We need to prevent process_recorded_refs()
3584 nce = name_cache_search(sctx, ow_inode, ow_gen); process_recorded_refs()
3586 name_cache_delete(sctx, nce); process_recorded_refs()
3590 ret = send_unlink(sctx, cur->full_path); process_recorded_refs()
3596 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) { process_recorded_refs()
3597 ret = wait_for_dest_dir_move(sctx, cur, is_orphan); process_recorded_refs()
3612 ret = send_rename(sctx, valid_path, cur->full_path); process_recorded_refs()
3620 if (S_ISDIR(sctx->cur_inode_mode)) { process_recorded_refs()
3626 ret = wait_for_parent_move(sctx, cur); process_recorded_refs()
3632 ret = send_rename(sctx, valid_path, process_recorded_refs()
3641 ret = send_link(sctx, cur->full_path, process_recorded_refs()
3652 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) { process_recorded_refs()
3659 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen, process_recorded_refs()
3660 sctx->cur_ino); process_recorded_refs()
3664 ret = send_rmdir(sctx, valid_path); process_recorded_refs()
3668 ret = orphanize_inode(sctx, sctx->cur_ino, process_recorded_refs()
3669 sctx->cur_inode_gen, valid_path); process_recorded_refs()
3675 list_for_each_entry(cur, &sctx->deleted_refs, list) { process_recorded_refs()
3680 } else if (S_ISDIR(sctx->cur_inode_mode) && process_recorded_refs()
3681 !list_empty(&sctx->deleted_refs)) { process_recorded_refs()
3685 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref, process_recorded_refs()
3690 } else if (!S_ISDIR(sctx->cur_inode_mode)) { process_recorded_refs()
3696 list_for_each_entry(cur, &sctx->deleted_refs, list) { process_recorded_refs()
3697 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen, process_recorded_refs()
3698 sctx->cur_ino, sctx->cur_inode_gen, process_recorded_refs()
3703 ret = send_unlink(sctx, cur->full_path); process_recorded_refs()
3720 ret = send_unlink(sctx, valid_path); process_recorded_refs()
3738 if (cur->dir > sctx->cur_ino) process_recorded_refs()
3741 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen); process_recorded_refs()
3748 ret = send_utimes(sctx, cur->dir, cur->dir_gen); process_recorded_refs()
3753 ret = can_rmdir(sctx, cur->dir, cur->dir_gen, process_recorded_refs()
3754 sctx->cur_ino); process_recorded_refs()
3758 ret = get_cur_path(sctx, cur->dir, process_recorded_refs()
3762 ret = send_rmdir(sctx, valid_path); process_recorded_refs()
3774 free_recorded_refs(sctx); process_recorded_refs()
3783 struct send_ctx *sctx = ctx; record_ref() local
3796 ret = get_cur_path(sctx, dir, gen, p); record_ref()
3815 struct send_ctx *sctx = ctx; __record_new_ref() local
3816 return record_ref(sctx->send_root, num, dir, index, name, __record_new_ref()
3817 ctx, &sctx->new_refs); __record_new_ref()
3825 struct send_ctx *sctx = ctx; __record_deleted_ref() local
3826 return record_ref(sctx->parent_root, num, dir, index, name, __record_deleted_ref()
3827 ctx, &sctx->deleted_refs); __record_deleted_ref()
3830 static int record_new_ref(struct send_ctx *sctx) record_new_ref() argument
3834 ret = iterate_inode_ref(sctx->send_root, sctx->left_path, record_new_ref()
3835 sctx->cmp_key, 0, __record_new_ref, sctx); record_new_ref()
3844 static int record_deleted_ref(struct send_ctx *sctx) record_deleted_ref() argument
3848 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path, record_deleted_ref()
3849 sctx->cmp_key, 0, __record_deleted_ref, sctx); record_deleted_ref()
3922 struct send_ctx *sctx = ctx; __record_changed_new_ref() local
3924 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL, __record_changed_new_ref()
3929 ret = find_iref(sctx->parent_root, sctx->right_path, __record_changed_new_ref()
3930 sctx->cmp_key, dir, dir_gen, name); __record_changed_new_ref()
3932 ret = __record_new_ref(num, dir, index, name, sctx); __record_changed_new_ref()
3945 struct send_ctx *sctx = ctx; __record_changed_deleted_ref() local
3947 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL, __record_changed_deleted_ref()
3952 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key, __record_changed_deleted_ref()
3955 ret = __record_deleted_ref(num, dir, index, name, sctx); __record_changed_deleted_ref()
3962 static int record_changed_ref(struct send_ctx *sctx) record_changed_ref() argument
3966 ret = iterate_inode_ref(sctx->send_root, sctx->left_path, record_changed_ref()
3967 sctx->cmp_key, 0, __record_changed_new_ref, sctx); record_changed_ref()
3970 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path, record_changed_ref()
3971 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx); record_changed_ref()
3984 static int process_all_refs(struct send_ctx *sctx, process_all_refs() argument
4002 root = sctx->send_root; process_all_refs()
4005 root = sctx->parent_root; process_all_refs()
4008 btrfs_err(sctx->send_root->fs_info, process_all_refs()
4014 key.objectid = sctx->cmp_key->objectid; process_all_refs()
4040 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx); process_all_refs()
4048 ret = process_recorded_refs(sctx, &pending_move); process_all_refs()
4057 static int send_set_xattr(struct send_ctx *sctx, send_set_xattr() argument
4064 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR); send_set_xattr()
4068 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); send_set_xattr()
4069 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len); send_set_xattr()
4070 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len); send_set_xattr()
4072 ret = send_cmd(sctx); send_set_xattr()
4079 static int send_remove_xattr(struct send_ctx *sctx, send_remove_xattr() argument
4085 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR); send_remove_xattr()
4089 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); send_remove_xattr()
4090 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len); send_remove_xattr()
4092 ret = send_cmd(sctx); send_remove_xattr()
4105 struct send_ctx *sctx = ctx; __process_new_xattr() local
4129 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); __process_new_xattr()
4133 ret = send_set_xattr(sctx, p, name, name_len, data, data_len); __process_new_xattr()
4146 struct send_ctx *sctx = ctx; __process_deleted_xattr() local
4153 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); __process_deleted_xattr()
4157 ret = send_remove_xattr(sctx, p, name, name_len); __process_deleted_xattr()
4164 static int process_new_xattr(struct send_ctx *sctx) process_new_xattr() argument
4168 ret = iterate_dir_item(sctx->send_root, sctx->left_path, process_new_xattr()
4169 sctx->cmp_key, __process_new_xattr, sctx); process_new_xattr()
4174 static int process_deleted_xattr(struct send_ctx *sctx) process_deleted_xattr() argument
4178 ret = iterate_dir_item(sctx->parent_root, sctx->right_path, process_deleted_xattr()
4179 sctx->cmp_key, __process_deleted_xattr, sctx); process_deleted_xattr()
4248 struct send_ctx *sctx = ctx; __process_changed_new_xattr() local
4252 ret = find_xattr(sctx->parent_root, sctx->right_path, __process_changed_new_xattr()
4253 sctx->cmp_key, name, name_len, &found_data, __process_changed_new_xattr()
4278 struct send_ctx *sctx = ctx; __process_changed_deleted_xattr() local
4280 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key, __process_changed_deleted_xattr()
4291 static int process_changed_xattr(struct send_ctx *sctx) process_changed_xattr() argument
4295 ret = iterate_dir_item(sctx->send_root, sctx->left_path, process_changed_xattr()
4296 sctx->cmp_key, __process_changed_new_xattr, sctx); process_changed_xattr()
4299 ret = iterate_dir_item(sctx->parent_root, sctx->right_path, process_changed_xattr()
4300 sctx->cmp_key, __process_changed_deleted_xattr, sctx); process_changed_xattr()
4306 static int process_all_new_xattrs(struct send_ctx *sctx) process_all_new_xattrs() argument
4320 root = sctx->send_root; process_all_new_xattrs()
4322 key.objectid = sctx->cmp_key->objectid; process_all_new_xattrs()
4351 __process_new_xattr, sctx); process_all_new_xattrs()
4363 static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len) fill_read_buf() argument
4365 struct btrfs_root *root = sctx->send_root; fill_read_buf()
4376 key.objectid = sctx->cur_ino; fill_read_buf()
4396 memset(&sctx->ra, 0, sizeof(struct file_ra_state)); fill_read_buf()
4397 file_ra_state_init(&sctx->ra, inode->i_mapping); fill_read_buf()
4398 btrfs_force_ra(inode->i_mapping, &sctx->ra, NULL, index, fill_read_buf()
4422 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len); fill_read_buf()
4440 static int send_write(struct send_ctx *sctx, u64 offset, u32 len) send_write() argument
4452 num_read = fill_read_buf(sctx, offset, len); send_write()
4459 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); send_write()
4463 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); send_write()
4467 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); send_write()
4468 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); send_write()
4469 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read); send_write()
4471 ret = send_cmd(sctx); send_write()
4484 static int send_clone(struct send_ctx *sctx, send_clone() argument
4501 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE); send_clone()
4505 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); send_clone()
4509 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); send_clone()
4510 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len); send_clone()
4511 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); send_clone()
4513 if (clone_root->root == sctx->send_root) { send_clone()
4514 ret = get_inode_info(sctx->send_root, clone_root->ino, NULL, send_clone()
4518 ret = get_cur_path(sctx, clone_root->ino, gen, p); send_clone()
4525 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, send_clone()
4527 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, send_clone()
4529 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p); send_clone()
4530 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET, send_clone()
4533 ret = send_cmd(sctx); send_clone()
4544 static int send_update_extent(struct send_ctx *sctx, send_update_extent() argument
4554 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT); send_update_extent()
4558 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); send_update_extent()
4562 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); send_update_extent()
4563 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); send_update_extent()
4564 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len); send_update_extent()
4566 ret = send_cmd(sctx); send_update_extent()
4574 static int send_hole(struct send_ctx *sctx, u64 end) send_hole() argument
4577 u64 offset = sctx->cur_inode_last_extent; send_hole()
4584 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); send_hole()
4587 memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE); send_hole()
4591 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); send_hole()
4594 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); send_hole()
4595 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); send_hole()
4596 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len); send_hole()
4597 ret = send_cmd(sctx); send_hole()
4607 static int send_write_or_clone(struct send_ctx *sctx, send_write_or_clone() argument
4619 u64 bs = sctx->send_root->fs_info->sb->s_blocksize; send_write_or_clone()
4637 if (offset + len > sctx->cur_inode_size) send_write_or_clone()
4638 len = sctx->cur_inode_size - offset; send_write_or_clone()
4645 ret = send_clone(sctx, offset, len, clone_root); send_write_or_clone()
4646 } else if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) { send_write_or_clone()
4647 ret = send_update_extent(sctx, offset, len); send_write_or_clone()
4653 ret = send_write(sctx, pos + offset, l); send_write_or_clone()
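
send_write_or_clone() (line 4607) picks between three ways of reproducing a changed extent: a CLONE command when a suitable clone source was found, a bare UPDATE_EXTENT when BTRFS_SEND_FLAG_NO_FILE_DATA is set, or a loop of WRITE commands whose payload is read through fill_read_buf(). Line 4653 hints at that loop; a sketch of the chunking follows, where the 48 KiB chunk size is an assumed stand-in for the kernel's read-buffer constant.

#include <stdint.h>

#define SEND_READ_SIZE_SKETCH (48 * 1024)	/* assumed stand-in for BTRFS_SEND_READ_SIZE */

/* Stream [offset, offset + len) as a series of bounded WRITE commands. */
static int stream_extent_sketch(uint64_t offset, uint64_t len,
				int (*send_write_cb)(uint64_t off, uint32_t l))
{
	uint64_t pos = 0;

	while (pos < len) {
		uint64_t l = len - pos;

		if (l > SEND_READ_SIZE_SKETCH)
			l = SEND_READ_SIZE_SKETCH;
		int ret = send_write_cb(offset + pos, (uint32_t)l);
		if (ret < 0)
			return ret;
		pos += l;
	}
	return 0;
}
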
4666 static int is_extent_unchanged(struct send_ctx *sctx, is_extent_unchanged() argument
4731 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0); is_extent_unchanged()
4801 ret = btrfs_next_item(sctx->parent_root, path); is_extent_unchanged()
4836 static int get_last_extent(struct send_ctx *sctx, u64 offset) get_last_extent() argument
4839 struct btrfs_root *root = sctx->send_root; get_last_extent()
4850 sctx->cur_inode_last_extent = 0; get_last_extent()
4852 key.objectid = sctx->cur_ino; get_last_extent()
4860 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY) get_last_extent()
4870 sctx->send_root->sectorsize); get_last_extent()
4875 sctx->cur_inode_last_extent = extent_end; get_last_extent()
4881 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path, maybe_send_hole() argument
4889 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx)) maybe_send_hole()
4892 if (sctx->cur_inode_last_extent == (u64)-1) { maybe_send_hole()
4893 ret = get_last_extent(sctx, key->offset - 1); maybe_send_hole()
4905 sctx->send_root->sectorsize); maybe_send_hole()
4912 sctx->cur_inode_last_extent < key->offset) { maybe_send_hole()
4920 ret = get_last_extent(sctx, key->offset - 1); maybe_send_hole()
4925 if (sctx->cur_inode_last_extent < key->offset) maybe_send_hole()
4926 ret = send_hole(sctx, key->offset); maybe_send_hole()
4927 sctx->cur_inode_last_extent = extent_end; maybe_send_hole()
4931 static int process_extent(struct send_ctx *sctx, process_extent() argument
4938 if (S_ISLNK(sctx->cur_inode_mode)) process_extent()
4941 if (sctx->parent_root && !sctx->cur_inode_new) { process_extent()
4942 ret = is_extent_unchanged(sctx, path, key); process_extent()
4977 ret = find_extent_clone(sctx, path, key->objectid, key->offset, process_extent()
4978 sctx->cur_inode_size, &found_clone); process_extent()
4982 ret = send_write_or_clone(sctx, path, key, found_clone); process_extent()
4986 ret = maybe_send_hole(sctx, path, key); process_extent()
4991 static int process_all_extents(struct send_ctx *sctx) process_all_extents() argument
5001 root = sctx->send_root; process_all_extents()
5006 key.objectid = sctx->cmp_key->objectid; process_all_extents()
5036 ret = process_extent(sctx, path, &found_key); process_all_extents()
5048 static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end, process_recorded_refs_if_needed() argument
5054 if (sctx->cur_ino == 0) process_recorded_refs_if_needed()
5056 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid && process_recorded_refs_if_needed()
5057 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY) process_recorded_refs_if_needed()
5059 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs)) process_recorded_refs_if_needed()
5062 ret = process_recorded_refs(sctx, pending_move); process_recorded_refs_if_needed()
5071 static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) finish_inode_if_needed() argument
5085 ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move, finish_inode_if_needed()
5103 sctx->send_progress = sctx->cur_ino + 1; finish_inode_if_needed()
5105 if (sctx->cur_ino == 0 || sctx->cur_inode_deleted) finish_inode_if_needed()
5107 if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino) finish_inode_if_needed()
5110 ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL, finish_inode_if_needed()
5115 if (!sctx->parent_root || sctx->cur_inode_new) { finish_inode_if_needed()
5117 if (!S_ISLNK(sctx->cur_inode_mode)) finish_inode_if_needed()
5120 ret = get_inode_info(sctx->parent_root, sctx->cur_ino, finish_inode_if_needed()
5128 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode) finish_inode_if_needed()
5132 if (S_ISREG(sctx->cur_inode_mode)) { finish_inode_if_needed()
5133 if (need_send_hole(sctx)) { finish_inode_if_needed()
5134 if (sctx->cur_inode_last_extent == (u64)-1 || finish_inode_if_needed()
5135 sctx->cur_inode_last_extent < finish_inode_if_needed()
5136 sctx->cur_inode_size) { finish_inode_if_needed()
5137 ret = get_last_extent(sctx, (u64)-1); finish_inode_if_needed()
5141 if (sctx->cur_inode_last_extent < finish_inode_if_needed()
5142 sctx->cur_inode_size) { finish_inode_if_needed()
5143 ret = send_hole(sctx, sctx->cur_inode_size); finish_inode_if_needed()
5148 ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen, finish_inode_if_needed()
5149 sctx->cur_inode_size); finish_inode_if_needed()
5155 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen, finish_inode_if_needed()
5161 ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen, finish_inode_if_needed()
5171 if (!is_waiting_for_move(sctx, sctx->cur_ino)) { finish_inode_if_needed()
5172 ret = apply_children_dir_moves(sctx); finish_inode_if_needed()
5182 sctx->send_progress = sctx->cur_ino + 1; finish_inode_if_needed()
5183 ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen); finish_inode_if_needed()
5192 static int changed_inode(struct send_ctx *sctx, changed_inode() argument
5196 struct btrfs_key *key = sctx->cmp_key; changed_inode()
5202 sctx->cur_ino = key->objectid; changed_inode()
5203 sctx->cur_inode_new_gen = 0; changed_inode()
5204 sctx->cur_inode_last_extent = (u64)-1; changed_inode()
5211 sctx->send_progress = sctx->cur_ino; changed_inode()
5215 left_ii = btrfs_item_ptr(sctx->left_path->nodes[0], changed_inode()
5216 sctx->left_path->slots[0], changed_inode()
5218 left_gen = btrfs_inode_generation(sctx->left_path->nodes[0], changed_inode()
5221 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0], changed_inode()
5222 sctx->right_path->slots[0], changed_inode()
5224 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0], changed_inode()
5228 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0], changed_inode()
5229 sctx->right_path->slots[0], changed_inode()
5232 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0], changed_inode()
5241 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) changed_inode()
5242 sctx->cur_inode_new_gen = 1; changed_inode()
5246 sctx->cur_inode_gen = left_gen; changed_inode()
5247 sctx->cur_inode_new = 1; changed_inode()
5248 sctx->cur_inode_deleted = 0; changed_inode()
5249 sctx->cur_inode_size = btrfs_inode_size( changed_inode()
5250 sctx->left_path->nodes[0], left_ii); changed_inode()
5251 sctx->cur_inode_mode = btrfs_inode_mode( changed_inode()
5252 sctx->left_path->nodes[0], left_ii); changed_inode()
5253 sctx->cur_inode_rdev = btrfs_inode_rdev( changed_inode()
5254 sctx->left_path->nodes[0], left_ii); changed_inode()
5255 if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) changed_inode()
5256 ret = send_create_inode_if_needed(sctx); changed_inode()
5258 sctx->cur_inode_gen = right_gen; changed_inode()
5259 sctx->cur_inode_new = 0; changed_inode()
5260 sctx->cur_inode_deleted = 1; changed_inode()
5261 sctx->cur_inode_size = btrfs_inode_size( changed_inode()
5262 sctx->right_path->nodes[0], right_ii); changed_inode()
5263 sctx->cur_inode_mode = btrfs_inode_mode( changed_inode()
5264 sctx->right_path->nodes[0], right_ii); changed_inode()
5273 if (sctx->cur_inode_new_gen) { changed_inode()
5277 sctx->cur_inode_gen = right_gen; changed_inode()
5278 sctx->cur_inode_new = 0; changed_inode()
5279 sctx->cur_inode_deleted = 1; changed_inode()
5280 sctx->cur_inode_size = btrfs_inode_size( changed_inode()
5281 sctx->right_path->nodes[0], right_ii); changed_inode()
5282 sctx->cur_inode_mode = btrfs_inode_mode( changed_inode()
5283 sctx->right_path->nodes[0], right_ii); changed_inode()
5284 ret = process_all_refs(sctx, changed_inode()
5292 sctx->cur_inode_gen = left_gen; changed_inode()
5293 sctx->cur_inode_new = 1; changed_inode()
5294 sctx->cur_inode_deleted = 0; changed_inode()
5295 sctx->cur_inode_size = btrfs_inode_size( changed_inode()
5296 sctx->left_path->nodes[0], left_ii); changed_inode()
5297 sctx->cur_inode_mode = btrfs_inode_mode( changed_inode()
5298 sctx->left_path->nodes[0], left_ii); changed_inode()
5299 sctx->cur_inode_rdev = btrfs_inode_rdev( changed_inode()
5300 sctx->left_path->nodes[0], left_ii); changed_inode()
5301 ret = send_create_inode_if_needed(sctx); changed_inode()
5305 ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW); changed_inode()
5312 sctx->send_progress = sctx->cur_ino + 1; changed_inode()
5318 ret = process_all_extents(sctx); changed_inode()
5321 ret = process_all_new_xattrs(sctx); changed_inode()
5325 sctx->cur_inode_gen = left_gen; changed_inode()
5326 sctx->cur_inode_new = 0; changed_inode()
5327 sctx->cur_inode_new_gen = 0; changed_inode()
5328 sctx->cur_inode_deleted = 0; changed_inode()
5329 sctx->cur_inode_size = btrfs_inode_size( changed_inode()
5330 sctx->left_path->nodes[0], left_ii); changed_inode()
5331 sctx->cur_inode_mode = btrfs_inode_mode( changed_inode()
5332 sctx->left_path->nodes[0], left_ii); changed_inode()
5350 static int changed_ref(struct send_ctx *sctx, changed_ref() argument
5355 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); changed_ref()
5357 if (!sctx->cur_inode_new_gen && changed_ref()
5358 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) { changed_ref()
5360 ret = record_new_ref(sctx); changed_ref()
5362 ret = record_deleted_ref(sctx); changed_ref()
5364 ret = record_changed_ref(sctx); changed_ref()
5375 static int changed_xattr(struct send_ctx *sctx, changed_xattr() argument
5380 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); changed_xattr()
5382 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { changed_xattr()
5384 ret = process_new_xattr(sctx); changed_xattr()
5386 ret = process_deleted_xattr(sctx); changed_xattr()
5388 ret = process_changed_xattr(sctx); changed_xattr()
5399 static int changed_extent(struct send_ctx *sctx, changed_extent() argument
5404 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); changed_extent()
5406 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { changed_extent()
5408 ret = process_extent(sctx, sctx->left_path, changed_extent()
5409 sctx->cmp_key); changed_extent()
5415 static int dir_changed(struct send_ctx *sctx, u64 dir) dir_changed() argument
5420 ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL, dir_changed()
5425 ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL, dir_changed()
5433 static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path, compare_refs() argument
5449 ret = dir_changed(sctx, dirid); compare_refs()
5464 ret = dir_changed(sctx, dirid); compare_refs()
5474 * Updates compare related fields in sctx and simply forwards to the actual
5486 struct send_ctx *sctx = ctx; changed_cb() local
5491 ret = compare_refs(sctx, left_path, key); changed_cb()
5497 return maybe_send_hole(sctx, left_path, key); changed_cb()
5505 sctx->left_path = left_path; changed_cb()
5506 sctx->right_path = right_path; changed_cb()
5507 sctx->cmp_key = key; changed_cb()
5509 ret = finish_inode_if_needed(sctx, 0); changed_cb()
5519 ret = changed_inode(sctx, result); changed_cb()
5522 ret = changed_ref(sctx, result); changed_cb()
5524 ret = changed_xattr(sctx, result); changed_cb()
5526 ret = changed_extent(sctx, result); changed_cb()
5532 static int full_send_tree(struct send_ctx *sctx) full_send_tree() argument
5535 struct btrfs_root *send_root = sctx->send_root; full_send_tree()
5562 &found_key, BTRFS_COMPARE_TREE_NEW, sctx); full_send_tree()
5580 ret = finish_inode_if_needed(sctx, 1); full_send_tree()
5587 static int send_subvol(struct send_ctx *sctx) send_subvol() argument
5591 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) { send_subvol()
5592 ret = send_header(sctx); send_subvol()
5597 ret = send_subvol_begin(sctx); send_subvol()
5601 if (sctx->parent_root) { send_subvol()
5602 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root, send_subvol()
5603 changed_cb, sctx); send_subvol()
5606 ret = finish_inode_if_needed(sctx, 1); send_subvol()
5610 ret = full_send_tree(sctx); send_subvol()
5616 free_recorded_refs(sctx); send_subvol()
5633 static int ensure_commit_roots_uptodate(struct send_ctx *sctx) ensure_commit_roots_uptodate() argument
5639 if (sctx->parent_root && ensure_commit_roots_uptodate()
5640 sctx->parent_root->node != sctx->parent_root->commit_root) ensure_commit_roots_uptodate()
5643 for (i = 0; i < sctx->clone_roots_cnt; i++) ensure_commit_roots_uptodate()
5644 if (sctx->clone_roots[i].root->node != ensure_commit_roots_uptodate()
5645 sctx->clone_roots[i].root->commit_root) ensure_commit_roots_uptodate()
5649 return btrfs_end_transaction(trans, sctx->send_root); ensure_commit_roots_uptodate()
5656 trans = btrfs_join_transaction(sctx->send_root); ensure_commit_roots_uptodate()
5662 return btrfs_commit_transaction(trans, sctx->send_root); ensure_commit_roots_uptodate()
5688 struct send_ctx *sctx = NULL; btrfs_ioctl_send() local
5743 sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS); btrfs_ioctl_send()
5744 if (!sctx) { btrfs_ioctl_send()
5749 INIT_LIST_HEAD(&sctx->new_refs); btrfs_ioctl_send()
5750 INIT_LIST_HEAD(&sctx->deleted_refs); btrfs_ioctl_send()
5751 INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS); btrfs_ioctl_send()
5752 INIT_LIST_HEAD(&sctx->name_cache_list); btrfs_ioctl_send()
5754 sctx->flags = arg->flags; btrfs_ioctl_send()
5756 sctx->send_filp = fget(arg->send_fd); btrfs_ioctl_send()
5757 if (!sctx->send_filp) { btrfs_ioctl_send()
5762 sctx->send_root = send_root; btrfs_ioctl_send()
5767 if (btrfs_root_dead(sctx->send_root)) { btrfs_ioctl_send()
5772 sctx->clone_roots_cnt = arg->clone_sources_count; btrfs_ioctl_send()
5774 sctx->send_max_size = BTRFS_SEND_BUF_SIZE; btrfs_ioctl_send()
5775 sctx->send_buf = vmalloc(sctx->send_max_size); btrfs_ioctl_send()
5776 if (!sctx->send_buf) { btrfs_ioctl_send()
5781 sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE); btrfs_ioctl_send()
5782 if (!sctx->read_buf) { btrfs_ioctl_send()
5787 sctx->pending_dir_moves = RB_ROOT; btrfs_ioctl_send()
5788 sctx->waiting_dir_moves = RB_ROOT; btrfs_ioctl_send()
5789 sctx->orphan_dirs = RB_ROOT; btrfs_ioctl_send()
5791 sctx->clone_roots = vzalloc(sizeof(struct clone_root) * btrfs_ioctl_send()
5793 if (!sctx->clone_roots) { btrfs_ioctl_send()
5839 sctx->clone_roots[i].root = clone_root; btrfs_ioctl_send()
5853 sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key); btrfs_ioctl_send()
5854 if (IS_ERR(sctx->parent_root)) { btrfs_ioctl_send()
5856 ret = PTR_ERR(sctx->parent_root); btrfs_ioctl_send()
5860 spin_lock(&sctx->parent_root->root_item_lock); btrfs_ioctl_send()
5861 sctx->parent_root->send_in_progress++; btrfs_ioctl_send()
5862 if (!btrfs_root_readonly(sctx->parent_root) || btrfs_ioctl_send()
5863 btrfs_root_dead(sctx->parent_root)) { btrfs_ioctl_send()
5864 spin_unlock(&sctx->parent_root->root_item_lock); btrfs_ioctl_send()
5869 spin_unlock(&sctx->parent_root->root_item_lock); btrfs_ioctl_send()
5879 sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root; btrfs_ioctl_send()
5882 sort(sctx->clone_roots, sctx->clone_roots_cnt, btrfs_ioctl_send()
5883 sizeof(*sctx->clone_roots), __clone_root_cmp_sort, btrfs_ioctl_send()
5887 ret = ensure_commit_roots_uptodate(sctx); btrfs_ioctl_send()
5892 ret = send_subvol(sctx); btrfs_ioctl_send()
5897 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) { btrfs_ioctl_send()
5898 ret = begin_cmd(sctx, BTRFS_SEND_C_END); btrfs_ioctl_send()
5901 ret = send_cmd(sctx); btrfs_ioctl_send()
5907 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)); btrfs_ioctl_send()
5908 while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) { btrfs_ioctl_send()
5912 n = rb_first(&sctx->pending_dir_moves); btrfs_ioctl_send()
5919 free_pending_move(sctx, pm2); btrfs_ioctl_send()
5921 free_pending_move(sctx, pm); btrfs_ioctl_send()
5924 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)); btrfs_ioctl_send()
5925 while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) { btrfs_ioctl_send()
5929 n = rb_first(&sctx->waiting_dir_moves); btrfs_ioctl_send()
5931 rb_erase(&dm->node, &sctx->waiting_dir_moves); btrfs_ioctl_send()
5935 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs)); btrfs_ioctl_send()
5936 while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) { btrfs_ioctl_send()
5940 n = rb_first(&sctx->orphan_dirs); btrfs_ioctl_send()
5942 free_orphan_dir_info(sctx, odi); btrfs_ioctl_send()
5946 for (i = 0; i < sctx->clone_roots_cnt; i++) btrfs_ioctl_send()
5948 sctx->clone_roots[i].root); btrfs_ioctl_send()
5950 for (i = 0; sctx && i < clone_sources_to_rollback; i++) btrfs_ioctl_send()
5952 sctx->clone_roots[i].root); btrfs_ioctl_send()
5956 if (sctx && !IS_ERR_OR_NULL(sctx->parent_root)) btrfs_ioctl_send()
5957 btrfs_root_dec_send_in_progress(sctx->parent_root); btrfs_ioctl_send()
5962 if (sctx) { btrfs_ioctl_send()
5963 if (sctx->send_filp) btrfs_ioctl_send()
5964 fput(sctx->send_filp); btrfs_ioctl_send()
5966 vfree(sctx->clone_roots); btrfs_ioctl_send()
5967 vfree(sctx->send_buf); btrfs_ioctl_send()
5968 vfree(sctx->read_buf); btrfs_ioctl_send()
5970 name_cache_free(sctx); btrfs_ioctl_send()
5972 kfree(sctx); btrfs_ioctl_send()
scrub.c 95 struct scrub_ctx *sctx; member in struct:scrub_bio
116 struct scrub_ctx *sctx; member in struct:scrub_block
132 struct scrub_ctx *sctx; member in struct:scrub_parity
208 struct scrub_ctx *sctx; member in struct:scrub_fixup_nodatasum
224 struct scrub_ctx *sctx; member in struct:scrub_copy_nocow_ctx
242 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
243 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
244 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
245 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
275 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
277 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
289 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
295 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
297 static void scrub_wr_submit(struct scrub_ctx *sctx);
300 static int write_page_nocow(struct scrub_ctx *sctx,
304 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
309 static void scrub_put_ctx(struct scrub_ctx *sctx);
312 static void scrub_pending_bio_inc(struct scrub_ctx *sctx) scrub_pending_bio_inc() argument
314 atomic_inc(&sctx->refs); scrub_pending_bio_inc()
315 atomic_inc(&sctx->bios_in_flight); scrub_pending_bio_inc()
318 static void scrub_pending_bio_dec(struct scrub_ctx *sctx) scrub_pending_bio_dec() argument
320 atomic_dec(&sctx->bios_in_flight); scrub_pending_bio_dec()
321 wake_up(&sctx->list_wait); scrub_pending_bio_dec()
322 scrub_put_ctx(sctx); scrub_pending_bio_dec()
352 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx) scrub_pending_trans_workers_inc() argument
354 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; scrub_pending_trans_workers_inc()
356 atomic_inc(&sctx->refs); scrub_pending_trans_workers_inc()
380 atomic_inc(&sctx->workers_pending); scrub_pending_trans_workers_inc()
384 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx) scrub_pending_trans_workers_dec() argument
386 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; scrub_pending_trans_workers_dec()
396 atomic_dec(&sctx->workers_pending); scrub_pending_trans_workers_dec()
398 wake_up(&sctx->list_wait); scrub_pending_trans_workers_dec()
399 scrub_put_ctx(sctx); scrub_pending_trans_workers_dec()
402 static void scrub_free_csums(struct scrub_ctx *sctx) scrub_free_csums() argument
404 while (!list_empty(&sctx->csum_list)) { scrub_free_csums()
406 sum = list_first_entry(&sctx->csum_list, scrub_free_csums()
413 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx) scrub_free_ctx() argument
417 if (!sctx) scrub_free_ctx()
420 scrub_free_wr_ctx(&sctx->wr_ctx); scrub_free_ctx()
423 if (sctx->curr != -1) { scrub_free_ctx()
424 struct scrub_bio *sbio = sctx->bios[sctx->curr]; scrub_free_ctx()
434 struct scrub_bio *sbio = sctx->bios[i]; scrub_free_ctx()
441 scrub_free_csums(sctx); scrub_free_ctx()
442 kfree(sctx); scrub_free_ctx()
445 static void scrub_put_ctx(struct scrub_ctx *sctx) scrub_put_ctx() argument
447 if (atomic_dec_and_test(&sctx->refs)) scrub_put_ctx()
448 scrub_free_ctx(sctx); scrub_put_ctx()
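
scrub_pending_bio_inc(), scrub_pending_bio_dec() and scrub_put_ctx() (lines 312-322 and 445-448) show the context's lifetime scheme: every in-flight bio takes a reference and bumps bios_in_flight, completion drops both and wakes waiters on list_wait, and the context is freed when the last reference goes away. A user-space analogue using C11 atomics follows; the wait-queue side is simplified away and is not part of the sketch.

#include <stdatomic.h>
#include <stdlib.h>

struct scrub_ctx_sketch {
	atomic_int refs;		/* lifetime references (sctx->refs) */
	atomic_int bios_in_flight;	/* outstanding read/write bios */
};

/* mirrors scrub_pending_bio_inc(): each submitted bio pins the context */
static void pending_bio_inc_sketch(struct scrub_ctx_sketch *sctx)
{
	atomic_fetch_add(&sctx->refs, 1);
	atomic_fetch_add(&sctx->bios_in_flight, 1);
}

/* mirrors scrub_put_ctx(): free the context on the last reference */
static void put_ctx_sketch(struct scrub_ctx_sketch *sctx)
{
	if (atomic_fetch_sub(&sctx->refs, 1) == 1)
		free(sctx);
}

/* mirrors scrub_pending_bio_dec(): completion path of a bio */
static void pending_bio_dec_sketch(struct scrub_ctx_sketch *sctx)
{
	atomic_fetch_sub(&sctx->bios_in_flight, 1);
	/* the kernel wakes sctx->list_wait here before dropping the reference */
	put_ctx_sketch(sctx);
}
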
454 struct scrub_ctx *sctx; scrub_setup_ctx() local
472 sctx = kzalloc(sizeof(*sctx), GFP_NOFS); scrub_setup_ctx()
473 if (!sctx) scrub_setup_ctx()
475 atomic_set(&sctx->refs, 1); scrub_setup_ctx()
476 sctx->is_dev_replace = is_dev_replace; scrub_setup_ctx()
477 sctx->pages_per_rd_bio = pages_per_rd_bio; scrub_setup_ctx()
478 sctx->curr = -1; scrub_setup_ctx()
479 sctx->dev_root = dev->dev_root; scrub_setup_ctx()
486 sctx->bios[i] = sbio; scrub_setup_ctx()
489 sbio->sctx = sctx; scrub_setup_ctx()
495 sctx->bios[i]->next_free = i + 1; scrub_setup_ctx()
497 sctx->bios[i]->next_free = -1; scrub_setup_ctx()
499 sctx->first_free = 0; scrub_setup_ctx()
500 sctx->nodesize = dev->dev_root->nodesize; scrub_setup_ctx()
501 sctx->sectorsize = dev->dev_root->sectorsize; scrub_setup_ctx()
502 atomic_set(&sctx->bios_in_flight, 0); scrub_setup_ctx()
503 atomic_set(&sctx->workers_pending, 0); scrub_setup_ctx()
504 atomic_set(&sctx->cancel_req, 0); scrub_setup_ctx()
505 sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy); scrub_setup_ctx()
506 INIT_LIST_HEAD(&sctx->csum_list); scrub_setup_ctx()
508 spin_lock_init(&sctx->list_lock); scrub_setup_ctx()
509 spin_lock_init(&sctx->stat_lock); scrub_setup_ctx()
510 init_waitqueue_head(&sctx->list_wait); scrub_setup_ctx()
512 ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info, scrub_setup_ctx()
515 scrub_free_ctx(sctx); scrub_setup_ctx()
518 return sctx; scrub_setup_ctx()
521 scrub_free_ctx(sctx); scrub_setup_ctx()
627 fs_info = sblock->sctx->dev_root->fs_info; scrub_print_warning()
801 struct scrub_ctx *sctx; scrub_fixup_nodatasum() local
807 sctx = fixup->sctx; scrub_fixup_nodatasum()
811 spin_lock(&sctx->stat_lock); scrub_fixup_nodatasum()
812 ++sctx->stat.malloc_errors; scrub_fixup_nodatasum()
813 spin_unlock(&sctx->stat_lock); scrub_fixup_nodatasum()
842 spin_lock(&sctx->stat_lock); scrub_fixup_nodatasum()
843 ++sctx->stat.corrected_errors; scrub_fixup_nodatasum()
844 spin_unlock(&sctx->stat_lock); scrub_fixup_nodatasum()
850 spin_lock(&sctx->stat_lock); scrub_fixup_nodatasum()
851 ++sctx->stat.uncorrectable_errors; scrub_fixup_nodatasum()
852 spin_unlock(&sctx->stat_lock); scrub_fixup_nodatasum()
854 &sctx->dev_root->fs_info->dev_replace. scrub_fixup_nodatasum()
864 scrub_pending_trans_workers_dec(sctx); scrub_fixup_nodatasum()
890 struct scrub_ctx *sctx = sblock_to_check->sctx; scrub_handle_errored_block() local
910 fs_info = sctx->dev_root->fs_info; scrub_handle_errored_block()
917 spin_lock(&sctx->stat_lock); scrub_handle_errored_block()
918 ++sctx->stat.super_errors; scrub_handle_errored_block()
919 spin_unlock(&sctx->stat_lock); scrub_handle_errored_block()
933 if (sctx->is_dev_replace && !is_metadata && !have_csum) { scrub_handle_errored_block()
970 spin_lock(&sctx->stat_lock); scrub_handle_errored_block()
971 sctx->stat.malloc_errors++; scrub_handle_errored_block()
972 sctx->stat.read_errors++; scrub_handle_errored_block()
973 sctx->stat.uncorrectable_errors++; scrub_handle_errored_block()
974 spin_unlock(&sctx->stat_lock); scrub_handle_errored_block()
982 spin_lock(&sctx->stat_lock); scrub_handle_errored_block()
983 sctx->stat.read_errors++; scrub_handle_errored_block()
984 sctx->stat.uncorrectable_errors++; scrub_handle_errored_block()
985 spin_unlock(&sctx->stat_lock); scrub_handle_errored_block()
994 csum, generation, sctx->csum_size, 1); scrub_handle_errored_block()
1006 spin_lock(&sctx->stat_lock); scrub_handle_errored_block()
1007 sctx->stat.unverified_errors++; scrub_handle_errored_block()
1009 spin_unlock(&sctx->stat_lock); scrub_handle_errored_block()
1011 if (sctx->is_dev_replace) scrub_handle_errored_block()
1017 spin_lock(&sctx->stat_lock); scrub_handle_errored_block()
1018 sctx->stat.read_errors++; scrub_handle_errored_block()
1019 spin_unlock(&sctx->stat_lock); scrub_handle_errored_block()
1024 spin_lock(&sctx->stat_lock); scrub_handle_errored_block()
1025 sctx->stat.csum_errors++; scrub_handle_errored_block()
1026 spin_unlock(&sctx->stat_lock); scrub_handle_errored_block()
1032 spin_lock(&sctx->stat_lock); scrub_handle_errored_block()
1033 sctx->stat.verify_errors++; scrub_handle_errored_block()
1034 spin_unlock(&sctx->stat_lock); scrub_handle_errored_block()
1046 if (sctx->readonly) { scrub_handle_errored_block()
1047 ASSERT(!sctx->is_dev_replace); scrub_handle_errored_block()
1054 WARN_ON(sctx->is_dev_replace); scrub_handle_errored_block()
1068 fixup_nodatasum->sctx = sctx; scrub_handle_errored_block()
1073 scrub_pending_trans_workers_inc(sctx); scrub_handle_errored_block()
1109 sctx->csum_size, 0); scrub_handle_errored_block()
1114 if (sctx->is_dev_replace) { scrub_handle_errored_block()
1126 if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace) scrub_handle_errored_block()
1160 if (!page_bad->io_error && !sctx->is_dev_replace) scrub_handle_errored_block()
1180 if (sctx->is_dev_replace) { scrub_handle_errored_block()
1194 &sctx->dev_root-> scrub_handle_errored_block()
1210 if (success && !sctx->is_dev_replace) { scrub_handle_errored_block()
1223 generation, sctx->csum_size, 1); scrub_handle_errored_block()
1232 spin_lock(&sctx->stat_lock); scrub_handle_errored_block()
1233 sctx->stat.corrected_errors++; scrub_handle_errored_block()
1235 spin_unlock(&sctx->stat_lock); scrub_handle_errored_block()
1242 spin_lock(&sctx->stat_lock); scrub_handle_errored_block()
1243 sctx->stat.uncorrectable_errors++; scrub_handle_errored_block()
1244 spin_unlock(&sctx->stat_lock); scrub_handle_errored_block()
1320 struct scrub_ctx *sctx = original_sblock->sctx; scrub_setup_recheck_block() local
1321 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; scrub_setup_recheck_block()
1377 sblock->sctx = sctx; scrub_setup_recheck_block()
1381 spin_lock(&sctx->stat_lock); scrub_setup_recheck_block()
1382 sctx->stat.malloc_errors++; scrub_setup_recheck_block()
1383 spin_unlock(&sctx->stat_lock); scrub_setup_recheck_block()
1654 &sblock_bad->sctx->dev_root->fs_info-> scrub_repair_page_from_good_copy()
1682 &sblock->sctx->dev_root->fs_info->dev_replace. scrub_write_block_to_dev_replace()
1700 return scrub_add_page_to_wr_bio(sblock->sctx, spage); scrub_write_page_to_dev_replace()
1703 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, scrub_add_page_to_wr_bio() argument
1706 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx; scrub_add_page_to_wr_bio()
1719 wr_ctx->wr_curr_bio->sctx = sctx; scrub_add_page_to_wr_bio()
1748 scrub_wr_submit(sctx); scrub_add_page_to_wr_bio()
1760 scrub_wr_submit(sctx); scrub_add_page_to_wr_bio()
1768 scrub_wr_submit(sctx); scrub_add_page_to_wr_bio()
1774 static void scrub_wr_submit(struct scrub_ctx *sctx) scrub_wr_submit() argument
1776 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx; scrub_wr_submit()
1785 scrub_pending_bio_inc(sctx); scrub_wr_submit()
1809 struct scrub_ctx *sctx = sbio->sctx; scrub_wr_bio_end_io_worker() local
1815 &sbio->sctx->dev_root->fs_info->dev_replace; scrub_wr_bio_end_io_worker()
1831 scrub_pending_bio_dec(sctx); scrub_wr_bio_end_io_worker()
1858 struct scrub_ctx *sctx = sblock->sctx; scrub_checksum_data() local
1876 len = sctx->sectorsize; scrub_checksum_data()
1894 if (memcmp(csum, on_disk_csum, sctx->csum_size)) scrub_checksum_data()
1902 struct scrub_ctx *sctx = sblock->sctx; scrub_checksum_tree_block() local
1904 struct btrfs_root *root = sctx->dev_root; scrub_checksum_tree_block()
1922 memcpy(on_disk_csum, h->csum, sctx->csum_size); scrub_checksum_tree_block()
1943 len = sctx->nodesize - BTRFS_CSUM_SIZE; scrub_checksum_tree_block()
1965 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size)) scrub_checksum_tree_block()
1974 struct scrub_ctx *sctx = sblock->sctx; scrub_checksum_super() local
1991 memcpy(on_disk_csum, s->csum, sctx->csum_size); scrub_checksum_super()
2024 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size)) scrub_checksum_super()
2033 spin_lock(&sctx->stat_lock); scrub_checksum_super()
2034 ++sctx->stat.super_errors; scrub_checksum_super()
2035 spin_unlock(&sctx->stat_lock); scrub_checksum_super()
2080 static void scrub_submit(struct scrub_ctx *sctx) scrub_submit() argument
2084 if (sctx->curr == -1) scrub_submit()
2087 sbio = sctx->bios[sctx->curr]; scrub_submit()
2088 sctx->curr = -1; scrub_submit()
2089 scrub_pending_bio_inc(sctx); scrub_submit()
2107 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx, scrub_add_page_to_rd_bio() argument
2118 while (sctx->curr == -1) { scrub_add_page_to_rd_bio()
2119 spin_lock(&sctx->list_lock); scrub_add_page_to_rd_bio()
2120 sctx->curr = sctx->first_free; scrub_add_page_to_rd_bio()
2121 if (sctx->curr != -1) { scrub_add_page_to_rd_bio()
2122 sctx->first_free = sctx->bios[sctx->curr]->next_free; scrub_add_page_to_rd_bio()
2123 sctx->bios[sctx->curr]->next_free = -1; scrub_add_page_to_rd_bio()
2124 sctx->bios[sctx->curr]->page_count = 0; scrub_add_page_to_rd_bio()
2125 spin_unlock(&sctx->list_lock); scrub_add_page_to_rd_bio()
2127 spin_unlock(&sctx->list_lock); scrub_add_page_to_rd_bio()
2128 wait_event(sctx->list_wait, sctx->first_free != -1); scrub_add_page_to_rd_bio()
2131 sbio = sctx->bios[sctx->curr]; scrub_add_page_to_rd_bio()
2140 bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio); scrub_add_page_to_rd_bio()
2156 scrub_submit(sctx); scrub_add_page_to_rd_bio()
2168 scrub_submit(sctx); scrub_add_page_to_rd_bio()
2175 if (sbio->page_count == sctx->pages_per_rd_bio) scrub_add_page_to_rd_bio()
2176 scrub_submit(sctx); scrub_add_page_to_rd_bio()
2181 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, scrub_pages() argument
2191 spin_lock(&sctx->stat_lock); scrub_pages()
2192 sctx->stat.malloc_errors++; scrub_pages()
2193 spin_unlock(&sctx->stat_lock); scrub_pages()
2200 sblock->sctx = sctx; scrub_pages()
2210 spin_lock(&sctx->stat_lock); scrub_pages()
2211 sctx->stat.malloc_errors++; scrub_pages()
2212 spin_unlock(&sctx->stat_lock); scrub_pages()
2229 memcpy(spage->csum, csum, sctx->csum_size); scrub_pages()
2248 ret = scrub_add_page_to_rd_bio(sctx, spage); scrub_pages()
2256 scrub_submit(sctx); scrub_pages()
2277 struct scrub_ctx *sctx = sbio->sctx; scrub_bio_end_io_worker() local
2302 spin_lock(&sctx->list_lock); scrub_bio_end_io_worker()
2303 sbio->next_free = sctx->first_free; scrub_bio_end_io_worker()
2304 sctx->first_free = sbio->index; scrub_bio_end_io_worker()
2305 spin_unlock(&sctx->list_lock); scrub_bio_end_io_worker()
2307 if (sctx->is_dev_replace && scrub_bio_end_io_worker()
2308 atomic_read(&sctx->wr_ctx.flush_all_writes)) { scrub_bio_end_io_worker()
2309 mutex_lock(&sctx->wr_ctx.wr_lock); scrub_bio_end_io_worker()
2310 scrub_wr_submit(sctx); scrub_bio_end_io_worker()
2311 mutex_unlock(&sctx->wr_ctx.wr_lock); scrub_bio_end_io_worker()
2314 scrub_pending_bio_dec(sctx); scrub_bio_end_io_worker()
2323 int sectorsize = sparity->sctx->dev_root->sectorsize; __scrub_mark_bitmap()
2370 if (!corrupted && sblock->sctx->is_dev_replace) scrub_block_complete()
2384 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len, scrub_find_csum() argument
2391 while (!list_empty(&sctx->csum_list)) { scrub_find_csum()
2392 sum = list_first_entry(&sctx->csum_list, scrub_find_csum()
2399 ++sctx->stat.csum_discards; scrub_find_csum()
2407 index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize; scrub_find_csum()
2408 num_sectors = sum->len / sctx->sectorsize; scrub_find_csum()
2409 memcpy(csum, sum->sums + index, sctx->csum_size); scrub_find_csum()
2418 static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len, scrub_extent() argument
2427 blocksize = sctx->sectorsize; scrub_extent()
2428 spin_lock(&sctx->stat_lock); scrub_extent()
2429 sctx->stat.data_extents_scrubbed++; scrub_extent()
2430 sctx->stat.data_bytes_scrubbed += len; scrub_extent()
2431 spin_unlock(&sctx->stat_lock); scrub_extent()
2433 blocksize = sctx->nodesize; scrub_extent()
2434 spin_lock(&sctx->stat_lock); scrub_extent()
2435 sctx->stat.tree_extents_scrubbed++; scrub_extent()
2436 sctx->stat.tree_bytes_scrubbed += len; scrub_extent()
2437 spin_unlock(&sctx->stat_lock); scrub_extent()
2439 blocksize = sctx->sectorsize; scrub_extent()
2449 have_csum = scrub_find_csum(sctx, logical, l, csum); scrub_extent()
2451 ++sctx->stat.no_csum; scrub_extent()
2452 if (sctx->is_dev_replace && !have_csum) { scrub_extent()
2453 ret = copy_nocow_pages(sctx, logical, l, scrub_extent()
2459 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen, scrub_extent()
2478 struct scrub_ctx *sctx = sparity->sctx; scrub_pages_for_parity() local
2484 spin_lock(&sctx->stat_lock); scrub_pages_for_parity()
2485 sctx->stat.malloc_errors++; scrub_pages_for_parity()
2486 spin_unlock(&sctx->stat_lock); scrub_pages_for_parity()
2493 sblock->sctx = sctx; scrub_pages_for_parity()
2505 spin_lock(&sctx->stat_lock); scrub_pages_for_parity()
2506 sctx->stat.malloc_errors++; scrub_pages_for_parity()
2507 spin_unlock(&sctx->stat_lock); scrub_pages_for_parity()
2527 memcpy(spage->csum, csum, sctx->csum_size); scrub_pages_for_parity()
2545 ret = scrub_add_page_to_rd_bio(sctx, spage); scrub_pages_for_parity()
2562 struct scrub_ctx *sctx = sparity->sctx; scrub_extent_for_parity() local
2568 blocksize = sctx->sectorsize; scrub_extent_for_parity()
2570 blocksize = sctx->nodesize; scrub_extent_for_parity()
2572 blocksize = sctx->sectorsize; scrub_extent_for_parity()
2582 have_csum = scrub_find_csum(sctx, logical, l, csum); scrub_extent_for_parity()
2645 struct scrub_ctx *sctx = sparity->sctx; scrub_free_parity() local
2651 spin_lock(&sctx->stat_lock); scrub_free_parity()
2652 sctx->stat.read_errors += nbits; scrub_free_parity()
2653 sctx->stat.uncorrectable_errors += nbits; scrub_free_parity()
2654 spin_unlock(&sctx->stat_lock); scrub_free_parity()
2668 struct scrub_ctx *sctx = sparity->sctx; scrub_parity_bio_endio() local
2675 scrub_pending_bio_dec(sctx); scrub_parity_bio_endio()
2681 struct scrub_ctx *sctx = sparity->sctx; scrub_parity_check_and_repair() local
2694 ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE, scrub_parity_check_and_repair()
2708 rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio, scrub_parity_check_and_repair()
2719 scrub_pending_bio_inc(sctx); scrub_parity_check_and_repair()
2729 spin_lock(&sctx->stat_lock); scrub_parity_check_and_repair()
2730 sctx->stat.malloc_errors++; scrub_parity_check_and_repair()
2731 spin_unlock(&sctx->stat_lock); scrub_parity_check_and_repair()
2754 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx, scrub_raid56_parity() argument
2761 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; scrub_raid56_parity()
2786 spin_lock(&sctx->stat_lock); scrub_raid56_parity()
2787 sctx->stat.malloc_errors++; scrub_raid56_parity()
2788 spin_unlock(&sctx->stat_lock); scrub_raid56_parity()
2794 sparity->sctx = sctx; scrub_raid56_parity()
2904 &sctx->csum_list, 1); scrub_raid56_parity()
2917 scrub_free_csums(sctx); scrub_raid56_parity()
2948 scrub_submit(sctx); scrub_raid56_parity()
2949 mutex_lock(&sctx->wr_ctx.wr_lock); scrub_raid56_parity()
2950 scrub_wr_submit(sctx); scrub_raid56_parity()
2951 mutex_unlock(&sctx->wr_ctx.wr_lock); scrub_raid56_parity()
2957 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, scrub_stripe() argument
2964 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; scrub_stripe()
3057 wait_event(sctx->list_wait, scrub_stripe()
3058 atomic_read(&sctx->bios_in_flight) == 0); scrub_stripe()
3103 ret = scrub_raid56_parity(sctx, map, scrub_dev, scrub_stripe()
3115 atomic_read(&sctx->cancel_req)) { scrub_stripe()
3124 atomic_set(&sctx->wr_ctx.flush_all_writes, 1); scrub_stripe()
3125 scrub_submit(sctx); scrub_stripe()
3126 mutex_lock(&sctx->wr_ctx.wr_lock); scrub_stripe()
3127 scrub_wr_submit(sctx); scrub_stripe()
3128 mutex_unlock(&sctx->wr_ctx.wr_lock); scrub_stripe()
3129 wait_event(sctx->list_wait, scrub_stripe()
3130 atomic_read(&sctx->bios_in_flight) == 0); scrub_stripe()
3131 atomic_set(&sctx->wr_ctx.flush_all_writes, 0); scrub_stripe()
3240 &sctx->csum_list, 1); scrub_stripe()
3244 ret = scrub_extent(sctx, extent_logical, extent_len, scrub_stripe()
3251 scrub_free_csums(sctx); scrub_stripe()
3270 ret = scrub_raid56_parity(sctx, scrub_stripe()
3299 spin_lock(&sctx->stat_lock); scrub_stripe()
3301 sctx->stat.last_physical = map->stripes[num].physical + scrub_stripe()
3304 sctx->stat.last_physical = physical; scrub_stripe()
3305 spin_unlock(&sctx->stat_lock); scrub_stripe()
3311 scrub_submit(sctx); scrub_stripe()
3312 mutex_lock(&sctx->wr_ctx.wr_lock); scrub_stripe()
3313 scrub_wr_submit(sctx); scrub_stripe()
3314 mutex_unlock(&sctx->wr_ctx.wr_lock); scrub_stripe()
3322 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, scrub_chunk() argument
3329 &sctx->dev_root->fs_info->mapping_tree; scrub_chunk()
3352 ret = scrub_stripe(sctx, map, scrub_dev, i, scrub_chunk()
3366 int scrub_enumerate_chunks(struct scrub_ctx *sctx, scrub_enumerate_chunks() argument
3372 struct btrfs_root *root = sctx->dev_root; scrub_enumerate_chunks()
3452 ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid, scrub_enumerate_chunks()
3466 atomic_set(&sctx->wr_ctx.flush_all_writes, 1); scrub_enumerate_chunks()
3467 scrub_submit(sctx); scrub_enumerate_chunks()
3468 mutex_lock(&sctx->wr_ctx.wr_lock); scrub_enumerate_chunks()
3469 scrub_wr_submit(sctx); scrub_enumerate_chunks()
3470 mutex_unlock(&sctx->wr_ctx.wr_lock); scrub_enumerate_chunks()
3472 wait_event(sctx->list_wait, scrub_enumerate_chunks()
3473 atomic_read(&sctx->bios_in_flight) == 0); scrub_enumerate_chunks()
3482 wait_event(sctx->list_wait, scrub_enumerate_chunks()
3483 atomic_read(&sctx->workers_pending) == 0); scrub_enumerate_chunks()
3484 atomic_set(&sctx->wr_ctx.flush_all_writes, 0); scrub_enumerate_chunks()
3500 if (sctx->stat.malloc_errors > 0) { scrub_enumerate_chunks()
3521 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, scrub_supers() argument
3528 struct btrfs_root *root = sctx->dev_root; scrub_supers()
3545 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr, scrub_supers()
3551 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); scrub_supers()
3612 struct scrub_ctx *sctx; btrfs_scrub_dev() local
3701 sctx = scrub_setup_ctx(dev, is_dev_replace); btrfs_scrub_dev()
3702 if (IS_ERR(sctx)) { btrfs_scrub_dev()
3706 return PTR_ERR(sctx); btrfs_scrub_dev()
3708 sctx->readonly = readonly; btrfs_scrub_dev()
3709 dev->scrub_device = sctx; btrfs_scrub_dev()
3726 ret = scrub_supers(sctx, dev); btrfs_scrub_dev()
3731 ret = scrub_enumerate_chunks(sctx, dev, start, end, btrfs_scrub_dev()
3734 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); btrfs_scrub_dev()
3738 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0); btrfs_scrub_dev()
3741 memcpy(progress, &sctx->stat, sizeof(*progress)); btrfs_scrub_dev()
3748 scrub_put_ctx(sctx); btrfs_scrub_dev()
3802 struct scrub_ctx *sctx; btrfs_scrub_cancel_dev() local
3805 sctx = dev->scrub_device; btrfs_scrub_cancel_dev()
3806 if (!sctx) { btrfs_scrub_cancel_dev()
3810 atomic_inc(&sctx->cancel_req); btrfs_scrub_cancel_dev()
3826 struct scrub_ctx *sctx = NULL; btrfs_scrub_progress() local
3831 sctx = dev->scrub_device; btrfs_scrub_progress()
3832 if (sctx) btrfs_scrub_progress()
3833 memcpy(progress, &sctx->stat, sizeof(*progress)); btrfs_scrub_progress()
3836 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV; btrfs_scrub_progress()
3864 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx, scrub_setup_wr_ctx() argument
3893 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len, copy_nocow_pages() argument
3897 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; copy_nocow_pages()
3901 spin_lock(&sctx->stat_lock); copy_nocow_pages()
3902 sctx->stat.malloc_errors++; copy_nocow_pages()
3903 spin_unlock(&sctx->stat_lock); copy_nocow_pages()
3907 scrub_pending_trans_workers_inc(sctx); copy_nocow_pages()
3909 nocow_ctx->sctx = sctx; copy_nocow_pages()
3944 struct scrub_ctx *sctx = nocow_ctx->sctx; copy_nocow_pages_worker() local
3956 fs_info = sctx->dev_root->fs_info; copy_nocow_pages_worker()
3961 spin_lock(&sctx->stat_lock); copy_nocow_pages_worker()
3962 sctx->stat.malloc_errors++; copy_nocow_pages_worker()
3963 spin_unlock(&sctx->stat_lock); copy_nocow_pages_worker()
4021 scrub_pending_trans_workers_dec(sctx); copy_nocow_pages_worker()
4071 struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info; copy_nocow_pages_for_inode()
4167 err = write_page_nocow(nocow_ctx->sctx, copy_nocow_pages_for_inode()
4190 static int write_page_nocow(struct scrub_ctx *sctx, write_page_nocow() argument
4197 dev = sctx->wr_ctx.tgtdev; write_page_nocow()
4207 spin_lock(&sctx->stat_lock); write_page_nocow()
4208 sctx->stat.malloc_errors++; write_page_nocow()
4209 spin_unlock(&sctx->stat_lock); write_page_nocow()
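The scrub.c hits above revolve around a reference-counted context: scrub_setup_ctx() starts sctx->refs at 1, asynchronous users take extra references, and scrub_put_ctx() frees the context once atomic_dec_and_test() sees the last reference drop. Below is a minimal userspace sketch of that lifecycle using C11 atomics in place of the kernel's atomic_t; the ctx struct and function names are illustrative only, not the btrfs ones.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for a scrub-style context: one owner plus any
 * number of in-flight workers; freed when the last user drops it. */
struct ctx {
        atomic_int refs;
        /* per-run state would live here */
};

static struct ctx *ctx_setup(void)
{
        struct ctx *c = calloc(1, sizeof(*c));

        if (!c)
                return NULL;
        atomic_init(&c->refs, 1);       /* the caller holds the initial reference */
        return c;
}

static void ctx_get(struct ctx *c)
{
        atomic_fetch_add(&c->refs, 1);  /* taken before handing c to a worker */
}

static void ctx_put(struct ctx *c)
{
        /* fetch_sub returns the old value: whoever drops it to zero frees */
        if (atomic_fetch_sub(&c->refs, 1) == 1) {
                free(c);
                printf("context freed\n");
        }
}

int main(void)
{
        struct ctx *c = ctx_setup();

        if (!c)
                return 1;
        ctx_get(c);     /* e.g. a pending worker takes a reference */
        ctx_put(c);     /* the worker finishes */
        ctx_put(c);     /* the owner drops the last reference: context freed */
        return 0;
}
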
/linux-4.1.27/drivers/crypto/nx/
H A Dnx-sha256.c51 struct sha256_state *sctx = shash_desc_ctx(desc); nx_sha256_init() local
53 memset(sctx, 0, sizeof *sctx); nx_sha256_init()
55 sctx->state[0] = __cpu_to_be32(SHA256_H0); nx_sha256_init()
56 sctx->state[1] = __cpu_to_be32(SHA256_H1); nx_sha256_init()
57 sctx->state[2] = __cpu_to_be32(SHA256_H2); nx_sha256_init()
58 sctx->state[3] = __cpu_to_be32(SHA256_H3); nx_sha256_init()
59 sctx->state[4] = __cpu_to_be32(SHA256_H4); nx_sha256_init()
60 sctx->state[5] = __cpu_to_be32(SHA256_H5); nx_sha256_init()
61 sctx->state[6] = __cpu_to_be32(SHA256_H6); nx_sha256_init()
62 sctx->state[7] = __cpu_to_be32(SHA256_H7); nx_sha256_init()
63 sctx->count = 0; nx_sha256_init()
71 struct sha256_state *sctx = shash_desc_ctx(desc); nx_sha256_update() local
80 u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE); nx_sha256_update()
88 total = (sctx->count % SHA256_BLOCK_SIZE) + len; nx_sha256_update()
90 memcpy(sctx->buf + buf_len, data, len); nx_sha256_update()
91 sctx->count += len; nx_sha256_update()
95 memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE); nx_sha256_update()
105 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, nx_sha256_update()
121 (u8 *) sctx->buf, nx_sha256_update()
180 memcpy(sctx->buf, data, leftover); nx_sha256_update()
182 sctx->count += len; nx_sha256_update()
183 memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); nx_sha256_update()
191 struct sha256_state *sctx = shash_desc_ctx(desc); nx_sha256_final() local
209 if (sctx->count >= SHA256_BLOCK_SIZE) { nx_sha256_final()
212 memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE); nx_sha256_final()
220 csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8); nx_sha256_final()
222 len = sctx->count & (SHA256_BLOCK_SIZE - 1); nx_sha256_final()
223 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf, nx_sha256_final()
226 if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) { nx_sha256_final()
253 atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes)); nx_sha256_final()
262 struct sha256_state *sctx = shash_desc_ctx(desc); nx_sha256_export() local
264 memcpy(out, sctx, sizeof(*sctx)); nx_sha256_export()
271 struct sha256_state *sctx = shash_desc_ctx(desc); nx_sha256_import() local
273 memcpy(sctx, in, sizeof(*sctx)); nx_sha256_import()
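nx_sha256_update() carries sub-block input across calls: it tracks total bytes in sctx->count, stashes any partial block in sctx->buf, and only hands whole 64-byte blocks to the hardware, keeping the leftover for the next call. A compact sketch of that buffering shape in plain C follows; the block processing is stubbed out and all names here are made up.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64

struct hstate {
        uint64_t count;             /* total bytes fed in so far */
        uint8_t  buf[BLOCK_SIZE];   /* partial block carried between calls */
};

/* Stub standing in for the real compression / coprocessor step. */
static void process_block(struct hstate *st, const uint8_t *block)
{
        (void)st;
        printf("processing one %d-byte block starting 0x%02x\n",
               BLOCK_SIZE, (unsigned)block[0]);
}

static void hash_update(struct hstate *st, const uint8_t *data, size_t len)
{
        size_t partial = st->count % BLOCK_SIZE;

        st->count += len;

        /* Not enough for a full block yet: just stash the bytes. */
        if (partial + len < BLOCK_SIZE) {
                memcpy(st->buf + partial, data, len);
                return;
        }

        /* Top up and flush the carried partial block first. */
        if (partial) {
                size_t fill = BLOCK_SIZE - partial;

                memcpy(st->buf + partial, data, fill);
                process_block(st, st->buf);
                data += fill;
                len -= fill;
        }

        /* Consume whole blocks directly from the caller's buffer. */
        while (len >= BLOCK_SIZE) {
                process_block(st, data);
                data += BLOCK_SIZE;
                len -= BLOCK_SIZE;
        }

        /* Keep whatever is left for the next call (or for final). */
        memcpy(st->buf, data, len);
}

int main(void)
{
        struct hstate st = { 0 };
        uint8_t msg[150] = { 0xab };

        hash_update(&st, msg, 30);    /* buffered only, no block processed */
        hash_update(&st, msg, 120);   /* flushes two blocks, keeps 22 bytes */
        return 0;
}
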
H A Dnx-sha512.c51 struct sha512_state *sctx = shash_desc_ctx(desc); nx_sha512_init() local
53 memset(sctx, 0, sizeof *sctx); nx_sha512_init()
55 sctx->state[0] = __cpu_to_be64(SHA512_H0); nx_sha512_init()
56 sctx->state[1] = __cpu_to_be64(SHA512_H1); nx_sha512_init()
57 sctx->state[2] = __cpu_to_be64(SHA512_H2); nx_sha512_init()
58 sctx->state[3] = __cpu_to_be64(SHA512_H3); nx_sha512_init()
59 sctx->state[4] = __cpu_to_be64(SHA512_H4); nx_sha512_init()
60 sctx->state[5] = __cpu_to_be64(SHA512_H5); nx_sha512_init()
61 sctx->state[6] = __cpu_to_be64(SHA512_H6); nx_sha512_init()
62 sctx->state[7] = __cpu_to_be64(SHA512_H7); nx_sha512_init()
63 sctx->count[0] = 0; nx_sha512_init()
71 struct sha512_state *sctx = shash_desc_ctx(desc); nx_sha512_update() local
80 u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE); nx_sha512_update()
88 total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len; nx_sha512_update()
90 memcpy(sctx->buf + buf_len, data, len); nx_sha512_update()
91 sctx->count[0] += len; nx_sha512_update()
95 memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE); nx_sha512_update()
105 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, nx_sha512_update()
121 (u8 *) sctx->buf, nx_sha512_update()
184 memcpy(sctx->buf, data, leftover); nx_sha512_update()
185 sctx->count[0] += len; nx_sha512_update()
186 memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); nx_sha512_update()
194 struct sha512_state *sctx = shash_desc_ctx(desc); nx_sha512_final() local
213 if (sctx->count[0] >= SHA512_BLOCK_SIZE) { nx_sha512_final()
216 memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state, nx_sha512_final()
227 count0 = sctx->count[0] * 8; nx_sha512_final()
231 len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1); nx_sha512_final()
232 in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len, nx_sha512_final()
235 if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) { nx_sha512_final()
258 atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes)); nx_sha512_final()
268 struct sha512_state *sctx = shash_desc_ctx(desc); nx_sha512_export() local
270 memcpy(out, sctx, sizeof(*sctx)); nx_sha512_export()
277 struct sha512_state *sctx = shash_desc_ctx(desc); nx_sha512_import() local
279 memcpy(sctx, in, sizeof(*sctx)); nx_sha512_import()
H A Dnx-aes-xcbc.c172 struct xcbc_state *sctx = shash_desc_ctx(desc); nx_xcbc_init() local
174 memset(sctx, 0, sizeof *sctx); nx_xcbc_init()
183 struct xcbc_state *sctx = shash_desc_ctx(desc); nx_xcbc_update() local
197 total = sctx->count + len; nx_xcbc_update()
204 memcpy(sctx->buffer + sctx->count, data, len); nx_xcbc_update()
205 sctx->count += len; nx_xcbc_update()
216 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, nx_xcbc_update()
242 if (sctx->count) { nx_xcbc_update()
243 data_len = sctx->count; nx_xcbc_update()
245 (u8 *) sctx->buffer, nx_xcbc_update()
248 if (data_len != sctx->count) { nx_xcbc_update()
254 data_len = to_process - sctx->count; nx_xcbc_update()
260 if (data_len != to_process - sctx->count) { nx_xcbc_update()
293 data += to_process - sctx->count; nx_xcbc_update()
294 sctx->count = 0; nx_xcbc_update()
299 memcpy(sctx->buffer, data, leftover); nx_xcbc_update()
300 sctx->count = leftover; nx_xcbc_update()
309 struct xcbc_state *sctx = shash_desc_ctx(desc); nx_xcbc_final() local
324 } else if (sctx->count == 0) { nx_xcbc_final()
338 len = sctx->count; nx_xcbc_final()
339 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer, nx_xcbc_final()
342 if (len != sctx->count) { nx_xcbc_final()
/linux-4.1.27/arch/x86/purgatory/
H A Dsha256.h17 extern int sha256_init(struct sha256_state *sctx);
18 extern int sha256_update(struct sha256_state *sctx, const u8 *input,
20 extern int sha256_final(struct sha256_state *sctx, u8 *hash);
H A Dpurgatory.c46 struct sha256_state sctx; verify_sha256_digest() local
48 sha256_init(&sctx); verify_sha256_digest()
51 sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len); verify_sha256_digest()
53 sha256_final(&sctx, digest); verify_sha256_digest()
H A Dsha256.c211 int sha256_init(struct sha256_state *sctx) sha256_init() argument
213 sctx->state[0] = SHA256_H0; sha256_init()
214 sctx->state[1] = SHA256_H1; sha256_init()
215 sctx->state[2] = SHA256_H2; sha256_init()
216 sctx->state[3] = SHA256_H3; sha256_init()
217 sctx->state[4] = SHA256_H4; sha256_init()
218 sctx->state[5] = SHA256_H5; sha256_init()
219 sctx->state[6] = SHA256_H6; sha256_init()
220 sctx->state[7] = SHA256_H7; sha256_init()
221 sctx->count = 0; sha256_init()
226 int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len) sha256_update() argument
231 partial = sctx->count & 0x3f; sha256_update()
232 sctx->count += len; sha256_update()
239 memcpy(sctx->buf + partial, data, done + 64); sha256_update()
240 src = sctx->buf; sha256_update()
244 sha256_transform(sctx->state, src); sha256_update()
251 memcpy(sctx->buf + partial, src, len - done); sha256_update()
256 int sha256_final(struct sha256_state *sctx, u8 *out) sha256_final() argument
265 bits = cpu_to_be64(sctx->count << 3); sha256_final()
268 index = sctx->count & 0x3f; sha256_final()
270 sha256_update(sctx, padding, pad_len); sha256_final()
273 sha256_update(sctx, (const u8 *)&bits, sizeof(bits)); sha256_final()
277 dst[i] = cpu_to_be32(sctx->state[i]); sha256_final()
280 memset(sctx, 0, sizeof(*sctx)); sha256_final()
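purgatory.c consumes this standalone implementation through the three calls declared in sha256.h: sha256_init(), one sha256_update() per region, then sha256_final(). A minimal usage sketch against that same API is below; it assumes the purgatory sha256.c/sha256.h have been adapted to build in an ordinary host program (their u8/u32/u64 and endian helpers normally come from kernel headers), and the input strings are invented.

#include <stdio.h>
#include "sha256.h"   /* the purgatory header: sha256_init/update/final */

int main(void)
{
        struct sha256_state sctx;
        unsigned char digest[32];   /* SHA256_DIGEST_SIZE */
        const unsigned char part1[] = "hello, ";
        const unsigned char part2[] = "world";
        int i;

        sha256_init(&sctx);
        /* one update per region, the same way verify_sha256_digest()
         * feeds each kexec segment to the hash */
        sha256_update(&sctx, part1, sizeof(part1) - 1);
        sha256_update(&sctx, part2, sizeof(part2) - 1);
        sha256_final(&sctx, digest);

        for (i = 0; i < 32; i++)
                printf("%02x", digest[i]);
        printf("\n");
        return 0;
}
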
/linux-4.1.27/drivers/staging/skein/
H A Dskein_generic.c45 struct skein_256_ctx *sctx = shash_desc_ctx(desc); skein256_export() local
47 memcpy(out, sctx, sizeof(*sctx)); skein256_export()
53 struct skein_256_ctx *sctx = shash_desc_ctx(desc); skein256_import() local
55 memcpy(sctx, in, sizeof(*sctx)); skein256_import()
80 struct skein_512_ctx *sctx = shash_desc_ctx(desc); skein512_export() local
82 memcpy(out, sctx, sizeof(*sctx)); skein512_export()
88 struct skein_512_ctx *sctx = shash_desc_ctx(desc); skein512_import() local
90 memcpy(sctx, in, sizeof(*sctx)); skein512_import()
115 struct skein_1024_ctx *sctx = shash_desc_ctx(desc); skein1024_export() local
117 memcpy(out, sctx, sizeof(*sctx)); skein1024_export()
123 struct skein_1024_ctx *sctx = shash_desc_ctx(desc); skein1024_import() local
125 memcpy(sctx, in, sizeof(*sctx)); skein1024_import()
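The skein export/import hooks, like the nx ones above, are whole-state memcpy's, which is what lets a caller checkpoint a partially hashed stream and resume it later. A tiny stand-alone sketch of that checkpoint/resume idea follows; the fake_state struct is a stand-in, not any of the real ctx types.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_state {          /* stand-in for sha256_state / a skein ctx */
        uint64_t count;
        uint32_t state[8];
        uint8_t  buf[64];
};

/* export: copy the live state out to caller-provided storage */
static void st_export(const struct fake_state *st, void *out)
{
        memcpy(out, st, sizeof(*st));
}

/* import: overwrite the live state with a previously exported copy */
static void st_import(struct fake_state *st, const void *in)
{
        memcpy(st, in, sizeof(*st));
}

int main(void)
{
        struct fake_state live = { .count = 96 };
        struct fake_state snapshot;

        st_export(&live, &snapshot);   /* checkpoint after 96 bytes hashed */
        live.count = 0;                /* the live state gets reused elsewhere */
        st_import(&live, &snapshot);   /* resume exactly where we left off */
        printf("resumed at count=%llu\n", (unsigned long long)live.count);
        return 0;
}
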
/linux-4.1.27/arch/arm64/crypto/
H A Dsha1-ce-glue.c38 struct sha1_ce_state *sctx = shash_desc_ctx(desc); sha1_ce_update() local
40 sctx->finalize = 0; sha1_ce_update()
52 struct sha1_ce_state *sctx = shash_desc_ctx(desc); sha1_ce_finup() local
53 bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE); sha1_ce_finup()
64 sctx->finalize = finalize; sha1_ce_finup()
77 struct sha1_ce_state *sctx = shash_desc_ctx(desc); sha1_ce_final() local
79 sctx->finalize = 0; sha1_ce_final()
H A Dsha2-ce-glue.c38 struct sha256_ce_state *sctx = shash_desc_ctx(desc); sha256_ce_update() local
40 sctx->finalize = 0; sha256_ce_update()
52 struct sha256_ce_state *sctx = shash_desc_ctx(desc); sha256_ce_finup() local
53 bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE); sha256_ce_finup()
64 sctx->finalize = finalize; sha256_ce_finup()
78 struct sha256_ce_state *sctx = shash_desc_ctx(desc); sha256_ce_final() local
80 sctx->finalize = 0; sha256_ce_final()
/linux-4.1.27/drivers/crypto/
H A Dpadlock-sha.c296 struct sha1_state *sctx = shash_desc_ctx(desc); padlock_sha1_init_nano() local
298 *sctx = (struct sha1_state){ padlock_sha1_init_nano()
308 struct sha1_state *sctx = shash_desc_ctx(desc); padlock_sha1_update_nano() local
317 partial = sctx->count & 0x3f; padlock_sha1_update_nano()
318 sctx->count += len; padlock_sha1_update_nano()
321 memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE); padlock_sha1_update_nano()
328 memcpy(sctx->buffer + partial, data, padlock_sha1_update_nano()
330 src = sctx->buffer; padlock_sha1_update_nano()
353 memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE); padlock_sha1_update_nano()
354 memcpy(sctx->buffer + partial, src, len - done); padlock_sha1_update_nano()
384 struct sha256_state *sctx = shash_desc_ctx(desc); padlock_sha256_init_nano() local
386 *sctx = (struct sha256_state){ padlock_sha256_init_nano()
397 struct sha256_state *sctx = shash_desc_ctx(desc); padlock_sha256_update_nano() local
406 partial = sctx->count & 0x3f; padlock_sha256_update_nano()
407 sctx->count += len; padlock_sha256_update_nano()
410 memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE); padlock_sha256_update_nano()
417 memcpy(sctx->buf + partial, data, padlock_sha256_update_nano()
419 src = sctx->buf; padlock_sha256_update_nano()
442 memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE); padlock_sha256_update_nano()
443 memcpy(sctx->buf + partial, src, len - done); padlock_sha256_update_nano()
476 void *sctx = shash_desc_ctx(desc); padlock_sha_export_nano() local
478 memcpy(out, sctx, statesize); padlock_sha_export_nano()
486 void *sctx = shash_desc_ctx(desc); padlock_sha_import_nano() local
488 memcpy(sctx, in, statesize); padlock_sha_import_nano()
/linux-4.1.27/arch/x86/crypto/sha-mb/
H A Dsha1_mb.c351 struct sha1_hash_ctx *sctx = shash_desc_ctx(desc); sha1_mb_init() local
353 hash_ctx_init(sctx); sha1_mb_init()
354 sctx->job.result_digest[0] = SHA1_H0; sha1_mb_init()
355 sctx->job.result_digest[1] = SHA1_H1; sha1_mb_init()
356 sctx->job.result_digest[2] = SHA1_H2; sha1_mb_init()
357 sctx->job.result_digest[3] = SHA1_H3; sha1_mb_init()
358 sctx->job.result_digest[4] = SHA1_H4; sha1_mb_init()
359 sctx->total_length = 0; sha1_mb_init()
360 sctx->partial_block_buffer_length = 0; sha1_mb_init()
361 sctx->status = HASH_CTX_STS_IDLE; sha1_mb_init()
369 struct sha1_hash_ctx *sctx = shash_desc_ctx(&rctx->desc); sha1_mb_set_results() local
373 dst[i] = cpu_to_be32(sctx->job.result_digest[i]); sha1_mb_set_results()
662 struct sha1_hash_ctx *sctx = shash_desc_ctx(desc); sha1_mb_export() local
664 memcpy(out, sctx, sizeof(*sctx)); sha1_mb_export()
671 struct sha1_hash_ctx *sctx = shash_desc_ctx(desc); sha1_mb_import() local
673 memcpy(sctx, in, sizeof(*sctx)); sha1_mb_import()
/linux-4.1.27/drivers/staging/rtl8723au/os_dep/
H A Dusb_ops_linux.c109 rtw23a_sctx_done_err(&pxmitbuf->sctx, usb_write_port23a_complete()
138 rtw23a_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_TX_DENY); rtl8723au_write_port()
184 rtw23a_sctx_done_err(&pxmitbuf->sctx, rtl8723au_write_port()
/linux-4.1.27/drivers/staging/rtl8188eu/include/
H A Drtw_xmit.h197 void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms);
198 int rtw_sctx_wait(struct submit_ctx *sctx);
199 void rtw_sctx_done_err(struct submit_ctx **sctx, int status);
200 void rtw_sctx_done(struct submit_ctx **sctx);
212 struct submit_ctx *sctx; member in struct:xmit_buf
/linux-4.1.27/drivers/staging/rtl8188eu/core/
H A Drtw_xmit.c1240 if (pxmitbuf->sctx) { rtw_alloc_xmitbuf_ext()
1241 DBG_88E("%s pxmitbuf->sctx is not NULL\n", __func__); rtw_alloc_xmitbuf_ext()
1242 rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC); rtw_alloc_xmitbuf_ext()
1301 if (pxmitbuf->sctx) { rtw_alloc_xmitbuf()
1302 DBG_88E("%s pxmitbuf->sctx is not NULL\n", __func__); rtw_alloc_xmitbuf()
1303 rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC); rtw_alloc_xmitbuf()
1320 if (pxmitbuf->sctx) { rtw_free_xmitbuf()
1321 DBG_88E("%s pxmitbuf->sctx is not NULL\n", __func__); rtw_free_xmitbuf()
1322 rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_FREE); rtw_free_xmitbuf()
2130 void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms) rtw_sctx_init() argument
2132 sctx->timeout_ms = timeout_ms; rtw_sctx_init()
2133 sctx->submit_time = jiffies; rtw_sctx_init()
2134 init_completion(&sctx->done); rtw_sctx_init()
2135 sctx->status = RTW_SCTX_SUBMITTED; rtw_sctx_init()
2138 int rtw_sctx_wait(struct submit_ctx *sctx) rtw_sctx_wait() argument
2144 expire = sctx->timeout_ms ? msecs_to_jiffies(sctx->timeout_ms) : MAX_SCHEDULE_TIMEOUT; rtw_sctx_wait()
2145 if (!wait_for_completion_timeout(&sctx->done, expire)) { rtw_sctx_wait()
2150 status = sctx->status; rtw_sctx_wait()
2174 void rtw_sctx_done_err(struct submit_ctx **sctx, int status) rtw_sctx_done_err() argument
2176 if (*sctx) { rtw_sctx_done_err()
2179 (*sctx)->status = status; rtw_sctx_done_err()
2180 complete(&((*sctx)->done)); rtw_sctx_done_err()
2181 *sctx = NULL; rtw_sctx_done_err()
2185 void rtw_sctx_done(struct submit_ctx **sctx) rtw_sctx_done() argument
2187 rtw_sctx_done_err(sctx, RTW_SCTX_DONE_SUCCESS); rtw_sctx_done()
H A Drtw_mlme_ext.c1901 struct submit_ctx sctx; dump_mgntframe_and_wait() local
1906 rtw_sctx_init(&sctx, timeout_ms); dump_mgntframe_and_wait()
1907 pxmitbuf->sctx = &sctx; dump_mgntframe_and_wait()
1912 ret = rtw_sctx_wait(&sctx); dump_mgntframe_and_wait()
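dump_mgntframe_and_wait() shows the submit_ctx pattern end to end: rtw_sctx_init() arms a completion with a timeout, the context is hung off the xmit buffer, and rtw_sctx_wait() blocks until the USB completion path reports a status through rtw_sctx_done_err(). A rough userspace analogue of that handshake, built on a pthread mutex/condvar instead of the kernel completion, is sketched below; the struct layout and all names are invented for illustration. Build with: cc -pthread sketch.c

#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Illustrative analogue of the driver's submit_ctx: the submitter waits,
 * the completion path posts a status and wakes it. */
struct submit_ctx {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             completed;
        int             status;      /* 0 = success, <0 = error code */
};

static void sctx_init(struct submit_ctx *s)
{
        pthread_mutex_init(&s->lock, NULL);
        pthread_cond_init(&s->cond, NULL);
        s->completed = 0;
        s->status = 0;
}

/* Completion side, equivalent in spirit to rtw_sctx_done_err(). */
static void sctx_done_err(struct submit_ctx *s, int status)
{
        pthread_mutex_lock(&s->lock);
        s->status = status;
        s->completed = 1;
        pthread_cond_signal(&s->cond);
        pthread_mutex_unlock(&s->lock);
}

/* Submitter side, equivalent in spirit to rtw_sctx_wait() with a timeout. */
static int sctx_wait(struct submit_ctx *s, int timeout_ms)
{
        struct timespec deadline;
        int ret = 0;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += timeout_ms / 1000;
        deadline.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
        if (deadline.tv_nsec >= 1000000000L) {
                deadline.tv_sec++;
                deadline.tv_nsec -= 1000000000L;
        }

        pthread_mutex_lock(&s->lock);
        while (!s->completed && ret == 0)
                ret = pthread_cond_timedwait(&s->cond, &s->lock, &deadline);
        ret = s->completed ? s->status : -1;   /* -1: timed out */
        pthread_mutex_unlock(&s->lock);
        return ret;
}

static void *fake_completion(void *arg)
{
        usleep(10000);                 /* pretend the URB completes after 10 ms */
        sctx_done_err(arg, 0);
        return NULL;
}

int main(void)
{
        struct submit_ctx sctx;
        pthread_t t;

        sctx_init(&sctx);
        pthread_create(&t, NULL, fake_completion, &sctx);    /* "submit" */
        printf("wait returned %d\n", sctx_wait(&sctx, 500)); /* block like the caller */
        pthread_join(t, NULL);
        return 0;
}
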
/linux-4.1.27/drivers/staging/rtl8723au/core/
H A Drtw_xmit.c1317 if (pxmitbuf->sctx) { rtw_alloc_xmitbuf23a_ext()
1318 DBG_8723A("%s pxmitbuf->sctx is not NULL\n", __func__); rtw_alloc_xmitbuf23a_ext()
1319 rtw23a_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC); rtw_alloc_xmitbuf23a_ext()
1372 if (pxmitbuf->sctx) { rtw_alloc_xmitbuf23a()
1373 DBG_8723A("%s pxmitbuf->sctx is not NULL\n", __func__); rtw_alloc_xmitbuf23a()
1374 rtw23a_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC); rtw_alloc_xmitbuf23a()
1393 if (pxmitbuf->sctx) { rtw_free_xmitbuf23a()
1394 DBG_8723A("%s pxmitbuf->sctx is not NULL\n", __func__); rtw_free_xmitbuf23a()
1395 rtw23a_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_FREE); rtw_free_xmitbuf23a()
2314 void rtw_sctx_init23a(struct submit_ctx *sctx, int timeout_ms) rtw_sctx_init23a() argument
2316 sctx->timeout_ms = timeout_ms; rtw_sctx_init23a()
2317 init_completion(&sctx->done); rtw_sctx_init23a()
2318 sctx->status = RTW_SCTX_SUBMITTED; rtw_sctx_init23a()
2321 int rtw_sctx_wait23a(struct submit_ctx *sctx) rtw_sctx_wait23a() argument
2327 expire = sctx->timeout_ms ? msecs_to_jiffies(sctx->timeout_ms) : rtw_sctx_wait23a()
2329 if (!wait_for_completion_timeout(&sctx->done, expire)) { rtw_sctx_wait23a()
2334 status = sctx->status; rtw_sctx_wait23a()
2357 void rtw23a_sctx_done_err(struct submit_ctx **sctx, int status) rtw23a_sctx_done_err() argument
2359 if (*sctx) { rtw23a_sctx_done_err()
2362 (*sctx)->status = status; rtw23a_sctx_done_err()
2363 complete(&(*sctx)->done); rtw23a_sctx_done_err()
2364 *sctx = NULL; rtw23a_sctx_done_err()
H A Drtw_mlme_ext.c2277 struct submit_ctx sctx; dump_mgntframe23a_and_wait() local
2283 rtw_sctx_init23a(&sctx, timeout_ms); dump_mgntframe23a_and_wait()
2284 pxmitbuf->sctx = &sctx; dump_mgntframe23a_and_wait()
2289 ret = rtw_sctx_wait23a(&sctx); dump_mgntframe23a_and_wait()
2292 pxmitbuf->sctx = NULL; dump_mgntframe23a_and_wait()
/linux-4.1.27/arch/x86/crypto/
H A Dsha1_ssse3_glue.c55 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_ssse3_update() local
58 (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE) sha1_ssse3_update()
H A Dsha256_ssse3_glue.c61 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_ssse3_update() local
64 (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE) sha256_ssse3_update()
H A Dsha512_ssse3_glue.c60 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_ssse3_update() local
63 (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE) sha512_ssse3_update()
/linux-4.1.27/drivers/staging/rtl8723au/include/
H A Drtw_xmit.h196 void rtw_sctx_init23a(struct submit_ctx *sctx, int timeout_ms);
197 int rtw_sctx_wait23a(struct submit_ctx *sctx);
198 void rtw23a_sctx_done_err(struct submit_ctx **sctx, int status);
212 struct submit_ctx *sctx; member in struct:xmit_buf
/linux-4.1.27/drivers/staging/rtl8188eu/os_dep/
H A Dusb_ops_linux.c686 rtw_sctx_done_err(&pxmitbuf->sctx, usb_write_port_complete()
715 rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_TX_DENY); usb_write_port()
761 rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_WRITE_PORT_ERR); usb_write_port()
/linux-4.1.27/arch/powerpc/perf/
H A Dcallchain.c352 struct sigcontext32 sctx; member in struct:signal_frame_32
394 if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs)) sane_signal_32_frame()
/linux-4.1.27/arch/powerpc/kernel/
H A Dsignal_32.c219 struct sigcontext sctx; /* the sigcontext */ member in struct:sigframe
1425 sc = (struct sigcontext __user *) &frame->sctx; handle_signal32()
1512 sc = &sf->sctx; sys_sigreturn()
/linux-4.1.27/drivers/staging/rtl8723au/hal/
H A Drtl8723au_xmit.c352 rtw23a_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_UNKNOWN); rtw_dump_xframe()
/linux-4.1.27/drivers/staging/rtl8188eu/hal/
H A Drtl8188eu_xmit.c408 rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_UNKNOWN); rtw_dump_xframe()

Completed in 1787 milliseconds