
Searched refs:walk (Results 1 – 157 of 157) sorted by relevance

/linux-4.4.14/crypto/
blkcipher.c
41 struct blkcipher_walk *walk);
43 struct blkcipher_walk *walk);
45 static inline void blkcipher_map_src(struct blkcipher_walk *walk) in blkcipher_map_src() argument
47 walk->src.virt.addr = scatterwalk_map(&walk->in); in blkcipher_map_src()
50 static inline void blkcipher_map_dst(struct blkcipher_walk *walk) in blkcipher_map_dst() argument
52 walk->dst.virt.addr = scatterwalk_map(&walk->out); in blkcipher_map_dst()
55 static inline void blkcipher_unmap_src(struct blkcipher_walk *walk) in blkcipher_unmap_src() argument
57 scatterwalk_unmap(walk->src.virt.addr); in blkcipher_unmap_src()
60 static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk) in blkcipher_unmap_dst() argument
62 scatterwalk_unmap(walk->dst.virt.addr); in blkcipher_unmap_dst()
[all …]
ahash.c
43 static int hash_walk_next(struct crypto_hash_walk *walk) in hash_walk_next() argument
45 unsigned int alignmask = walk->alignmask; in hash_walk_next()
46 unsigned int offset = walk->offset; in hash_walk_next()
47 unsigned int nbytes = min(walk->entrylen, in hash_walk_next()
50 if (walk->flags & CRYPTO_ALG_ASYNC) in hash_walk_next()
51 walk->data = kmap(walk->pg); in hash_walk_next()
53 walk->data = kmap_atomic(walk->pg); in hash_walk_next()
54 walk->data += offset; in hash_walk_next()
63 walk->entrylen -= nbytes; in hash_walk_next()
67 static int hash_walk_new_entry(struct crypto_hash_walk *walk) in hash_walk_new_entry() argument
[all …]
scatterwalk.c
33 void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg) in scatterwalk_start() argument
35 walk->sg = sg; in scatterwalk_start()
39 walk->offset = sg->offset; in scatterwalk_start()
43 void *scatterwalk_map(struct scatter_walk *walk) in scatterwalk_map() argument
45 return kmap_atomic(scatterwalk_page(walk)) + in scatterwalk_map()
46 offset_in_page(walk->offset); in scatterwalk_map()
50 static void scatterwalk_pagedone(struct scatter_walk *walk, int out, in scatterwalk_pagedone() argument
56 page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT); in scatterwalk_pagedone()
66 walk->offset += PAGE_SIZE - 1; in scatterwalk_pagedone()
67 walk->offset &= PAGE_MASK; in scatterwalk_pagedone()
[all …]
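
For orientation: the scatterwalk helpers indexed here are usually combined the way the sg_copy_buf() hits under drivers/crypto/omap-*.c (further down) do it: start a walk, advance to an offset, copy a chunk, and finish. A minimal sketch under that assumption; sg_copy is an illustrative name, not a kernel API:

#include <crypto/scatterwalk.h>

/* Copy nbytes between a linear buffer and a scatterlist, starting
 * 'start' bytes in; 'out' selects the direction, as with
 * scatterwalk_copychunks(). Illustrative helper only. */
static void sg_copy(struct scatterlist *sg, void *buf,
		    unsigned int start, unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	scatterwalk_start(&walk, sg);		/* point the walk at sg */
	scatterwalk_advance(&walk, start);	/* skip to the offset */
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);	/* unmap/flush the last page */
}
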
ablkcipher.c
47 void __ablkcipher_walk_complete(struct ablkcipher_walk *walk) in __ablkcipher_walk_complete() argument
51 list_for_each_entry_safe(p, tmp, &walk->buffers, entry) { in __ablkcipher_walk_complete()
59 static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk, in ablkcipher_queue_write() argument
62 p->dst = walk->out; in ablkcipher_queue_write()
63 list_add_tail(&p->entry, &walk->buffers); in ablkcipher_queue_write()
76 static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk, in ablkcipher_done_slow() argument
82 unsigned int len_this_page = scatterwalk_pagelen(&walk->out); in ablkcipher_done_slow()
86 scatterwalk_advance(&walk->out, n); in ablkcipher_done_slow()
90 scatterwalk_start(&walk->out, sg_next(walk->out.sg)); in ablkcipher_done_slow()
96 static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk, in ablkcipher_done_fast() argument
[all …]
pcbc.c
46 struct blkcipher_walk *walk, in crypto_pcbc_encrypt_segment() argument
52 unsigned int nbytes = walk->nbytes; in crypto_pcbc_encrypt_segment()
53 u8 *src = walk->src.virt.addr; in crypto_pcbc_encrypt_segment()
54 u8 *dst = walk->dst.virt.addr; in crypto_pcbc_encrypt_segment()
55 u8 *iv = walk->iv; in crypto_pcbc_encrypt_segment()
71 struct blkcipher_walk *walk, in crypto_pcbc_encrypt_inplace() argument
77 unsigned int nbytes = walk->nbytes; in crypto_pcbc_encrypt_inplace()
78 u8 *src = walk->src.virt.addr; in crypto_pcbc_encrypt_inplace()
79 u8 *iv = walk->iv; in crypto_pcbc_encrypt_inplace()
92 memcpy(walk->iv, iv, bsize); in crypto_pcbc_encrypt_inplace()
[all …]
cbc.c
43 struct blkcipher_walk *walk, in crypto_cbc_encrypt_segment() argument
49 unsigned int nbytes = walk->nbytes; in crypto_cbc_encrypt_segment()
50 u8 *src = walk->src.virt.addr; in crypto_cbc_encrypt_segment()
51 u8 *dst = walk->dst.virt.addr; in crypto_cbc_encrypt_segment()
52 u8 *iv = walk->iv; in crypto_cbc_encrypt_segment()
67 struct blkcipher_walk *walk, in crypto_cbc_encrypt_inplace() argument
73 unsigned int nbytes = walk->nbytes; in crypto_cbc_encrypt_inplace()
74 u8 *src = walk->src.virt.addr; in crypto_cbc_encrypt_inplace()
75 u8 *iv = walk->iv; in crypto_cbc_encrypt_inplace()
85 memcpy(walk->iv, iv, bsize); in crypto_cbc_encrypt_inplace()
[all …]
salsa20_generic.c
181 struct blkcipher_walk walk; in encrypt() local
186 blkcipher_walk_init(&walk, dst, src, nbytes); in encrypt()
187 err = blkcipher_walk_virt_block(desc, &walk, 64); in encrypt()
189 salsa20_ivsetup(ctx, walk.iv); in encrypt()
191 if (likely(walk.nbytes == nbytes)) in encrypt()
193 salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, in encrypt()
194 walk.src.virt.addr, nbytes); in encrypt()
195 return blkcipher_walk_done(desc, &walk, 0); in encrypt()
198 while (walk.nbytes >= 64) { in encrypt()
199 salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, in encrypt()
[all …]
ctr.c
55 static void crypto_ctr_crypt_final(struct blkcipher_walk *walk, in crypto_ctr_crypt_final() argument
60 u8 *ctrblk = walk->iv; in crypto_ctr_crypt_final()
63 u8 *src = walk->src.virt.addr; in crypto_ctr_crypt_final()
64 u8 *dst = walk->dst.virt.addr; in crypto_ctr_crypt_final()
65 unsigned int nbytes = walk->nbytes; in crypto_ctr_crypt_final()
74 static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk, in crypto_ctr_crypt_segment() argument
80 u8 *ctrblk = walk->iv; in crypto_ctr_crypt_segment()
81 u8 *src = walk->src.virt.addr; in crypto_ctr_crypt_segment()
82 u8 *dst = walk->dst.virt.addr; in crypto_ctr_crypt_segment()
83 unsigned int nbytes = walk->nbytes; in crypto_ctr_crypt_segment()
[all …]
ecb.c
42 struct blkcipher_walk *walk, in crypto_ecb_crypt() argument
50 err = blkcipher_walk_virt(desc, walk); in crypto_ecb_crypt()
52 while ((nbytes = walk->nbytes)) { in crypto_ecb_crypt()
53 u8 *wsrc = walk->src.virt.addr; in crypto_ecb_crypt()
54 u8 *wdst = walk->dst.virt.addr; in crypto_ecb_crypt()
63 err = blkcipher_walk_done(desc, walk, nbytes); in crypto_ecb_crypt()
73 struct blkcipher_walk walk; in crypto_ecb_encrypt() local
78 blkcipher_walk_init(&walk, dst, src, nbytes); in crypto_ecb_encrypt()
79 return crypto_ecb_crypt(desc, &walk, child, in crypto_ecb_encrypt()
87 struct blkcipher_walk walk; in crypto_ecb_decrypt() local
[all …]
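
The blkcipher hits above and the arch glue entries below all share one consumer loop: blkcipher_walk_init() over the scatterlists, blkcipher_walk_virt() to map a contiguous span, process walk.nbytes, then blkcipher_walk_done() with the unprocessed remainder. A minimal sketch of that loop against the 4.4-era API; the cipher and its block size are invented placeholders:

#include <crypto/algapi.h>

#define TOY_BLOCK_SIZE 16	/* illustrative block size */

/* Stand-in for a real one-block cipher primitive. */
static void toy_crypt_block(u8 *dst, const u8 *src)
{
	int i;

	for (i = 0; i < TOY_BLOCK_SIZE; i++)
		dst[i] = src[i] ^ 0xff;
}

static int toy_ecb_crypt(struct blkcipher_desc *desc,
			 struct scatterlist *dst, struct scatterlist *src,
			 unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		u8 *wsrc = walk.src.virt.addr;
		u8 *wdst = walk.dst.virt.addr;

		do {
			toy_crypt_block(wdst, wsrc);
			wsrc += TOY_BLOCK_SIZE;
			wdst += TOY_BLOCK_SIZE;
		} while ((nbytes -= TOY_BLOCK_SIZE) >= TOY_BLOCK_SIZE);

		/* hand back the sub-block tail; 0 on the final pass */
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	return err;
}
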
chacha20_generic.c
146 struct blkcipher_walk walk; in crypto_chacha20_crypt() local
150 blkcipher_walk_init(&walk, dst, src, nbytes); in crypto_chacha20_crypt()
151 err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE); in crypto_chacha20_crypt()
153 crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv); in crypto_chacha20_crypt()
155 while (walk.nbytes >= CHACHA20_BLOCK_SIZE) { in crypto_chacha20_crypt()
156 chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr, in crypto_chacha20_crypt()
157 rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE)); in crypto_chacha20_crypt()
158 err = blkcipher_walk_done(desc, &walk, in crypto_chacha20_crypt()
159 walk.nbytes % CHACHA20_BLOCK_SIZE); in crypto_chacha20_crypt()
162 if (walk.nbytes) { in crypto_chacha20_crypt()
[all …]
crypto_null.c
81 struct blkcipher_walk walk; in skcipher_null_crypt() local
84 blkcipher_walk_init(&walk, dst, src, nbytes); in skcipher_null_crypt()
85 err = blkcipher_walk_virt(desc, &walk); in skcipher_null_crypt()
87 while (walk.nbytes) { in skcipher_null_crypt()
88 if (walk.src.virt.addr != walk.dst.virt.addr) in skcipher_null_crypt()
89 memcpy(walk.dst.virt.addr, walk.src.virt.addr, in skcipher_null_crypt()
90 walk.nbytes); in skcipher_null_crypt()
91 err = blkcipher_walk_done(desc, &walk, 0); in skcipher_null_crypt()
arc4.c
99 struct blkcipher_walk walk; in ecb_arc4_crypt() local
102 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_arc4_crypt()
104 err = blkcipher_walk_virt(desc, &walk); in ecb_arc4_crypt()
106 while (walk.nbytes > 0) { in ecb_arc4_crypt()
107 u8 *wsrc = walk.src.virt.addr; in ecb_arc4_crypt()
108 u8 *wdst = walk.dst.virt.addr; in ecb_arc4_crypt()
110 arc4_crypt(ctx, wdst, wsrc, walk.nbytes); in ecb_arc4_crypt()
112 err = blkcipher_walk_done(desc, &walk, 0); in ecb_arc4_crypt()
xts.c
175 struct blkcipher_walk walk; in xts_crypt() local
183 blkcipher_walk_init(&walk, sdst, ssrc, nbytes); in xts_crypt()
185 err = blkcipher_walk_virt(desc, &walk); in xts_crypt()
186 nbytes = walk.nbytes; in xts_crypt()
191 src = (be128 *)walk.src.virt.addr; in xts_crypt()
192 dst = (be128 *)walk.dst.virt.addr; in xts_crypt()
195 req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv); in xts_crypt()
225 *(be128 *)walk.iv = *t; in xts_crypt()
227 err = blkcipher_walk_done(desc, &walk, nbytes); in xts_crypt()
228 nbytes = walk.nbytes; in xts_crypt()
[all …]
lrw.c
224 struct blkcipher_walk walk; in lrw_crypt() local
232 blkcipher_walk_init(&walk, sdst, ssrc, nbytes); in lrw_crypt()
234 err = blkcipher_walk_virt(desc, &walk); in lrw_crypt()
235 nbytes = walk.nbytes; in lrw_crypt()
239 nblocks = min(walk.nbytes / bsize, max_blks); in lrw_crypt()
240 src = (be128 *)walk.src.virt.addr; in lrw_crypt()
241 dst = (be128 *)walk.dst.virt.addr; in lrw_crypt()
244 iv = (be128 *)walk.iv; in lrw_crypt()
282 err = blkcipher_walk_done(desc, &walk, nbytes); in lrw_crypt()
283 nbytes = walk.nbytes; in lrw_crypt()
[all …]
shash.c
222 struct crypto_hash_walk walk; in shash_ahash_update() local
225 for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0; in shash_ahash_update()
226 nbytes = crypto_hash_walk_done(&walk, nbytes)) in shash_ahash_update()
227 nbytes = crypto_shash_update(desc, walk.data, nbytes); in shash_ahash_update()
245 struct crypto_hash_walk walk; in shash_ahash_finup() local
248 nbytes = crypto_hash_walk_first(req, &walk); in shash_ahash_finup()
253 nbytes = crypto_hash_walk_last(&walk) ? in shash_ahash_finup()
254 crypto_shash_finup(desc, walk.data, nbytes, in shash_ahash_finup()
256 crypto_shash_update(desc, walk.data, nbytes); in shash_ahash_finup()
257 nbytes = crypto_hash_walk_done(&walk, nbytes); in shash_ahash_finup()
[all …]
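
The crypto_hash_walk hits in ahash.c and shash.c pair crypto_hash_walk_first() with crypto_hash_walk_done() to visit each mapped chunk of a hash request; shash_ahash_update() above is the canonical loop. A minimal sketch of the same shape (toy_hash_update is an illustrative name):

#include <crypto/internal/hash.h>

/* Feed every mapped chunk of an ahash request to a synchronous hash,
 * mirroring shash_ahash_update(). Returns 0 or a negative errno. */
static int toy_hash_update(struct shash_desc *desc, struct ahash_request *req)
{
	struct crypto_hash_walk walk;
	int nbytes;

	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, nbytes))
		nbytes = crypto_shash_update(desc, walk.data, nbytes);

	return nbytes;
}
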
ccm.c
213 struct scatter_walk walk; in get_data_to_compute() local
217 scatterwalk_start(&walk, sg); in get_data_to_compute()
220 n = scatterwalk_clamp(&walk, len); in get_data_to_compute()
222 scatterwalk_start(&walk, sg_next(walk.sg)); in get_data_to_compute()
223 n = scatterwalk_clamp(&walk, len); in get_data_to_compute()
225 data_src = scatterwalk_map(&walk); in get_data_to_compute()
231 scatterwalk_advance(&walk, n); in get_data_to_compute()
232 scatterwalk_done(&walk, 0, len); in get_data_to_compute()
keywrap.c
113 static void crypto_kw_scatterlist_ff(struct scatter_walk *walk, in crypto_kw_scatterlist_ff() argument
125 scatterwalk_start(walk, sg); in crypto_kw_scatterlist_ff()
126 scatterwalk_advance(walk, skip); in crypto_kw_scatterlist_ff()
/linux-4.4.14/mm/
pagewalk.c
7 struct mm_walk *walk) in walk_pte_range() argument
14 err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk); in walk_pte_range()
28 struct mm_walk *walk) in walk_pmd_range() argument
38 if (pmd_none(*pmd) || !walk->vma) { in walk_pmd_range()
39 if (walk->pte_hole) in walk_pmd_range()
40 err = walk->pte_hole(addr, next, walk); in walk_pmd_range()
49 if (walk->pmd_entry) in walk_pmd_range()
50 err = walk->pmd_entry(pmd, addr, next, walk); in walk_pmd_range()
58 if (!walk->pte_entry) in walk_pmd_range()
61 split_huge_page_pmd_mm(walk->mm, addr, pmd); in walk_pmd_range()
[all …]
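
The mm/ hits here, and the arch/openrisc/kernel/dma.c and arch/s390/mm/pgtable.c entries below, all drive walk_page_range() the same way: fill a struct mm_walk with the callbacks of interest plus .mm, then walk a virtual range. A minimal sketch against the 4.4 API; counting present PTEs is an invented example:

#include <linux/mm.h>

/* Per-PTE callback; walk->private carries the caller's state. */
static int count_pte(pte_t *pte, unsigned long addr,
		     unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;		/* non-zero would abort the walk */
}

static unsigned long count_present(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pte_entry = count_pte,
		.mm	   = mm,
		.private   = &count,
	};

	down_read(&mm->mmap_sem);	/* 4.4-era callers hold mmap_sem */
	walk_page_range(start, end, &walk);
	up_read(&mm->mmap_sem);
	return count;
}
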
mincore.c
23 unsigned long end, struct mm_walk *walk) in mincore_hugetlb() argument
27 unsigned char *vec = walk->private; in mincore_hugetlb()
36 walk->private = vec; in mincore_hugetlb()
104 struct mm_walk *walk) in mincore_unmapped_range() argument
106 walk->private += __mincore_unmapped_range(addr, end, in mincore_unmapped_range()
107 walk->vma, walk->private); in mincore_unmapped_range()
112 struct mm_walk *walk) in mincore_pte_range() argument
115 struct vm_area_struct *vma = walk->vma; in mincore_pte_range()
117 unsigned char *vec = walk->private; in mincore_pte_range()
131 ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in mincore_pte_range()
[all …]
madvise.c
141 unsigned long end, struct mm_walk *walk) in swapin_walk_pmd_entry() argument
144 struct vm_area_struct *vma = walk->private; in swapin_walk_pmd_entry()
178 struct mm_walk walk = { in force_swapin_readahead() local
184 walk_page_range(start, end, &walk); in force_swapin_readahead()
mempolicy.c
486 unsigned long end, struct mm_walk *walk) in queue_pages_pte_range() argument
488 struct vm_area_struct *vma = walk->vma; in queue_pages_pte_range()
490 struct queue_pages *qp = walk->private; in queue_pages_pte_range()
500 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in queue_pages_pte_range()
527 struct mm_walk *walk) in queue_pages_hugetlb() argument
530 struct queue_pages *qp = walk->private; in queue_pages_hugetlb()
537 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); in queue_pages_hugetlb()
587 struct mm_walk *walk) in queue_pages_test_walk() argument
589 struct vm_area_struct *vma = walk->vma; in queue_pages_test_walk()
590 struct queue_pages *qp = walk->private; in queue_pages_test_walk()
memcontrol.c
4711 struct mm_walk *walk) in mem_cgroup_count_precharge_pte_range() argument
4713 struct vm_area_struct *vma = walk->vma; in mem_cgroup_count_precharge_pte_range()
4902 struct mm_walk *walk) in mem_cgroup_move_charge_pte_range() argument
4905 struct vm_area_struct *vma = walk->vma; in mem_cgroup_move_charge_pte_range()
/linux-4.4.14/arch/x86/crypto/
glue_helper.c
37 struct blkcipher_walk *walk) in __glue_ecb_crypt_128bit() argument
45 err = blkcipher_walk_virt(desc, walk); in __glue_ecb_crypt_128bit()
47 while ((nbytes = walk->nbytes)) { in __glue_ecb_crypt_128bit()
48 u8 *wsrc = walk->src.virt.addr; in __glue_ecb_crypt_128bit()
49 u8 *wdst = walk->dst.virt.addr; in __glue_ecb_crypt_128bit()
74 err = blkcipher_walk_done(desc, walk, nbytes); in __glue_ecb_crypt_128bit()
85 struct blkcipher_walk walk; in glue_ecb_crypt_128bit() local
87 blkcipher_walk_init(&walk, dst, src, nbytes); in glue_ecb_crypt_128bit()
88 return __glue_ecb_crypt_128bit(gctx, desc, &walk); in glue_ecb_crypt_128bit()
94 struct blkcipher_walk *walk) in __glue_cbc_encrypt_128bit() argument
[all …]
blowfish_glue.c
80 static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, in ecb_crypt() argument
89 err = blkcipher_walk_virt(desc, walk); in ecb_crypt()
91 while ((nbytes = walk->nbytes)) { in ecb_crypt()
92 u8 *wsrc = walk->src.virt.addr; in ecb_crypt()
93 u8 *wdst = walk->dst.virt.addr; in ecb_crypt()
119 err = blkcipher_walk_done(desc, walk, nbytes); in ecb_crypt()
128 struct blkcipher_walk walk; in ecb_encrypt() local
130 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_encrypt()
131 return ecb_crypt(desc, &walk, blowfish_enc_blk, blowfish_enc_blk_4way); in ecb_encrypt()
137 struct blkcipher_walk walk; in ecb_decrypt() local
[all …]
cast5_avx_glue.c
59 static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, in ecb_crypt() argument
71 err = blkcipher_walk_virt(desc, walk); in ecb_crypt()
74 while ((nbytes = walk->nbytes)) { in ecb_crypt()
75 u8 *wsrc = walk->src.virt.addr; in ecb_crypt()
76 u8 *wdst = walk->dst.virt.addr; in ecb_crypt()
106 err = blkcipher_walk_done(desc, walk, nbytes); in ecb_crypt()
116 struct blkcipher_walk walk; in ecb_encrypt() local
118 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_encrypt()
119 return ecb_crypt(desc, &walk, true); in ecb_encrypt()
125 struct blkcipher_walk walk; in ecb_decrypt() local
[all …]
des3_ede_glue.c
86 static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, in ecb_crypt() argument
93 err = blkcipher_walk_virt(desc, walk); in ecb_crypt()
95 while ((nbytes = walk->nbytes)) { in ecb_crypt()
96 u8 *wsrc = walk->src.virt.addr; in ecb_crypt()
97 u8 *wdst = walk->dst.virt.addr; in ecb_crypt()
124 err = blkcipher_walk_done(desc, walk, nbytes); in ecb_crypt()
134 struct blkcipher_walk walk; in ecb_encrypt() local
136 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_encrypt()
137 return ecb_crypt(desc, &walk, ctx->enc_expkey); in ecb_encrypt()
144 struct blkcipher_walk walk; in ecb_decrypt() local
[all …]
salsa20_glue.c
52 struct blkcipher_walk walk; in encrypt() local
57 blkcipher_walk_init(&walk, dst, src, nbytes); in encrypt()
58 err = blkcipher_walk_virt_block(desc, &walk, 64); in encrypt()
60 salsa20_ivsetup(ctx, walk.iv); in encrypt()
62 if (likely(walk.nbytes == nbytes)) in encrypt()
64 salsa20_encrypt_bytes(ctx, walk.src.virt.addr, in encrypt()
65 walk.dst.virt.addr, nbytes); in encrypt()
66 return blkcipher_walk_done(desc, &walk, 0); in encrypt()
69 while (walk.nbytes >= 64) { in encrypt()
70 salsa20_encrypt_bytes(ctx, walk.src.virt.addr, in encrypt()
[all …]
chacha20_glue.c
70 struct blkcipher_walk walk; in chacha20_simd() local
78 blkcipher_walk_init(&walk, dst, src, nbytes); in chacha20_simd()
79 err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE); in chacha20_simd()
81 crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv); in chacha20_simd()
85 while (walk.nbytes >= CHACHA20_BLOCK_SIZE) { in chacha20_simd()
86 chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr, in chacha20_simd()
87 rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE)); in chacha20_simd()
88 err = blkcipher_walk_done(desc, &walk, in chacha20_simd()
89 walk.nbytes % CHACHA20_BLOCK_SIZE); in chacha20_simd()
92 if (walk.nbytes) { in chacha20_simd()
[all …]
aesni-intel_glue.c
379 struct blkcipher_walk walk; in ecb_encrypt() local
382 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_encrypt()
383 err = blkcipher_walk_virt(desc, &walk); in ecb_encrypt()
387 while ((nbytes = walk.nbytes)) { in ecb_encrypt()
388 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, in ecb_encrypt()
391 err = blkcipher_walk_done(desc, &walk, nbytes); in ecb_encrypt()
403 struct blkcipher_walk walk; in ecb_decrypt() local
406 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_decrypt()
407 err = blkcipher_walk_virt(desc, &walk); in ecb_decrypt()
411 while ((nbytes = walk.nbytes)) { in ecb_decrypt()
[all …]
/linux-4.4.14/arch/arm/crypto/
aesbs-glue.c
109 struct blkcipher_walk walk; in aesbs_cbc_encrypt() local
112 blkcipher_walk_init(&walk, dst, src, nbytes); in aesbs_cbc_encrypt()
113 err = blkcipher_walk_virt(desc, &walk); in aesbs_cbc_encrypt()
115 while (walk.nbytes) { in aesbs_cbc_encrypt()
116 u32 blocks = walk.nbytes / AES_BLOCK_SIZE; in aesbs_cbc_encrypt()
117 u8 *src = walk.src.virt.addr; in aesbs_cbc_encrypt()
119 if (walk.dst.virt.addr == walk.src.virt.addr) { in aesbs_cbc_encrypt()
120 u8 *iv = walk.iv; in aesbs_cbc_encrypt()
128 memcpy(walk.iv, iv, AES_BLOCK_SIZE); in aesbs_cbc_encrypt()
130 u8 *dst = walk.dst.virt.addr; in aesbs_cbc_encrypt()
[all …]
aes-ce-glue.c
170 struct blkcipher_walk walk; in ecb_encrypt() local
175 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_encrypt()
176 err = blkcipher_walk_virt(desc, &walk); in ecb_encrypt()
179 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { in ecb_encrypt()
180 ce_aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr, in ecb_encrypt()
182 err = blkcipher_walk_done(desc, &walk, in ecb_encrypt()
183 walk.nbytes % AES_BLOCK_SIZE); in ecb_encrypt()
193 struct blkcipher_walk walk; in ecb_decrypt() local
198 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_decrypt()
199 err = blkcipher_walk_virt(desc, &walk); in ecb_decrypt()
[all …]
/linux-4.4.14/arch/arm64/crypto/
aes-glue.c
104 struct blkcipher_walk walk; in ecb_encrypt() local
108 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_encrypt()
109 err = blkcipher_walk_virt(desc, &walk); in ecb_encrypt()
112 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { in ecb_encrypt()
113 aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr, in ecb_encrypt()
115 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); in ecb_encrypt()
126 struct blkcipher_walk walk; in ecb_decrypt() local
130 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_decrypt()
131 err = blkcipher_walk_virt(desc, &walk); in ecb_decrypt()
134 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { in ecb_decrypt()
[all …]
aes-ce-ccm-glue.c
111 struct scatter_walk walk; in ccm_calculate_auth_mac() local
127 scatterwalk_start(&walk, req->src); in ccm_calculate_auth_mac()
130 u32 n = scatterwalk_clamp(&walk, len); in ccm_calculate_auth_mac()
134 scatterwalk_start(&walk, sg_next(walk.sg)); in ccm_calculate_auth_mac()
135 n = scatterwalk_clamp(&walk, len); in ccm_calculate_auth_mac()
137 p = scatterwalk_map(&walk); in ccm_calculate_auth_mac()
143 scatterwalk_advance(&walk, n); in ccm_calculate_auth_mac()
144 scatterwalk_done(&walk, 0, len); in ccm_calculate_auth_mac()
153 struct blkcipher_walk walk; in ccm_encrypt() local
180 blkcipher_walk_init(&walk, dst, src, len); in ccm_encrypt()
[all …]
/linux-4.4.14/arch/sparc/mm/
extable.c
19 const struct exception_table_entry *walk; in search_extable() local
39 for (walk = start; walk <= last; walk++) { in search_extable()
40 if (walk->fixup == 0) { in search_extable()
42 walk++; in search_extable()
47 if (walk->fixup == -1) in search_extable()
50 if (walk->insn == value) in search_extable()
51 return walk; in search_extable()
55 for (walk = start; walk <= (last - 1); walk++) { in search_extable()
56 if (walk->fixup) in search_extable()
59 if (walk[0].insn <= value && walk[1].insn > value) in search_extable()
[all …]
/linux-4.4.14/include/crypto/
scatterwalk.h
50 static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk) in scatterwalk_pagelen() argument
52 unsigned int len = walk->sg->offset + walk->sg->length - walk->offset; in scatterwalk_pagelen()
53 unsigned int len_this_page = offset_in_page(~walk->offset) + 1; in scatterwalk_pagelen()
57 static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk, in scatterwalk_clamp() argument
60 unsigned int len_this_page = scatterwalk_pagelen(walk); in scatterwalk_clamp()
64 static inline void scatterwalk_advance(struct scatter_walk *walk, in scatterwalk_advance() argument
67 walk->offset += nbytes; in scatterwalk_advance()
70 static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk, in scatterwalk_aligned() argument
73 return !(walk->offset & alignmask); in scatterwalk_aligned()
76 static inline struct page *scatterwalk_page(struct scatter_walk *walk) in scatterwalk_page() argument
[all …]
algapi.h
193 struct blkcipher_walk *walk, int err);
195 struct blkcipher_walk *walk);
197 struct blkcipher_walk *walk);
199 struct blkcipher_walk *walk,
202 struct blkcipher_walk *walk,
207 struct ablkcipher_walk *walk, int err);
209 struct ablkcipher_walk *walk);
210 void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);
296 static inline void blkcipher_walk_init(struct blkcipher_walk *walk, in blkcipher_walk_init() argument
301 walk->in.sg = src; in blkcipher_walk_init()
[all …]
mcryptd.h
59 struct crypto_hash_walk walk; member
/linux-4.4.14/arch/s390/crypto/
des_s390.c
87 u8 *key, struct blkcipher_walk *walk) in ecb_desall_crypt() argument
89 int ret = blkcipher_walk_virt(desc, walk); in ecb_desall_crypt()
92 while ((nbytes = walk->nbytes)) { in ecb_desall_crypt()
95 u8 *out = walk->dst.virt.addr; in ecb_desall_crypt()
96 u8 *in = walk->src.virt.addr; in ecb_desall_crypt()
103 ret = blkcipher_walk_done(desc, walk, nbytes); in ecb_desall_crypt()
110 struct blkcipher_walk *walk) in cbc_desall_crypt() argument
113 int ret = blkcipher_walk_virt(desc, walk); in cbc_desall_crypt()
114 unsigned int nbytes = walk->nbytes; in cbc_desall_crypt()
123 memcpy(param.iv, walk->iv, DES_BLOCK_SIZE); in cbc_desall_crypt()
[all …]
aes_s390.c
317 struct blkcipher_walk *walk) in ecb_aes_crypt() argument
319 int ret = blkcipher_walk_virt(desc, walk); in ecb_aes_crypt()
322 while ((nbytes = walk->nbytes)) { in ecb_aes_crypt()
325 u8 *out = walk->dst.virt.addr; in ecb_aes_crypt()
326 u8 *in = walk->src.virt.addr; in ecb_aes_crypt()
333 ret = blkcipher_walk_done(desc, walk, nbytes); in ecb_aes_crypt()
344 struct blkcipher_walk walk; in ecb_aes_encrypt() local
349 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_aes_encrypt()
350 return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk); in ecb_aes_encrypt()
358 struct blkcipher_walk walk; in ecb_aes_decrypt() local
[all …]
/linux-4.4.14/arch/powerpc/crypto/
aes-spe-glue.c
183 struct blkcipher_walk walk; in ppc_ecb_encrypt() local
188 blkcipher_walk_init(&walk, dst, src, nbytes); in ppc_ecb_encrypt()
189 err = blkcipher_walk_virt(desc, &walk); in ppc_ecb_encrypt()
191 while ((nbytes = walk.nbytes)) { in ppc_ecb_encrypt()
197 ppc_encrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr, in ppc_ecb_encrypt()
201 err = blkcipher_walk_done(desc, &walk, ubytes); in ppc_ecb_encrypt()
211 struct blkcipher_walk walk; in ppc_ecb_decrypt() local
216 blkcipher_walk_init(&walk, dst, src, nbytes); in ppc_ecb_decrypt()
217 err = blkcipher_walk_virt(desc, &walk); in ppc_ecb_decrypt()
219 while ((nbytes = walk.nbytes)) { in ppc_ecb_decrypt()
[all …]
/linux-4.4.14/arch/sparc/crypto/
aes_glue.c
220 struct blkcipher_walk walk; in ecb_encrypt() local
223 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_encrypt()
224 err = blkcipher_walk_virt(desc, &walk); in ecb_encrypt()
228 while ((nbytes = walk.nbytes)) { in ecb_encrypt()
233 (const u64 *)walk.src.virt.addr, in ecb_encrypt()
234 (u64 *) walk.dst.virt.addr, in ecb_encrypt()
238 err = blkcipher_walk_done(desc, &walk, nbytes); in ecb_encrypt()
249 struct blkcipher_walk walk; in ecb_decrypt() local
253 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_decrypt()
254 err = blkcipher_walk_virt(desc, &walk); in ecb_decrypt()
[all …]
des_glue.c
98 struct blkcipher_walk walk; in __ecb_crypt() local
101 blkcipher_walk_init(&walk, dst, src, nbytes); in __ecb_crypt()
102 err = blkcipher_walk_virt(desc, &walk); in __ecb_crypt()
109 while ((nbytes = walk.nbytes)) { in __ecb_crypt()
113 des_sparc64_ecb_crypt((const u64 *)walk.src.virt.addr, in __ecb_crypt()
114 (u64 *) walk.dst.virt.addr, in __ecb_crypt()
118 err = blkcipher_walk_done(desc, &walk, nbytes); in __ecb_crypt()
146 struct blkcipher_walk walk; in cbc_encrypt() local
149 blkcipher_walk_init(&walk, dst, src, nbytes); in cbc_encrypt()
150 err = blkcipher_walk_virt(desc, &walk); in cbc_encrypt()
[all …]
camellia_glue.c
90 struct blkcipher_walk walk; in __ecb_crypt() local
99 blkcipher_walk_init(&walk, dst, src, nbytes); in __ecb_crypt()
100 err = blkcipher_walk_virt(desc, &walk); in __ecb_crypt()
108 while ((nbytes = walk.nbytes)) { in __ecb_crypt()
115 src64 = (const u64 *)walk.src.virt.addr; in __ecb_crypt()
116 dst64 = (u64 *) walk.dst.virt.addr; in __ecb_crypt()
120 err = blkcipher_walk_done(desc, &walk, nbytes); in __ecb_crypt()
153 struct blkcipher_walk walk; in cbc_encrypt() local
162 blkcipher_walk_init(&walk, dst, src, nbytes); in cbc_encrypt()
163 err = blkcipher_walk_virt(desc, &walk); in cbc_encrypt()
[all …]
/linux-4.4.14/drivers/crypto/vmx/
aes_ctr.c
94 struct blkcipher_walk *walk) in p8_aes_ctr_final() argument
96 u8 *ctrblk = walk->iv; in p8_aes_ctr_final()
98 u8 *src = walk->src.virt.addr; in p8_aes_ctr_final()
99 u8 *dst = walk->dst.virt.addr; in p8_aes_ctr_final()
100 unsigned int nbytes = walk->nbytes; in p8_aes_ctr_final()
119 struct blkcipher_walk walk; in p8_aes_ctr_crypt() local
132 blkcipher_walk_init(&walk, dst, src, nbytes); in p8_aes_ctr_crypt()
133 ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); in p8_aes_ctr_crypt()
134 while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { in p8_aes_ctr_crypt()
138 aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, in p8_aes_ctr_crypt()
[all …]
aes_cbc.c
103 struct blkcipher_walk walk; in p8_aes_cbc_encrypt() local
121 blkcipher_walk_init(&walk, dst, src, nbytes); in p8_aes_cbc_encrypt()
122 ret = blkcipher_walk_virt(desc, &walk); in p8_aes_cbc_encrypt()
123 while ((nbytes = walk.nbytes)) { in p8_aes_cbc_encrypt()
124 aes_p8_cbc_encrypt(walk.src.virt.addr, in p8_aes_cbc_encrypt()
125 walk.dst.virt.addr, in p8_aes_cbc_encrypt()
127 &ctx->enc_key, walk.iv, 1); in p8_aes_cbc_encrypt()
129 ret = blkcipher_walk_done(desc, &walk, nbytes); in p8_aes_cbc_encrypt()
144 struct blkcipher_walk walk; in p8_aes_cbc_decrypt() local
162 blkcipher_walk_init(&walk, dst, src, nbytes); in p8_aes_cbc_decrypt()
[all …]
/linux-4.4.14/drivers/atm/
idt77105.c
85 struct idt77105_priv *walk; in idt77105_stats_timer_func() local
90 for (walk = idt77105_all; walk; walk = walk->next) { in idt77105_stats_timer_func()
91 dev = walk->dev; in idt77105_stats_timer_func()
93 stats = &walk->stats; in idt77105_stats_timer_func()
114 struct idt77105_priv *walk; in idt77105_restart_timer_func() local
119 for (walk = idt77105_all; walk; walk = walk->next) { in idt77105_restart_timer_func()
120 dev = walk->dev; in idt77105_restart_timer_func()
134 PUT( walk->old_mcr ,MCR); in idt77105_restart_timer_func()
324 struct idt77105_priv *walk, *prev; in idt77105_stop() local
332 for (prev = NULL, walk = idt77105_all ; in idt77105_stop()
[all …]
suni.c
58 struct suni_priv *walk; in suni_hz() local
62 for (walk = sunis; walk; walk = walk->next) { in suni_hz()
63 dev = walk->dev; in suni_hz()
64 stats = &walk->sonet_stats; in suni_hz()
344 struct suni_priv **walk; in suni_stop() local
349 for (walk = &sunis; *walk != PRIV(dev); in suni_stop()
350 walk = &PRIV((*walk)->dev)->next); in suni_stop()
351 *walk = PRIV((*walk)->dev)->next; in suni_stop()
/linux-4.4.14/drivers/crypto/
padlock-aes.c
347 struct blkcipher_walk walk; in ecb_aes_encrypt() local
353 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_aes_encrypt()
354 err = blkcipher_walk_virt(desc, &walk); in ecb_aes_encrypt()
357 while ((nbytes = walk.nbytes)) { in ecb_aes_encrypt()
358 padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, in ecb_aes_encrypt()
362 err = blkcipher_walk_done(desc, &walk, nbytes); in ecb_aes_encrypt()
376 struct blkcipher_walk walk; in ecb_aes_decrypt() local
382 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_aes_decrypt()
383 err = blkcipher_walk_virt(desc, &walk); in ecb_aes_decrypt()
386 while ((nbytes = walk.nbytes)) { in ecb_aes_decrypt()
[all …]
geode-aes.c
309 struct blkcipher_walk walk; in geode_cbc_decrypt() local
315 blkcipher_walk_init(&walk, dst, src, nbytes); in geode_cbc_decrypt()
316 err = blkcipher_walk_virt(desc, &walk); in geode_cbc_decrypt()
317 op->iv = walk.iv; in geode_cbc_decrypt()
319 while ((nbytes = walk.nbytes)) { in geode_cbc_decrypt()
320 op->src = walk.src.virt.addr, in geode_cbc_decrypt()
321 op->dst = walk.dst.virt.addr; in geode_cbc_decrypt()
329 err = blkcipher_walk_done(desc, &walk, nbytes); in geode_cbc_decrypt()
341 struct blkcipher_walk walk; in geode_cbc_encrypt() local
347 blkcipher_walk_init(&walk, dst, src, nbytes); in geode_cbc_encrypt()
[all …]
n2_core.c
513 struct crypto_hash_walk walk; in n2_do_async_digest() local
536 nbytes = crypto_hash_walk_first(req, &walk); in n2_do_async_digest()
555 ent->src_addr = __pa(walk.data); in n2_do_async_digest()
563 nbytes = crypto_hash_walk_done(&walk, 0); in n2_do_async_digest()
568 ent->src_addr = __pa(walk.data); in n2_do_async_digest()
576 nbytes = crypto_hash_walk_done(&walk, 0); in n2_do_async_digest()
672 struct ablkcipher_walk walk; member
711 struct ablkcipher_walk walk; member
876 struct ablkcipher_walk *walk = &rctx->walk; in n2_compute_chunks() local
883 ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes); in n2_compute_chunks()
[all …]
hifn_795x.c
658 struct hifn_cipher_walk walk; member
1390 t = &rctx->walk.cache[0]; in hifn_setup_dma()
1393 if (t->length && rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { in hifn_setup_dma()
1586 rctx->walk.flags = 0; in hifn_setup_session()
1594 rctx->walk.flags |= ASYNC_FLAGS_MISALIGNED; in hifn_setup_session()
1600 if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { in hifn_setup_session()
1601 err = hifn_cipher_walk_init(&rctx->walk, idx, GFP_ATOMIC); in hifn_setup_session()
1606 sg_num = hifn_cipher_walk(req, &rctx->walk); in hifn_setup_session()
1666 rctx.walk.cache[0].length = 0; in hifn_test()
1772 if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { in hifn_process_ready()
[all …]
omap-des.c
389 struct scatter_walk walk; in sg_copy_buf() local
394 scatterwalk_start(&walk, sg); in sg_copy_buf()
395 scatterwalk_advance(&walk, start); in sg_copy_buf()
396 scatterwalk_copychunks(buf, &walk, nbytes, out); in sg_copy_buf()
397 scatterwalk_done(&walk, out, 0); in sg_copy_buf()
omap-aes.c
402 struct scatter_walk walk; in sg_copy_buf() local
407 scatterwalk_start(&walk, sg); in sg_copy_buf()
408 scatterwalk_advance(&walk, start); in sg_copy_buf()
409 scatterwalk_copychunks(buf, &walk, nbytes, out); in sg_copy_buf()
410 scatterwalk_done(&walk, out, 0); in sg_copy_buf()
/linux-4.4.14/include/crypto/internal/
hash.h
55 int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
57 struct crypto_hash_walk *walk);
59 struct crypto_hash_walk *walk);
61 struct crypto_hash_walk *walk,
64 static inline int crypto_ahash_walk_done(struct crypto_hash_walk *walk, in crypto_ahash_walk_done() argument
67 return crypto_hash_walk_done(walk, err); in crypto_ahash_walk_done()
70 static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk) in crypto_hash_walk_last() argument
72 return !(walk->entrylen | walk->total); in crypto_hash_walk_last()
75 static inline int crypto_ahash_walk_last(struct crypto_hash_walk *walk) in crypto_ahash_walk_last() argument
77 return crypto_hash_walk_last(walk); in crypto_ahash_walk_last()
/linux-4.4.14/Documentation/filesystems/
path-lookup.txt
5 performing a path walk. Typically, for every open(), stat() etc., the path name
17 thus in every component during path look-up. Since 2.5.10 onwards, fast-walk
30 are path-walk intensive tend to do path lookups starting from a common dentry
34 Since 2.6.38, RCU is used to make a significant part of the entire path walk
36 even stores into cachelines of common dentries). This is known as "rcu-walk"
56 permissions on the parent inode to be able to walk into it.
67 - find the start point of the walk;
92 point to perform the next step of our path walk against.
162 still at 2. Now when it follows 2's 'next' pointer, it will walk off into
180 start the next part of the path walk from).
[all …]
porting
354 via rcu-walk path walk (basically, if the file can have had a path name in the
364 vfs now tries to do path walking in "rcu-walk mode", which avoids
368 filesystem callbacks, the vfs drops out of rcu-walk mode before the fs call, so
370 the benefits of rcu-walk mode. We will begin to add filesystem callbacks that
371 are rcu-walk aware, shown below. Filesystems should take advantage of this
377 the filesystem provides it), which requires dropping out of rcu-walk mode. This
378 may now be called in rcu-walk mode (nd->flags & LOOKUP_RCU). -ECHILD should be
379 returned if the filesystem cannot handle rcu-walk. See
383 directory inodes on the way down a path walk (to check for exec permission). It
384 must now be rcu-walk aware (mask & MAY_NOT_BLOCK). See
vfs.txt
455 May be called in rcu-walk mode (mask & MAY_NOT_BLOCK). If in rcu-walk
459 If a situation is encountered that rcu-walk cannot handle, return
460 -ECHILD and it will be called again in ref-walk mode.
954 d_revalidate may be called in rcu-walk mode (flags & LOOKUP_RCU).
955 If in rcu-walk mode, the filesystem must revalidate the dentry without
960 If a situation is encountered that rcu-walk cannot handle, return
961 -ECHILD and it will be called again in ref-walk mode.
964 This is called when a path-walk ends at dentry that was not acquired by
975 d_weak_revalidate is only called after leaving rcu-walk mode.
999 "rcu-walk", ie. without any locks or references on things.
[all …]
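
The porting and vfs.txt excerpts above state the contract for rcu-walk-aware callbacks: run without sleeping or taking references, and return -ECHILD to make the VFS retry in ref-walk mode. A minimal sketch of an rcu-walk-aware ->permission op under those rules; toy_needs_io is a hypothetical predicate:

#include <linux/fs.h>

static bool toy_needs_io(struct inode *inode)
{
	return false;	/* hypothetical: would this check have to sleep? */
}

static int toy_permission(struct inode *inode, int mask)
{
	if (mask & MAY_NOT_BLOCK) {
		/* rcu-walk mode: no sleeping, no refcounts */
		if (toy_needs_io(inode))
			return -ECHILD;	/* ask for a ref-walk retry */
	}
	return generic_permission(inode, mask);
}
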
Locking
25 rename_lock ->d_lock may block rcu-walk
26 d_revalidate: no no yes (ref-walk) maybe
36 d_manage: no no yes (ref-walk) maybe
89 permission: no (may not block if called in rcu-walk mode)
autofs4-mount-control.txt
23 needs to walk back up the mount tree to construct a path, such as
65 trigger. So when we walk on the path we mount shark:/autofs/export1 "on
xfs-self-describing-metadata.txt
105 object, we don't know what inode it belongs to and hence have to walk the entire
xfs-delayed-logging-design.txt
402 it. The fact that we walk the log items (in the CIL) just to chain the log
668 sequencing also requires the same lock, list walk, and blocking mechanism to
/linux-4.4.14/fs/
select.c
801 struct poll_list *walk; in do_poll() local
804 for (walk = list; walk != NULL; walk = walk->next) { in do_poll()
807 pfd = walk->entries; in do_poll()
808 pfd_end = pfd + walk->len; in do_poll()
880 struct poll_list *walk = head; in do_sys_poll() local
888 walk->next = NULL; in do_sys_poll()
889 walk->len = len; in do_sys_poll()
893 if (copy_from_user(walk->entries, ufds + nfds-todo, in do_sys_poll()
894 sizeof(struct pollfd) * walk->len)) in do_sys_poll()
897 todo -= walk->len; in do_sys_poll()
[all …]
/linux-4.4.14/fs/proc/
task_mmu.c
489 struct mm_walk *walk) in smaps_pte_entry() argument
491 struct mem_size_stats *mss = walk->private; in smaps_pte_entry()
492 struct vm_area_struct *vma = walk->vma; in smaps_pte_entry()
524 struct mm_walk *walk) in smaps_pmd_entry() argument
526 struct mem_size_stats *mss = walk->private; in smaps_pmd_entry()
527 struct vm_area_struct *vma = walk->vma; in smaps_pmd_entry()
540 struct mm_walk *walk) in smaps_pmd_entry() argument
546 struct mm_walk *walk) in smaps_pte_range() argument
548 struct vm_area_struct *vma = walk->vma; in smaps_pte_range()
553 smaps_pmd_entry(pmd, addr, walk); in smaps_pte_range()
[all …]
/linux-4.4.14/security/
device_cgroup.c
96 struct dev_exception_item *excopy, *walk; in dev_exception_add() local
104 list_for_each_entry(walk, &dev_cgroup->exceptions, list) { in dev_exception_add()
105 if (walk->type != ex->type) in dev_exception_add()
107 if (walk->major != ex->major) in dev_exception_add()
109 if (walk->minor != ex->minor) in dev_exception_add()
112 walk->access |= ex->access; in dev_exception_add()
128 struct dev_exception_item *walk, *tmp; in dev_exception_rm() local
132 list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) { in dev_exception_rm()
133 if (walk->type != ex->type) in dev_exception_rm()
135 if (walk->major != ex->major) in dev_exception_rm()
[all …]
/linux-4.4.14/arch/openrisc/kernel/
dma.c
33 unsigned long next, struct mm_walk *walk) in page_set_nocache() argument
54 unsigned long next, struct mm_walk *walk) in page_clear_nocache() argument
90 struct mm_walk walk = { in or1k_dma_alloc() local
109 if (walk_page_range(va, va + size, &walk)) { in or1k_dma_alloc()
123 struct mm_walk walk = { in or1k_dma_free() local
130 WARN_ON(walk_page_range(va, va + size, &walk)); in or1k_dma_free()
/linux-4.4.14/fs/fat/
namei_msdos.c
26 unsigned char *walk; in msdos_format_name() local
43 for (walk = res; len && walk - res < 8; walk++) { in msdos_format_name()
62 if ((res == walk) && (c == 0xE5)) in msdos_format_name()
67 *walk = (!opts->nocase && c >= 'a' && c <= 'z') ? c - 32 : c; in msdos_format_name()
80 while (walk - res < 8) in msdos_format_name()
81 *walk++ = ' '; in msdos_format_name()
82 while (len > 0 && walk - res < MSDOS_NAME) { in msdos_format_name()
101 *walk++ = c - 32; in msdos_format_name()
103 *walk++ = c; in msdos_format_name()
110 while (walk - res < MSDOS_NAME) in msdos_format_name()
[all …]
inode.c
427 unsigned char exe_extensions[] = "EXECOMBAT", *walk; in is_exec() local
429 for (walk = exe_extensions; *walk; walk += 3) in is_exec()
430 if (!strncmp(extension, walk, 3)) in is_exec()
/linux-4.4.14/kernel/locking/
rtmutex.h
33 enum rtmutex_chainwalk walk) in debug_rt_mutex_detect_deadlock() argument
35 return walk == RT_MUTEX_FULL_CHAINWALK; in debug_rt_mutex_detect_deadlock()
rtmutex-debug.h
31 enum rtmutex_chainwalk walk) in debug_rt_mutex_detect_deadlock() argument
/linux-4.4.14/drivers/crypto/nx/
nx.c
167 struct scatter_walk walk; in nx_walk_and_build() local
174 scatterwalk_start(&walk, sg_src); in nx_walk_and_build()
185 scatterwalk_advance(&walk, start - offset); in nx_walk_and_build()
188 n = scatterwalk_clamp(&walk, len); in nx_walk_and_build()
192 scatterwalk_start(&walk, sg_next(walk.sg)); in nx_walk_and_build()
193 n = scatterwalk_clamp(&walk, len); in nx_walk_and_build()
195 dst = scatterwalk_map(&walk); in nx_walk_and_build()
201 scatterwalk_advance(&walk, n); in nx_walk_and_build()
202 scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len); in nx_walk_and_build()
nx-aes-gcm.c
117 struct scatter_walk walk; in nx_gca() local
124 scatterwalk_start(&walk, req->src); in nx_gca()
125 scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG); in nx_gca()
126 scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0); in nx_gca()
/linux-4.4.14/net/xfrm/
xfrm_policy.c
188 if (unlikely(xp->walk.dead)) in xfrm_policy_timer()
252 if (unlikely(pol->walk.dead)) in xfrm_policy_flo_get()
264 return !pol->walk.dead; in xfrm_policy_flo_check()
290 INIT_LIST_HEAD(&policy->walk.all); in xfrm_policy_alloc()
318 BUG_ON(!policy->walk.dead); in xfrm_policy_destroy()
333 policy->walk.dead = 1; in xfrm_policy_kill()
628 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) { in xfrm_hash_rebuild()
997 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, in xfrm_policy_walk() argument
1005 if (walk->type >= XFRM_POLICY_TYPE_MAX && in xfrm_policy_walk()
1006 walk->type != XFRM_POLICY_TYPE_ANY) in xfrm_policy_walk()
[all …]
xfrm_state.c
1614 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk, in xfrm_state_walk() argument
1622 if (walk->seq != 0 && list_empty(&walk->all)) in xfrm_state_walk()
1626 if (list_empty(&walk->all)) in xfrm_state_walk()
1629 x = list_first_entry(&walk->all, struct xfrm_state_walk, all); in xfrm_state_walk()
1634 if (!xfrm_id_proto_match(state->id.proto, walk->proto)) in xfrm_state_walk()
1636 if (!__xfrm_state_filter_match(state, walk->filter)) in xfrm_state_walk()
1638 err = func(state, walk->seq, data); in xfrm_state_walk()
1640 list_move_tail(&walk->all, &x->all); in xfrm_state_walk()
1643 walk->seq++; in xfrm_state_walk()
1645 if (walk->seq == 0) { in xfrm_state_walk()
[all …]
xfrm_user.c
894 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1]; in xfrm_dump_sa_done() local
898 xfrm_state_walk_done(walk, net); in xfrm_dump_sa_done()
906 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1]; in xfrm_dump_sa() local
940 xfrm_state_walk_init(walk, proto, filter); in xfrm_dump_sa()
943 (void) xfrm_state_walk(net, walk, dump_one_state, &info); in xfrm_dump_sa()
1496 xp->walk.dead = 1; in xfrm_policy_construct()
1648 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; in xfrm_dump_policy_done() local
1651 xfrm_policy_walk_done(walk, net); in xfrm_dump_policy_done()
1658 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; in xfrm_dump_policy() local
1671 xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY); in xfrm_dump_policy()
[all …]
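
The xfrm hits above expose a cursor-style iterator: xfrm_policy_walk_init(), xfrm_policy_walk(), and xfrm_policy_walk_done() run a callback per policy, the same shape gen_reqid() in net/key/af_key.c (below) uses. A minimal sketch; count_one and count_policies are illustrative names:

#include <net/xfrm.h>

/* Invoked once per policy; ptr is the caller's cookie. */
static int count_one(struct xfrm_policy *xp, int dir, int count, void *ptr)
{
	(*(int *)ptr)++;
	return 0;		/* non-zero stops the walk */
}

static int count_policies(struct net *net)
{
	struct xfrm_policy_walk walk;
	int count = 0;

	xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_MAIN);
	xfrm_policy_walk(net, &walk, count_one, &count);
	xfrm_policy_walk_done(&walk, net);
	return count;
}
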
/linux-4.4.14/arch/x86/crypto/sha-mb/
sha1_mb.c
385 nbytes = crypto_ahash_walk_done(&rctx->walk, 0); in sha_finish_walk()
391 if (crypto_ahash_walk_last(&rctx->walk)) { in sha_finish_walk()
399 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag); in sha_finish_walk()
510 nbytes = crypto_ahash_walk_first(req, &rctx->walk); in sha1_mb_update()
517 if (crypto_ahash_walk_last(&rctx->walk)) in sha1_mb_update()
524 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, HASH_UPDATE); in sha1_mb_update()
568 nbytes = crypto_ahash_walk_first(req, &rctx->walk); in sha1_mb_finup()
575 if (crypto_ahash_walk_last(&rctx->walk)) { in sha1_mb_finup()
587 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag); in sha1_mb_finup()
/linux-4.4.14/net/sched/
cls_tcindex.c
157 struct tcindex_filter __rcu **walk; in tcindex_delete() local
168 walk = p->h + i; in tcindex_delete()
169 for (f = rtnl_dereference(*walk); f; in tcindex_delete()
170 walk = &f->next, f = rtnl_dereference(*walk)) { in tcindex_delete()
178 rcu_assign_pointer(*walk, rtnl_dereference(f->next)); in tcindex_delete()
582 .walk = tcindex_walk,
sch_ingress.c
86 .walk = ingress_walk,
cls_cgroup.c
197 .walk = cls_cgroup_walk,
act_api.c
320 if (!act->walk) in tcf_register_action()
321 act->walk = tcf_generic_walker; in tcf_register_action()
822 err = a.ops->walk(skb, &dcb, RTM_DELACTION, &a); in tca_action_flush()
1081 ret = a_o->walk(skb, cb, RTM_GETACTION, &a); in tc_dump_action()
cls_api.c
486 if (tp->ops->walk == NULL) in tc_dump_tfilter()
494 tp->ops->walk(tp, &arg.w); in tc_dump_tfilter()
sch_mq.c
232 .walk = mq_walk,
cls_basic.c
294 .walk = basic_walk,
act_police.c
356 .walk = tcf_act_police_walker
sch_multiq.c
402 .walk = multiq_walk,
sch_prio.c
365 .walk = prio_walk,
sch_red.c
353 .walk = red_walk,
cls_fw.c
421 .walk = fw_walk,
sch_mqprio.c
399 .walk = mqprio_walk,
sch_dsmark.c
477 .walk = dsmark_walk,
sch_tbf.c
544 .walk = tbf_walk,
cls_bpf.c
538 .walk = cls_bpf_walk,
sch_api.c
164 if (!(cops->get && cops->put && cops->walk && cops->leaf)) in register_qdisc()
1086 q->ops->cl_ops->walk(q, &arg.w); in check_loop()
1755 q->ops->cl_ops->walk(q, &arg.w); in tc_dump_tclass_qdisc()
sch_drr.c
497 .walk = drr_walk,
cls_route.c
657 .walk = route4_walk,
sch_atm.c
659 .walk = atm_tc_walk,
cls_flow.c
689 .walk = flow_walk,
sch_fq_codel.c
629 .walk = fq_codel_walk,
sch_sfb.c
677 .walk = sfb_walk,
cls_rsvp.h
726 .walk = rsvp_walk,
cls_flower.c
677 .walk = fl_walk,
sch_sfq.c
889 .walk = sfq_walk,
sch_netem.c
1134 .walk = netem_walk,
cls_u32.c
1061 .walk = u32_walk,
sch_qfq.c
1553 .walk = qfq_walk,
sch_htb.c
1592 .walk = htb_walk,
sch_hfsc.c
1718 .walk = hfsc_walk
sch_cbq.c
2022 .walk = cbq_walk,
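
Every .walk hook listed above implements the qdisc_walker protocol from include/net/sch_generic.h (see the hit further down): honor walker->stop, skip the first walker->skip classes, call walker->fn() for each class, and count what was visited. A minimal sketch for a hypothetical single-class qdisc, assuming the 4.4-era struct qdisc_walker fields:

#include <net/pkt_sched.h>

static void toy_qdisc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (walker->stop)
		return;

	/* one class only, identified by the opaque handle 1 */
	if (walker->count >= walker->skip) {
		if (walker->fn(sch, 1, walker) < 0) {
			walker->stop = 1;	/* consumer aborted the walk */
			return;
		}
	}
	walker->count++;
}
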
/linux-4.4.14/drivers/vfio/pci/
vfio_pci.c
402 struct vfio_pci_walk_info *walk = data; in vfio_pci_walk_wrapper() local
404 if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot)) in vfio_pci_walk_wrapper()
405 walk->ret = walk->fn(pdev, walk->data); in vfio_pci_walk_wrapper()
407 return walk->ret; in vfio_pci_walk_wrapper()
415 struct vfio_pci_walk_info walk = { in vfio_pci_for_each_slot_or_bus() local
419 pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk); in vfio_pci_for_each_slot_or_bus()
421 return walk.ret; in vfio_pci_for_each_slot_or_bus()
/linux-4.4.14/ipc/
sem.c
790 struct list_head *walk; in wake_const_ops() local
799 walk = pending_list->next; in wake_const_ops()
800 while (walk != pending_list) { in wake_const_ops()
803 q = container_of(walk, struct sem_queue, list); in wake_const_ops()
804 walk = walk->next; in wake_const_ops()
891 struct list_head *walk; in update_queue() local
901 walk = pending_list->next; in update_queue()
902 while (walk != pending_list) { in update_queue()
905 q = container_of(walk, struct sem_queue, list); in update_queue()
906 walk = walk->next; in update_queue()
mqueue.c
543 struct ext_wait_queue *walk; in wq_add() local
547 list_for_each_entry(walk, &info->e_wait_q[sr].list, list) { in wq_add()
548 if (walk->task->static_prio <= current->static_prio) { in wq_add()
549 list_add_tail(&ewp->list, &walk->list); in wq_add()
/linux-4.4.14/net/atm/
clip.c
88 struct clip_vcc **walk; in unlink_clip_vcc() local
96 for (walk = &entry->vccs; *walk; walk = &(*walk)->next) in unlink_clip_vcc()
97 if (*walk == clip_vcc) { in unlink_clip_vcc()
100 *walk = clip_vcc->next; /* atomic */ in unlink_clip_vcc()
common.c
321 struct atm_vcc *walk; in check_ci() local
324 walk = atm_sk(s); in check_ci()
325 if (walk->dev != vcc->dev) in check_ci()
327 if (test_bit(ATM_VF_ADDR, &walk->flags) && walk->vpi == vpi && in check_ci()
328 walk->vci == vci && ((walk->qos.txtp.traffic_class != in check_ci()
330 (walk->qos.rxtp.traffic_class != ATM_NONE && in check_ci()
/linux-4.4.14/net/l2tp/
l2tp_debugfs.c
107 struct hlist_node *walk; in l2tp_dfs_seq_tunnel_show() local
112 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { in l2tp_dfs_seq_tunnel_show()
115 session = hlist_entry(walk, struct l2tp_session, hlist); in l2tp_dfs_seq_tunnel_show()
l2tp_core.c
1241 struct hlist_node *walk; in l2tp_tunnel_closeall() local
1253 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { in l2tp_tunnel_closeall()
1254 session = hlist_entry(walk, struct l2tp_session, hlist); in l2tp_tunnel_closeall()
/linux-4.4.14/arch/s390/mm/
pgtable.c
1154 unsigned long next, struct mm_walk *walk) in __s390_enable_skey() argument
1166 ptep_flush_direct(walk->mm, addr, pte); in __s390_enable_skey()
1181 struct mm_walk walk = { .pte_entry = __s390_enable_skey }; in s390_enable_skey() local
1201 walk.mm = mm; in s390_enable_skey()
1202 walk_page_range(0, TASK_SIZE, &walk); in s390_enable_skey()
1214 unsigned long next, struct mm_walk *walk) in __s390_reset_cmma() argument
1226 struct mm_walk walk = { .pte_entry = __s390_reset_cmma }; in s390_reset_cmma() local
1229 walk.mm = mm; in s390_reset_cmma()
1230 walk_page_range(0, TASK_SIZE, &walk); in s390_reset_cmma()
/linux-4.4.14/drivers/crypto/ux500/cryp/
cryp_core.c
883 struct ablkcipher_walk walk; in ablk_crypt() local
898 ablkcipher_walk_init(&walk, areq->dst, areq->src, areq->nbytes); in ablk_crypt()
899 ret = ablkcipher_walk_phys(areq, &walk); in ablk_crypt()
907 while ((nbytes = walk.nbytes) > 0) { in ablk_crypt()
908 ctx->iv = walk.iv; in ablk_crypt()
909 src_paddr = (page_to_phys(walk.src.page) + walk.src.offset); in ablk_crypt()
912 dst_paddr = (page_to_phys(walk.dst.page) + walk.dst.offset); in ablk_crypt()
922 ret = ablkcipher_walk_done(areq, &walk, nbytes); in ablk_crypt()
926 ablkcipher_walk_complete(&walk); in ablk_crypt()
/linux-4.4.14/arch/powerpc/mm/
subpage-prot.c
135 unsigned long end, struct mm_walk *walk) in subpage_walk_pmd_entry() argument
137 struct vm_area_struct *vma = walk->vma; in subpage_walk_pmd_entry()
/linux-4.4.14/tools/testing/selftests/net/
psock_tpacket.c
83 void (*walk)(int sock, struct ring *ring); member
600 ring->walk = walk_v1_v2; in __v1_v2_fill()
620 ring->walk = walk_v3; in __v3_fill()
704 ring->walk(sock, ring); in walk_ring()
/linux-4.4.14/include/net/
xfrm.h
513 struct xfrm_policy_walk_entry walk; member
542 struct xfrm_policy_walk_entry walk; member
1436 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
1438 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1440 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
1587 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
1588 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1591 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
act_api.h
110 int (*walk)(struct sk_buff *, struct netlink_callback *, int, struct tc_action *); member
sch_generic.h
167 void (*walk)(struct Qdisc *, struct qdisc_walker * arg); member
227 void (*walk)(struct tcf_proto*, struct tcf_walker *arg); member
/linux-4.4.14/include/linux/
Dmm.h1119 unsigned long next, struct mm_walk *walk);
1121 unsigned long next, struct mm_walk *walk);
1123 struct mm_walk *walk);
1126 struct mm_walk *walk);
1128 struct mm_walk *walk);
1135 struct mm_walk *walk);
1136 int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
/linux-4.4.14/scripts/gdb/linux/
symbols.py
78 for root, dirs, files in os.walk(path):
/linux-4.4.14/Documentation/networking/
fib_trie.txt
76 it is run to optimize and reorganize. It will walk the trie upwards
103 slower than the corresponding fib_hash function, as we have to walk the
tcp.txt
102 On a timer we walk the retransmit list to send any retransmits, update the
packet_mmap.txt
1038 frames to be updated resp. the frame handed over to the application, iv) walk
filter.txt
422 will walk through the pcap file continuing from the current packet and
/linux-4.4.14/fs/jffs2/
README.Locking
88 erase_completion_lock. So you can walk the list only while holding the
89 erase_completion_lock, and can drop the lock temporarily mid-walk as
/linux-4.4.14/net/netfilter/
nft_rbtree.c
260 .walk = nft_rbtree_walk,
nft_hash.c
374 .walk = nft_hash_walk,
nf_tables_api.c
2947 set->ops->walk(ctx, set, &iter); in nf_tables_bind_set()
3188 set->ops->walk(&ctx, set, &args.iter); in nf_tables_dump_set()
4243 set->ops->walk(ctx, set, &iter); in nf_tables_check_loops()
/linux-4.4.14/drivers/crypto/ux500/hash/
hash_core.c
1087 struct crypto_hash_walk walk; in hash_hw_update() local
1088 int msg_length = crypto_hash_walk_first(req, &walk); in hash_hw_update()
1111 data_buffer = walk.data; in hash_hw_update()
1121 msg_length = crypto_hash_walk_done(&walk, 0); in hash_hw_update()
/linux-4.4.14/arch/ia64/kernel/
efi.c
305 walk (efi_freemem_callback_t callback, void *arg, u64 attr) in walk() function
329 walk(callback, arg, EFI_MEMORY_WB); in efi_memmap_walk()
339 walk(callback, arg, EFI_MEMORY_UC); in efi_memmap_walk_uc()
/linux-4.4.14/Documentation/
robust-futex-ABI.txt
87 the kernel will walk this list, mark any such locks with a bit
118 list 'head' is, and to walk the list on thread exit, handling locks
BUG-HUNTING
122 And then walk through that file, one routine at a time and
clk.txt
112 Let's walk through enabling this clk from driver code:
sysfs-rules.txt
160 by its subsystem value. You need to walk up the chain until you find
/linux-4.4.14/Documentation/virtual/kvm/
mmu.txt
291 - walk shadow page table
299 - if needed, walk the guest page tables to determine the guest translation
305 - walk the shadow page table to find the spte for the translation,
317 - walk the shadow page hierarchy and drop affected translations
/linux-4.4.14/drivers/net/ethernet/sun/
sungem.c
663 int walk = entry; in gem_tx() local
668 walk = NEXT_TX(walk); in gem_tx()
669 if (walk == limit) in gem_tx()
671 if (walk == last) in gem_tx()
/linux-4.4.14/Documentation/cgroups/
freezer-subsystem.txt
13 walk /proc or invoke a kernel interface to gather information about the
/linux-4.4.14/arch/arc/mm/
tlbex.S
215 bnz.d 2f ; YES: PGD == PMD has THP PTE: stop pgd walk
/linux-4.4.14/Documentation/block/
biovecs.txt
53 it had to walk two different bios at the same time, keeping both bi_idx and
/linux-4.4.14/Documentation/locking/
rt-mutex-design.txt
401 High level overview of the PI chain walk
404 The PI chain walk is implemented by the function rt_mutex_adjust_prio_chain.
449 walk is only needed when a new top pi waiter is made to a task.
555 Taking of a mutex (The walk through)
558 OK, now let's take a look at the detailed walk through of what happens when
/linux-4.4.14/net/key/
af_key.c
1868 struct xfrm_policy_walk walk; in gen_reqid() local
1878 xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_MAIN); in gen_reqid()
1879 rc = xfrm_policy_walk(net, &walk, check_reqid, (void*)&reqid); in gen_reqid()
1880 xfrm_policy_walk_done(&walk, net); in gen_reqid()
2299 xp->walk.dead = 1; in pfkey_spdadd()
3271 xp->walk.dead = 1; in pfkey_compile_policy()
/linux-4.4.14/include/net/netfilter/
nf_tables.h
260 void (*walk)(const struct nft_ctx *ctx, member
/linux-4.4.14/Documentation/vm/
unevictable-lru.txt
513 for the munlock case, calls __munlock_vma_pages_range() to walk the page table
564 processing. Again, these functions walk the respective reverse maps looking
569 Note that try_to_munlock()'s reverse map walk must visit every VMA in a page's
transhuge.txt
357 pagetable walk). If the second pmd_trans_huge returns false, you
/linux-4.4.14/Documentation/timers/
hrtimers.txt
97 queued timers, without having to walk the rbtree.
/linux-4.4.14/fs/btrfs/
raid56.c
669 int walk = 0; in lock_stripe_add() local
673 walk++; in lock_stripe_add()
/linux-4.4.14/Documentation/nvdimm/
btt.txt
201 On startup, we analyze the BTT flog to create our list of free blocks. We walk
/linux-4.4.14/scripts/
analyze_suspend.py
2833 for dirname, dirnames, filenames in os.walk('/sys/devices'):
2903 for dirname, dirnames, filenames in os.walk('/sys/devices'):
3330 for dirname, dirnames, filenames in os.walk(subdir):
/linux-4.4.14/Documentation/DocBook/
kernel-api.xml.db
547 API-dmi-walk
/linux-4.4.14/drivers/scsi/aic7xxx/
aic79xx.seq
556 * manually walk the list counting MAXCMDCNT elements
722 * Brute force walk.
/linux-4.4.14/net/ipv4/
tcp_input.c
1742 goto walk; in tcp_sacktag_write_queue()
1759 walk: in tcp_sacktag_write_queue()
/linux-4.4.14/arch/arm/
Kconfig
1174 r3p*) erratum. A speculative memory access may cause a page table walk
/linux-4.4.14/Documentation/scsi/
ChangeLog.1992-1997
829 * scsi.c: When incrementing usage count, walk block linked list
/linux-4.4.14/Documentation/virtual/uml/
UserModeLinux-HOWTO.txt
2757 is at module_list. If it's not, walk down the next links, looking at