Searched refs:walk (Results 1 – 152 of 152) sorted by relevance

/linux-4.1.27/crypto/
blkcipher.c 40 struct blkcipher_walk *walk);
42 struct blkcipher_walk *walk);
44 static inline void blkcipher_map_src(struct blkcipher_walk *walk) in blkcipher_map_src() argument
46 walk->src.virt.addr = scatterwalk_map(&walk->in); in blkcipher_map_src()
49 static inline void blkcipher_map_dst(struct blkcipher_walk *walk) in blkcipher_map_dst() argument
51 walk->dst.virt.addr = scatterwalk_map(&walk->out); in blkcipher_map_dst()
54 static inline void blkcipher_unmap_src(struct blkcipher_walk *walk) in blkcipher_unmap_src() argument
56 scatterwalk_unmap(walk->src.virt.addr); in blkcipher_unmap_src()
59 static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk) in blkcipher_unmap_dst() argument
61 scatterwalk_unmap(walk->dst.virt.addr); in blkcipher_unmap_dst()
[all …]
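The map/unmap helpers above serve one recurring consumer pattern, visible again in the ecb.c, arc4.c and crypto_null.c hits below: initialise a walk over the source and destination scatterlists, then consume walk.nbytes bytes per iteration until blkcipher_walk_done() reports the walk exhausted. A minimal sketch of that loop (example_crypt and the elided cipher step are illustrative, not kernel code):

    static int example_crypt(struct blkcipher_desc *desc,
                             struct scatterlist *dst, struct scatterlist *src,
                             unsigned int nbytes)
    {
            struct blkcipher_walk walk;
            int err;

            blkcipher_walk_init(&walk, dst, src, nbytes);
            err = blkcipher_walk_virt(desc, &walk); /* map src/dst virtually */

            while (walk.nbytes) {
                    /* transform walk.nbytes bytes from walk.src.virt.addr
                     * into walk.dst.virt.addr here */
                    err = blkcipher_walk_done(desc, &walk, 0); /* 0 bytes left */
            }
            return err;
    }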
scatterwalk.c 33 void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg) in scatterwalk_start() argument
35 walk->sg = sg; in scatterwalk_start()
39 walk->offset = sg->offset; in scatterwalk_start()
43 void *scatterwalk_map(struct scatter_walk *walk) in scatterwalk_map() argument
45 return kmap_atomic(scatterwalk_page(walk)) + in scatterwalk_map()
46 offset_in_page(walk->offset); in scatterwalk_map()
50 static void scatterwalk_pagedone(struct scatter_walk *walk, int out, in scatterwalk_pagedone() argument
56 page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT); in scatterwalk_pagedone()
62 walk->offset += PAGE_SIZE - 1; in scatterwalk_pagedone()
63 walk->offset &= PAGE_MASK; in scatterwalk_pagedone()
[all …]
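scatterwalk_start()/scatterwalk_map() pair with clamp, advance and done in callers such as ccm.c's get_data_to_compute() further down. A hedged sketch of that idiom, copying len bytes out of a scatterlist (example_sg_copy is a hypothetical name; error handling omitted):

    static void example_sg_copy(u8 *out, struct scatterlist *sg, unsigned int len)
    {
            struct scatter_walk walk;
            unsigned int n;
            u8 *p;

            scatterwalk_start(&walk, sg);
            while (len) {
                    n = scatterwalk_clamp(&walk, len);
                    if (!n) {       /* entry exhausted, move to the next sg */
                            scatterwalk_start(&walk, sg_next(walk.sg));
                            n = scatterwalk_clamp(&walk, len);
                    }
                    p = scatterwalk_map(&walk);     /* kmap_atomic the page */
                    memcpy(out, p, n);
                    scatterwalk_unmap(p);
                    out += n;
                    len -= n;
                    scatterwalk_advance(&walk, n);
                    scatterwalk_done(&walk, 0, len); /* finish page if crossed */
            }
    }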
ahash.c 43 static int hash_walk_next(struct crypto_hash_walk *walk) in hash_walk_next() argument
45 unsigned int alignmask = walk->alignmask; in hash_walk_next()
46 unsigned int offset = walk->offset; in hash_walk_next()
47 unsigned int nbytes = min(walk->entrylen, in hash_walk_next()
50 if (walk->flags & CRYPTO_ALG_ASYNC) in hash_walk_next()
51 walk->data = kmap(walk->pg); in hash_walk_next()
53 walk->data = kmap_atomic(walk->pg); in hash_walk_next()
54 walk->data += offset; in hash_walk_next()
63 walk->entrylen -= nbytes; in hash_walk_next()
67 static int hash_walk_new_entry(struct crypto_hash_walk *walk) in hash_walk_new_entry() argument
[all …]
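hash_walk_next() backs the crypto_hash_walk iterator; the shash.c hit below shows the canonical consumer loop. A condensed sketch of it (the shash_desc is assumed to be set up elsewhere):

    static int example_hash_update(struct ahash_request *req,
                                   struct shash_desc *desc)
    {
            struct crypto_hash_walk walk;
            int nbytes;

            /* iterate mapped chunks; walk_done() unmaps and fetches the next */
            for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
                 nbytes = crypto_hash_walk_done(&walk, nbytes))
                    nbytes = crypto_shash_update(desc, walk.data, nbytes);

            return nbytes;
    }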
ablkcipher.c 47 void __ablkcipher_walk_complete(struct ablkcipher_walk *walk) in __ablkcipher_walk_complete() argument
51 list_for_each_entry_safe(p, tmp, &walk->buffers, entry) { in __ablkcipher_walk_complete()
59 static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk, in ablkcipher_queue_write() argument
62 p->dst = walk->out; in ablkcipher_queue_write()
63 list_add_tail(&p->entry, &walk->buffers); in ablkcipher_queue_write()
76 static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk, in ablkcipher_done_slow() argument
82 unsigned int len_this_page = scatterwalk_pagelen(&walk->out); in ablkcipher_done_slow()
86 scatterwalk_advance(&walk->out, n); in ablkcipher_done_slow()
90 scatterwalk_start(&walk->out, sg_next(walk->out.sg)); in ablkcipher_done_slow()
96 static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk, in ablkcipher_done_fast() argument
[all …]
cbc.c 43 struct blkcipher_walk *walk, in crypto_cbc_encrypt_segment() argument
49 unsigned int nbytes = walk->nbytes; in crypto_cbc_encrypt_segment()
50 u8 *src = walk->src.virt.addr; in crypto_cbc_encrypt_segment()
51 u8 *dst = walk->dst.virt.addr; in crypto_cbc_encrypt_segment()
52 u8 *iv = walk->iv; in crypto_cbc_encrypt_segment()
67 struct blkcipher_walk *walk, in crypto_cbc_encrypt_inplace() argument
73 unsigned int nbytes = walk->nbytes; in crypto_cbc_encrypt_inplace()
74 u8 *src = walk->src.virt.addr; in crypto_cbc_encrypt_inplace()
75 u8 *iv = walk->iv; in crypto_cbc_encrypt_inplace()
85 memcpy(walk->iv, iv, bsize); in crypto_cbc_encrypt_inplace()
[all …]
pcbc.c 46 struct blkcipher_walk *walk, in crypto_pcbc_encrypt_segment() argument
52 unsigned int nbytes = walk->nbytes; in crypto_pcbc_encrypt_segment()
53 u8 *src = walk->src.virt.addr; in crypto_pcbc_encrypt_segment()
54 u8 *dst = walk->dst.virt.addr; in crypto_pcbc_encrypt_segment()
55 u8 *iv = walk->iv; in crypto_pcbc_encrypt_segment()
71 struct blkcipher_walk *walk, in crypto_pcbc_encrypt_inplace() argument
77 unsigned int nbytes = walk->nbytes; in crypto_pcbc_encrypt_inplace()
78 u8 *src = walk->src.virt.addr; in crypto_pcbc_encrypt_inplace()
79 u8 *iv = walk->iv; in crypto_pcbc_encrypt_inplace()
92 memcpy(walk->iv, iv, bsize); in crypto_pcbc_encrypt_inplace()
[all …]
salsa20_generic.c 181 struct blkcipher_walk walk; in encrypt() local
186 blkcipher_walk_init(&walk, dst, src, nbytes); in encrypt()
187 err = blkcipher_walk_virt_block(desc, &walk, 64); in encrypt()
189 salsa20_ivsetup(ctx, walk.iv); in encrypt()
191 if (likely(walk.nbytes == nbytes)) in encrypt()
193 salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, in encrypt()
194 walk.src.virt.addr, nbytes); in encrypt()
195 return blkcipher_walk_done(desc, &walk, 0); in encrypt()
198 while (walk.nbytes >= 64) { in encrypt()
199 salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, in encrypt()
[all …]
ctr.c 55 static void crypto_ctr_crypt_final(struct blkcipher_walk *walk, in crypto_ctr_crypt_final() argument
60 u8 *ctrblk = walk->iv; in crypto_ctr_crypt_final()
63 u8 *src = walk->src.virt.addr; in crypto_ctr_crypt_final()
64 u8 *dst = walk->dst.virt.addr; in crypto_ctr_crypt_final()
65 unsigned int nbytes = walk->nbytes; in crypto_ctr_crypt_final()
74 static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk, in crypto_ctr_crypt_segment() argument
80 u8 *ctrblk = walk->iv; in crypto_ctr_crypt_segment()
81 u8 *src = walk->src.virt.addr; in crypto_ctr_crypt_segment()
82 u8 *dst = walk->dst.virt.addr; in crypto_ctr_crypt_segment()
83 unsigned int nbytes = walk->nbytes; in crypto_ctr_crypt_segment()
[all …]
ecb.c 42 struct blkcipher_walk *walk, in crypto_ecb_crypt() argument
50 err = blkcipher_walk_virt(desc, walk); in crypto_ecb_crypt()
52 while ((nbytes = walk->nbytes)) { in crypto_ecb_crypt()
53 u8 *wsrc = walk->src.virt.addr; in crypto_ecb_crypt()
54 u8 *wdst = walk->dst.virt.addr; in crypto_ecb_crypt()
63 err = blkcipher_walk_done(desc, walk, nbytes); in crypto_ecb_crypt()
73 struct blkcipher_walk walk; in crypto_ecb_encrypt() local
78 blkcipher_walk_init(&walk, dst, src, nbytes); in crypto_ecb_encrypt()
79 return crypto_ecb_crypt(desc, &walk, child, in crypto_ecb_encrypt()
87 struct blkcipher_walk walk; in crypto_ecb_decrypt() local
[all …]
crypto_null.c 77 struct blkcipher_walk walk; in skcipher_null_crypt() local
80 blkcipher_walk_init(&walk, dst, src, nbytes); in skcipher_null_crypt()
81 err = blkcipher_walk_virt(desc, &walk); in skcipher_null_crypt()
83 while (walk.nbytes) { in skcipher_null_crypt()
84 if (walk.src.virt.addr != walk.dst.virt.addr) in skcipher_null_crypt()
85 memcpy(walk.dst.virt.addr, walk.src.virt.addr, in skcipher_null_crypt()
86 walk.nbytes); in skcipher_null_crypt()
87 err = blkcipher_walk_done(desc, &walk, 0); in skcipher_null_crypt()
arc4.c 99 struct blkcipher_walk walk; in ecb_arc4_crypt() local
102 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_arc4_crypt()
104 err = blkcipher_walk_virt(desc, &walk); in ecb_arc4_crypt()
106 while (walk.nbytes > 0) { in ecb_arc4_crypt()
107 u8 *wsrc = walk.src.virt.addr; in ecb_arc4_crypt()
108 u8 *wdst = walk.dst.virt.addr; in ecb_arc4_crypt()
110 arc4_crypt(ctx, wdst, wsrc, walk.nbytes); in ecb_arc4_crypt()
112 err = blkcipher_walk_done(desc, &walk, 0); in ecb_arc4_crypt()
xts.c 175 struct blkcipher_walk walk; in xts_crypt() local
183 blkcipher_walk_init(&walk, sdst, ssrc, nbytes); in xts_crypt()
185 err = blkcipher_walk_virt(desc, &walk); in xts_crypt()
186 nbytes = walk.nbytes; in xts_crypt()
191 src = (be128 *)walk.src.virt.addr; in xts_crypt()
192 dst = (be128 *)walk.dst.virt.addr; in xts_crypt()
195 req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv); in xts_crypt()
225 *(be128 *)walk.iv = *t; in xts_crypt()
227 err = blkcipher_walk_done(desc, &walk, nbytes); in xts_crypt()
228 nbytes = walk.nbytes; in xts_crypt()
[all …]
lrw.c 224 struct blkcipher_walk walk; in lrw_crypt() local
232 blkcipher_walk_init(&walk, sdst, ssrc, nbytes); in lrw_crypt()
234 err = blkcipher_walk_virt(desc, &walk); in lrw_crypt()
235 nbytes = walk.nbytes; in lrw_crypt()
239 nblocks = min(walk.nbytes / bsize, max_blks); in lrw_crypt()
240 src = (be128 *)walk.src.virt.addr; in lrw_crypt()
241 dst = (be128 *)walk.dst.virt.addr; in lrw_crypt()
244 iv = (be128 *)walk.iv; in lrw_crypt()
282 err = blkcipher_walk_done(desc, &walk, nbytes); in lrw_crypt()
283 nbytes = walk.nbytes; in lrw_crypt()
[all …]
shash.c 222 struct crypto_hash_walk walk; in shash_ahash_update() local
225 for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0; in shash_ahash_update()
226 nbytes = crypto_hash_walk_done(&walk, nbytes)) in shash_ahash_update()
227 nbytes = crypto_shash_update(desc, walk.data, nbytes); in shash_ahash_update()
245 struct crypto_hash_walk walk; in shash_ahash_finup() local
248 nbytes = crypto_hash_walk_first(req, &walk); in shash_ahash_finup()
253 nbytes = crypto_hash_walk_last(&walk) ? in shash_ahash_finup()
254 crypto_shash_finup(desc, walk.data, nbytes, in shash_ahash_finup()
256 crypto_shash_update(desc, walk.data, nbytes); in shash_ahash_finup()
257 nbytes = crypto_hash_walk_done(&walk, nbytes); in shash_ahash_finup()
[all …]
ccm.c 207 struct scatter_walk walk; in get_data_to_compute() local
211 scatterwalk_start(&walk, sg); in get_data_to_compute()
214 n = scatterwalk_clamp(&walk, len); in get_data_to_compute()
216 scatterwalk_start(&walk, sg_next(walk.sg)); in get_data_to_compute()
217 n = scatterwalk_clamp(&walk, len); in get_data_to_compute()
219 data_src = scatterwalk_map(&walk); in get_data_to_compute()
225 scatterwalk_advance(&walk, n); in get_data_to_compute()
226 scatterwalk_done(&walk, 0, len); in get_data_to_compute()
/linux-4.1.27/mm/
pagewalk.c 7 struct mm_walk *walk) in walk_pte_range() argument
14 err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk); in walk_pte_range()
28 struct mm_walk *walk) in walk_pmd_range() argument
38 if (pmd_none(*pmd) || !walk->vma) { in walk_pmd_range()
39 if (walk->pte_hole) in walk_pmd_range()
40 err = walk->pte_hole(addr, next, walk); in walk_pmd_range()
49 if (walk->pmd_entry) in walk_pmd_range()
50 err = walk->pmd_entry(pmd, addr, next, walk); in walk_pmd_range()
58 if (!walk->pte_entry) in walk_pmd_range()
61 split_huge_page_pmd_mm(walk->mm, addr, pmd); in walk_pmd_range()
[all …]
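These callbacks are wired up by filling in a struct mm_walk and handing it to walk_page_range(), exactly as the madvise.c and openrisc dma.c hits below do. A hedged sketch with a hypothetical pte_entry callback:

    static int example_pte_entry(pte_t *pte, unsigned long addr,
                                 unsigned long next, struct mm_walk *walk)
    {
            /* examine one pte; caller state travels in walk->private */
            return 0;       /* non-zero aborts the walk */
    }

    static void example_scan(struct mm_struct *mm,
                             unsigned long start, unsigned long end)
    {
            struct mm_walk walk = {
                    .pte_entry = example_pte_entry,
                    .mm        = mm,
            };

            walk_page_range(start, end, &walk);
    }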
mincore.c 23 unsigned long end, struct mm_walk *walk) in mincore_hugetlb() argument
27 unsigned char *vec = walk->private; in mincore_hugetlb()
36 walk->private = vec; in mincore_hugetlb()
104 struct mm_walk *walk) in mincore_unmapped_range() argument
106 walk->private += __mincore_unmapped_range(addr, end, in mincore_unmapped_range()
107 walk->vma, walk->private); in mincore_unmapped_range()
112 struct mm_walk *walk) in mincore_pte_range() argument
115 struct vm_area_struct *vma = walk->vma; in mincore_pte_range()
117 unsigned char *vec = walk->private; in mincore_pte_range()
131 ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in mincore_pte_range()
[all …]
madvise.c 139 unsigned long end, struct mm_walk *walk) in swapin_walk_pmd_entry() argument
142 struct vm_area_struct *vma = walk->private; in swapin_walk_pmd_entry()
176 struct mm_walk walk = { in force_swapin_readahead() local
182 walk_page_range(start, end, &walk); in force_swapin_readahead()
mempolicy.c 486 unsigned long end, struct mm_walk *walk) in queue_pages_pte_range() argument
488 struct vm_area_struct *vma = walk->vma; in queue_pages_pte_range()
490 struct queue_pages *qp = walk->private; in queue_pages_pte_range()
500 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in queue_pages_pte_range()
527 struct mm_walk *walk) in queue_pages_hugetlb() argument
530 struct queue_pages *qp = walk->private; in queue_pages_hugetlb()
537 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); in queue_pages_hugetlb()
587 struct mm_walk *walk) in queue_pages_test_walk() argument
589 struct vm_area_struct *vma = walk->vma; in queue_pages_test_walk()
590 struct queue_pages *qp = walk->private; in queue_pages_test_walk()
memcontrol.c 4893 struct mm_walk *walk) in mem_cgroup_count_precharge_pte_range() argument
4895 struct vm_area_struct *vma = walk->vma; in mem_cgroup_count_precharge_pte_range()
5057 struct mm_walk *walk) in mem_cgroup_move_charge_pte_range() argument
5060 struct vm_area_struct *vma = walk->vma; in mem_cgroup_move_charge_pte_range()
/linux-4.1.27/arch/x86/crypto/
glue_helper.c 37 struct blkcipher_walk *walk) in __glue_ecb_crypt_128bit() argument
45 err = blkcipher_walk_virt(desc, walk); in __glue_ecb_crypt_128bit()
47 while ((nbytes = walk->nbytes)) { in __glue_ecb_crypt_128bit()
48 u8 *wsrc = walk->src.virt.addr; in __glue_ecb_crypt_128bit()
49 u8 *wdst = walk->dst.virt.addr; in __glue_ecb_crypt_128bit()
74 err = blkcipher_walk_done(desc, walk, nbytes); in __glue_ecb_crypt_128bit()
85 struct blkcipher_walk walk; in glue_ecb_crypt_128bit() local
87 blkcipher_walk_init(&walk, dst, src, nbytes); in glue_ecb_crypt_128bit()
88 return __glue_ecb_crypt_128bit(gctx, desc, &walk); in glue_ecb_crypt_128bit()
94 struct blkcipher_walk *walk) in __glue_cbc_encrypt_128bit() argument
[all …]
blowfish_glue.c 80 static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, in ecb_crypt() argument
89 err = blkcipher_walk_virt(desc, walk); in ecb_crypt()
91 while ((nbytes = walk->nbytes)) { in ecb_crypt()
92 u8 *wsrc = walk->src.virt.addr; in ecb_crypt()
93 u8 *wdst = walk->dst.virt.addr; in ecb_crypt()
119 err = blkcipher_walk_done(desc, walk, nbytes); in ecb_crypt()
128 struct blkcipher_walk walk; in ecb_encrypt() local
130 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_encrypt()
131 return ecb_crypt(desc, &walk, blowfish_enc_blk, blowfish_enc_blk_4way); in ecb_encrypt()
137 struct blkcipher_walk walk; in ecb_decrypt() local
[all …]
cast5_avx_glue.c 60 static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, in ecb_crypt() argument
72 err = blkcipher_walk_virt(desc, walk); in ecb_crypt()
75 while ((nbytes = walk->nbytes)) { in ecb_crypt()
76 u8 *wsrc = walk->src.virt.addr; in ecb_crypt()
77 u8 *wdst = walk->dst.virt.addr; in ecb_crypt()
107 err = blkcipher_walk_done(desc, walk, nbytes); in ecb_crypt()
117 struct blkcipher_walk walk; in ecb_encrypt() local
119 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_encrypt()
120 return ecb_crypt(desc, &walk, true); in ecb_encrypt()
126 struct blkcipher_walk walk; in ecb_decrypt() local
[all …]
des3_ede_glue.c 86 static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, in ecb_crypt() argument
93 err = blkcipher_walk_virt(desc, walk); in ecb_crypt()
95 while ((nbytes = walk->nbytes)) { in ecb_crypt()
96 u8 *wsrc = walk->src.virt.addr; in ecb_crypt()
97 u8 *wdst = walk->dst.virt.addr; in ecb_crypt()
124 err = blkcipher_walk_done(desc, walk, nbytes); in ecb_crypt()
134 struct blkcipher_walk walk; in ecb_encrypt() local
136 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_encrypt()
137 return ecb_crypt(desc, &walk, ctx->enc_expkey); in ecb_encrypt()
144 struct blkcipher_walk walk; in ecb_decrypt() local
[all …]
salsa20_glue.c 52 struct blkcipher_walk walk; in encrypt() local
57 blkcipher_walk_init(&walk, dst, src, nbytes); in encrypt()
58 err = blkcipher_walk_virt_block(desc, &walk, 64); in encrypt()
60 salsa20_ivsetup(ctx, walk.iv); in encrypt()
62 if (likely(walk.nbytes == nbytes)) in encrypt()
64 salsa20_encrypt_bytes(ctx, walk.src.virt.addr, in encrypt()
65 walk.dst.virt.addr, nbytes); in encrypt()
66 return blkcipher_walk_done(desc, &walk, 0); in encrypt()
69 while (walk.nbytes >= 64) { in encrypt()
70 salsa20_encrypt_bytes(ctx, walk.src.virt.addr, in encrypt()
[all …]
aesni-intel_glue.c 378 struct blkcipher_walk walk; in ecb_encrypt() local
381 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_encrypt()
382 err = blkcipher_walk_virt(desc, &walk); in ecb_encrypt()
386 while ((nbytes = walk.nbytes)) { in ecb_encrypt()
387 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, in ecb_encrypt()
390 err = blkcipher_walk_done(desc, &walk, nbytes); in ecb_encrypt()
402 struct blkcipher_walk walk; in ecb_decrypt() local
405 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_decrypt()
406 err = blkcipher_walk_virt(desc, &walk); in ecb_decrypt()
410 while ((nbytes = walk.nbytes)) { in ecb_decrypt()
[all …]
/linux-4.1.27/arch/arm/crypto/
aesbs-glue.c 109 struct blkcipher_walk walk; in aesbs_cbc_encrypt() local
112 blkcipher_walk_init(&walk, dst, src, nbytes); in aesbs_cbc_encrypt()
113 err = blkcipher_walk_virt(desc, &walk); in aesbs_cbc_encrypt()
115 while (walk.nbytes) { in aesbs_cbc_encrypt()
116 u32 blocks = walk.nbytes / AES_BLOCK_SIZE; in aesbs_cbc_encrypt()
117 u8 *src = walk.src.virt.addr; in aesbs_cbc_encrypt()
119 if (walk.dst.virt.addr == walk.src.virt.addr) { in aesbs_cbc_encrypt()
120 u8 *iv = walk.iv; in aesbs_cbc_encrypt()
128 memcpy(walk.iv, iv, AES_BLOCK_SIZE); in aesbs_cbc_encrypt()
130 u8 *dst = walk.dst.virt.addr; in aesbs_cbc_encrypt()
[all …]
aes-ce-glue.c 170 struct blkcipher_walk walk; in ecb_encrypt() local
175 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_encrypt()
176 err = blkcipher_walk_virt(desc, &walk); in ecb_encrypt()
179 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { in ecb_encrypt()
180 ce_aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr, in ecb_encrypt()
182 err = blkcipher_walk_done(desc, &walk, in ecb_encrypt()
183 walk.nbytes % AES_BLOCK_SIZE); in ecb_encrypt()
193 struct blkcipher_walk walk; in ecb_decrypt() local
198 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_decrypt()
199 err = blkcipher_walk_virt(desc, &walk); in ecb_decrypt()
[all …]
/linux-4.1.27/arch/arm64/crypto/
aes-glue.c 104 struct blkcipher_walk walk; in ecb_encrypt() local
108 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_encrypt()
109 err = blkcipher_walk_virt(desc, &walk); in ecb_encrypt()
112 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { in ecb_encrypt()
113 aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr, in ecb_encrypt()
115 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); in ecb_encrypt()
126 struct blkcipher_walk walk; in ecb_decrypt() local
130 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_decrypt()
131 err = blkcipher_walk_virt(desc, &walk); in ecb_decrypt()
134 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { in ecb_decrypt()
[all …]
aes-ce-ccm-glue.c 111 struct scatter_walk walk; in ccm_calculate_auth_mac() local
127 scatterwalk_start(&walk, req->assoc); in ccm_calculate_auth_mac()
130 u32 n = scatterwalk_clamp(&walk, len); in ccm_calculate_auth_mac()
134 scatterwalk_start(&walk, sg_next(walk.sg)); in ccm_calculate_auth_mac()
135 n = scatterwalk_clamp(&walk, len); in ccm_calculate_auth_mac()
137 p = scatterwalk_map(&walk); in ccm_calculate_auth_mac()
143 scatterwalk_advance(&walk, n); in ccm_calculate_auth_mac()
144 scatterwalk_done(&walk, 0, len); in ccm_calculate_auth_mac()
153 struct blkcipher_walk walk; in ccm_encrypt() local
171 blkcipher_walk_init(&walk, req->dst, req->src, len); in ccm_encrypt()
[all …]
/linux-4.1.27/arch/sparc/mm/
extable.c 19 const struct exception_table_entry *walk; in search_extable() local
39 for (walk = start; walk <= last; walk++) { in search_extable()
40 if (walk->fixup == 0) { in search_extable()
42 walk++; in search_extable()
47 if (walk->fixup == -1) in search_extable()
50 if (walk->insn == value) in search_extable()
51 return walk; in search_extable()
55 for (walk = start; walk <= (last - 1); walk++) { in search_extable()
56 if (walk->fixup) in search_extable()
59 if (walk[0].insn <= value && walk[1].insn > value) in search_extable()
[all …]
/linux-4.1.27/include/crypto/
scatterwalk.h 58 static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk) in scatterwalk_pagelen() argument
60 unsigned int len = walk->sg->offset + walk->sg->length - walk->offset; in scatterwalk_pagelen()
61 unsigned int len_this_page = offset_in_page(~walk->offset) + 1; in scatterwalk_pagelen()
65 static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk, in scatterwalk_clamp() argument
68 unsigned int len_this_page = scatterwalk_pagelen(walk); in scatterwalk_clamp()
72 static inline void scatterwalk_advance(struct scatter_walk *walk, in scatterwalk_advance() argument
75 walk->offset += nbytes; in scatterwalk_advance()
78 static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk, in scatterwalk_aligned() argument
81 return !(walk->offset & alignmask); in scatterwalk_aligned()
84 static inline struct page *scatterwalk_page(struct scatter_walk *walk) in scatterwalk_page() argument
[all …]
algapi.h 190 struct blkcipher_walk *walk, int err);
192 struct blkcipher_walk *walk);
194 struct blkcipher_walk *walk);
196 struct blkcipher_walk *walk,
199 struct blkcipher_walk *walk,
204 struct ablkcipher_walk *walk, int err);
206 struct ablkcipher_walk *walk);
207 void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);
309 static inline void blkcipher_walk_init(struct blkcipher_walk *walk, in blkcipher_walk_init() argument
314 walk->in.sg = src; in blkcipher_walk_init()
[all …]
mcryptd.h 59 struct crypto_hash_walk walk; member
/linux-4.1.27/arch/s390/crypto/
des_s390.c 86 u8 *key, struct blkcipher_walk *walk) in ecb_desall_crypt() argument
88 int ret = blkcipher_walk_virt(desc, walk); in ecb_desall_crypt()
91 while ((nbytes = walk->nbytes)) { in ecb_desall_crypt()
94 u8 *out = walk->dst.virt.addr; in ecb_desall_crypt()
95 u8 *in = walk->src.virt.addr; in ecb_desall_crypt()
102 ret = blkcipher_walk_done(desc, walk, nbytes); in ecb_desall_crypt()
109 struct blkcipher_walk *walk) in cbc_desall_crypt() argument
112 int ret = blkcipher_walk_virt(desc, walk); in cbc_desall_crypt()
113 unsigned int nbytes = walk->nbytes; in cbc_desall_crypt()
122 memcpy(param.iv, walk->iv, DES_BLOCK_SIZE); in cbc_desall_crypt()
[all …]
aes_s390.c 316 struct blkcipher_walk *walk) in ecb_aes_crypt() argument
318 int ret = blkcipher_walk_virt(desc, walk); in ecb_aes_crypt()
321 while ((nbytes = walk->nbytes)) { in ecb_aes_crypt()
324 u8 *out = walk->dst.virt.addr; in ecb_aes_crypt()
325 u8 *in = walk->src.virt.addr; in ecb_aes_crypt()
332 ret = blkcipher_walk_done(desc, walk, nbytes); in ecb_aes_crypt()
343 struct blkcipher_walk walk; in ecb_aes_encrypt() local
348 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_aes_encrypt()
349 return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk); in ecb_aes_encrypt()
357 struct blkcipher_walk walk; in ecb_aes_decrypt() local
[all …]
/linux-4.1.27/arch/powerpc/crypto/
aes-spe-glue.c 183 struct blkcipher_walk walk; in ppc_ecb_encrypt() local
188 blkcipher_walk_init(&walk, dst, src, nbytes); in ppc_ecb_encrypt()
189 err = blkcipher_walk_virt(desc, &walk); in ppc_ecb_encrypt()
191 while ((nbytes = walk.nbytes)) { in ppc_ecb_encrypt()
197 ppc_encrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr, in ppc_ecb_encrypt()
201 err = blkcipher_walk_done(desc, &walk, ubytes); in ppc_ecb_encrypt()
211 struct blkcipher_walk walk; in ppc_ecb_decrypt() local
216 blkcipher_walk_init(&walk, dst, src, nbytes); in ppc_ecb_decrypt()
217 err = blkcipher_walk_virt(desc, &walk); in ppc_ecb_decrypt()
219 while ((nbytes = walk.nbytes)) { in ppc_ecb_decrypt()
[all …]
/linux-4.1.27/arch/sparc/crypto/
aes_glue.c 220 struct blkcipher_walk walk; in ecb_encrypt() local
223 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_encrypt()
224 err = blkcipher_walk_virt(desc, &walk); in ecb_encrypt()
228 while ((nbytes = walk.nbytes)) { in ecb_encrypt()
233 (const u64 *)walk.src.virt.addr, in ecb_encrypt()
234 (u64 *) walk.dst.virt.addr, in ecb_encrypt()
238 err = blkcipher_walk_done(desc, &walk, nbytes); in ecb_encrypt()
249 struct blkcipher_walk walk; in ecb_decrypt() local
253 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_decrypt()
254 err = blkcipher_walk_virt(desc, &walk); in ecb_decrypt()
[all …]
des_glue.c 98 struct blkcipher_walk walk; in __ecb_crypt() local
101 blkcipher_walk_init(&walk, dst, src, nbytes); in __ecb_crypt()
102 err = blkcipher_walk_virt(desc, &walk); in __ecb_crypt()
109 while ((nbytes = walk.nbytes)) { in __ecb_crypt()
113 des_sparc64_ecb_crypt((const u64 *)walk.src.virt.addr, in __ecb_crypt()
114 (u64 *) walk.dst.virt.addr, in __ecb_crypt()
118 err = blkcipher_walk_done(desc, &walk, nbytes); in __ecb_crypt()
146 struct blkcipher_walk walk; in cbc_encrypt() local
149 blkcipher_walk_init(&walk, dst, src, nbytes); in cbc_encrypt()
150 err = blkcipher_walk_virt(desc, &walk); in cbc_encrypt()
[all …]
camellia_glue.c 90 struct blkcipher_walk walk; in __ecb_crypt() local
99 blkcipher_walk_init(&walk, dst, src, nbytes); in __ecb_crypt()
100 err = blkcipher_walk_virt(desc, &walk); in __ecb_crypt()
108 while ((nbytes = walk.nbytes)) { in __ecb_crypt()
115 src64 = (const u64 *)walk.src.virt.addr; in __ecb_crypt()
116 dst64 = (u64 *) walk.dst.virt.addr; in __ecb_crypt()
120 err = blkcipher_walk_done(desc, &walk, nbytes); in __ecb_crypt()
153 struct blkcipher_walk walk; in cbc_encrypt() local
162 blkcipher_walk_init(&walk, dst, src, nbytes); in cbc_encrypt()
163 err = blkcipher_walk_virt(desc, &walk); in cbc_encrypt()
[all …]
/linux-4.1.27/drivers/crypto/vmx/
aes_ctr.c 91 struct blkcipher_walk *walk) in p8_aes_ctr_final() argument
93 u8 *ctrblk = walk->iv; in p8_aes_ctr_final()
95 u8 *src = walk->src.virt.addr; in p8_aes_ctr_final()
96 u8 *dst = walk->dst.virt.addr; in p8_aes_ctr_final()
97 unsigned int nbytes = walk->nbytes; in p8_aes_ctr_final()
115 struct blkcipher_walk walk; in p8_aes_ctr_crypt() local
127 blkcipher_walk_init(&walk, dst, src, nbytes); in p8_aes_ctr_crypt()
128 ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); in p8_aes_ctr_crypt()
129 while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { in p8_aes_ctr_crypt()
133 aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, walk.dst.virt.addr, in p8_aes_ctr_crypt()
[all …]
aes_cbc.c 98 struct blkcipher_walk walk; in p8_aes_cbc_encrypt() local
114 blkcipher_walk_init(&walk, dst, src, nbytes); in p8_aes_cbc_encrypt()
115 ret = blkcipher_walk_virt(desc, &walk); in p8_aes_cbc_encrypt()
116 while ((nbytes = walk.nbytes)) { in p8_aes_cbc_encrypt()
117 aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, in p8_aes_cbc_encrypt()
118 nbytes & AES_BLOCK_MASK, &ctx->enc_key, walk.iv, 1); in p8_aes_cbc_encrypt()
120 ret = blkcipher_walk_done(desc, &walk, nbytes); in p8_aes_cbc_encrypt()
134 struct blkcipher_walk walk; in p8_aes_cbc_decrypt() local
150 blkcipher_walk_init(&walk, dst, src, nbytes); in p8_aes_cbc_decrypt()
151 ret = blkcipher_walk_virt(desc, &walk); in p8_aes_cbc_decrypt()
[all …]
/linux-4.1.27/drivers/atm/
idt77105.c 85 struct idt77105_priv *walk; in idt77105_stats_timer_func() local
90 for (walk = idt77105_all; walk; walk = walk->next) { in idt77105_stats_timer_func()
91 dev = walk->dev; in idt77105_stats_timer_func()
93 stats = &walk->stats; in idt77105_stats_timer_func()
114 struct idt77105_priv *walk; in idt77105_restart_timer_func() local
119 for (walk = idt77105_all; walk; walk = walk->next) { in idt77105_restart_timer_func()
120 dev = walk->dev; in idt77105_restart_timer_func()
134 PUT( walk->old_mcr ,MCR); in idt77105_restart_timer_func()
326 struct idt77105_priv *walk, *prev; in idt77105_stop() local
334 for (prev = NULL, walk = idt77105_all ; in idt77105_stop()
[all …]
suni.c 58 struct suni_priv *walk; in suni_hz() local
62 for (walk = sunis; walk; walk = walk->next) { in suni_hz()
63 dev = walk->dev; in suni_hz()
64 stats = &walk->sonet_stats; in suni_hz()
344 struct suni_priv **walk; in suni_stop() local
349 for (walk = &sunis; *walk != PRIV(dev); in suni_stop()
350 walk = &PRIV((*walk)->dev)->next); in suni_stop()
351 *walk = PRIV((*walk)->dev)->next; in suni_stop()
/linux-4.1.27/drivers/crypto/
padlock-aes.c 347 struct blkcipher_walk walk; in ecb_aes_encrypt() local
353 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_aes_encrypt()
354 err = blkcipher_walk_virt(desc, &walk); in ecb_aes_encrypt()
357 while ((nbytes = walk.nbytes)) { in ecb_aes_encrypt()
358 padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, in ecb_aes_encrypt()
362 err = blkcipher_walk_done(desc, &walk, nbytes); in ecb_aes_encrypt()
376 struct blkcipher_walk walk; in ecb_aes_decrypt() local
382 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_aes_decrypt()
383 err = blkcipher_walk_virt(desc, &walk); in ecb_aes_decrypt()
386 while ((nbytes = walk.nbytes)) { in ecb_aes_decrypt()
[all …]
geode-aes.c 309 struct blkcipher_walk walk; in geode_cbc_decrypt() local
315 blkcipher_walk_init(&walk, dst, src, nbytes); in geode_cbc_decrypt()
316 err = blkcipher_walk_virt(desc, &walk); in geode_cbc_decrypt()
317 op->iv = walk.iv; in geode_cbc_decrypt()
319 while ((nbytes = walk.nbytes)) { in geode_cbc_decrypt()
320 op->src = walk.src.virt.addr, in geode_cbc_decrypt()
321 op->dst = walk.dst.virt.addr; in geode_cbc_decrypt()
329 err = blkcipher_walk_done(desc, &walk, nbytes); in geode_cbc_decrypt()
341 struct blkcipher_walk walk; in geode_cbc_encrypt() local
347 blkcipher_walk_init(&walk, dst, src, nbytes); in geode_cbc_encrypt()
[all …]
n2_core.c 513 struct crypto_hash_walk walk; in n2_do_async_digest() local
536 nbytes = crypto_hash_walk_first(req, &walk); in n2_do_async_digest()
555 ent->src_addr = __pa(walk.data); in n2_do_async_digest()
563 nbytes = crypto_hash_walk_done(&walk, 0); in n2_do_async_digest()
568 ent->src_addr = __pa(walk.data); in n2_do_async_digest()
576 nbytes = crypto_hash_walk_done(&walk, 0); in n2_do_async_digest()
672 struct ablkcipher_walk walk; member
711 struct ablkcipher_walk walk; member
876 struct ablkcipher_walk *walk = &rctx->walk; in n2_compute_chunks() local
883 ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes); in n2_compute_chunks()
[all …]
hifn_795x.c 660 struct hifn_cipher_walk walk; member
1392 t = &rctx->walk.cache[0]; in hifn_setup_dma()
1395 if (t->length && rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { in hifn_setup_dma()
1588 rctx->walk.flags = 0; in hifn_setup_session()
1596 rctx->walk.flags |= ASYNC_FLAGS_MISALIGNED; in hifn_setup_session()
1602 if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { in hifn_setup_session()
1603 err = hifn_cipher_walk_init(&rctx->walk, idx, GFP_ATOMIC); in hifn_setup_session()
1608 sg_num = hifn_cipher_walk(req, &rctx->walk); in hifn_setup_session()
1668 rctx.walk.cache[0].length = 0; in hifn_test()
1774 if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { in hifn_process_ready()
[all …]
omap-des.c 389 struct scatter_walk walk; in sg_copy_buf() local
394 scatterwalk_start(&walk, sg); in sg_copy_buf()
395 scatterwalk_advance(&walk, start); in sg_copy_buf()
396 scatterwalk_copychunks(buf, &walk, nbytes, out); in sg_copy_buf()
397 scatterwalk_done(&walk, out, 0); in sg_copy_buf()
omap-aes.c 405 struct scatter_walk walk; in sg_copy_buf() local
410 scatterwalk_start(&walk, sg); in sg_copy_buf()
411 scatterwalk_advance(&walk, start); in sg_copy_buf()
412 scatterwalk_copychunks(buf, &walk, nbytes, out); in sg_copy_buf()
413 scatterwalk_done(&walk, out, 0); in sg_copy_buf()
/linux-4.1.27/include/crypto/internal/
hash.h 55 int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
57 struct crypto_hash_walk *walk);
59 struct crypto_hash_walk *walk);
61 struct crypto_hash_walk *walk,
64 static inline int crypto_ahash_walk_done(struct crypto_hash_walk *walk, in crypto_ahash_walk_done() argument
67 return crypto_hash_walk_done(walk, err); in crypto_ahash_walk_done()
70 static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk) in crypto_hash_walk_last() argument
72 return !(walk->entrylen | walk->total); in crypto_hash_walk_last()
75 static inline int crypto_ahash_walk_last(struct crypto_hash_walk *walk) in crypto_ahash_walk_last() argument
77 return crypto_hash_walk_last(walk); in crypto_ahash_walk_last()
/linux-4.1.27/Documentation/filesystems/
path-lookup.txt 5 performing a path walk. Typically, for every open(), stat() etc., the path name
17 thus in every component during path look-up. Since 2.5.10 onwards, fast-walk
30 are path-walk intensive tend to do path lookups starting from a common dentry
34 Since 2.6.38, RCU is used to make a significant part of the entire path walk
36 even stores into cachelines of common dentries). This is known as "rcu-walk"
56 permissions on the parent inode to be able to walk into it.
67 - find the start point of the walk;
92 point to perform the next step of our path walk against.
162 still at 2. Now when it follows 2's 'next' pointer, it will walk off into
180 start the next part of the path walk from).
[all …]
porting 354 via rcu-walk path walk (basically, if the file can have had a path name in the
364 vfs now tries to do path walking in "rcu-walk mode", which avoids
368 filesystem callbacks, the vfs drops out of rcu-walk mode before the fs call, so
370 the benefits of rcu-walk mode. We will begin to add filesystem callbacks that
371 are rcu-walk aware, shown below. Filesystems should take advantage of this
377 the filesystem provides it), which requires dropping out of rcu-walk mode. This
378 may now be called in rcu-walk mode (nd->flags & LOOKUP_RCU). -ECHILD should be
379 returned if the filesystem cannot handle rcu-walk. See
383 on many or all directory inodes on the way down a path walk (to check for
384 exec permission). These must now be rcu-walk aware (flags & IPERM_FLAG_RCU).
vfs.txt 446 (i.e. page that was installed when the symbolic link walk
448 walk).
453 May be called in rcu-walk mode (mask & MAY_NOT_BLOCK). If in rcu-walk
457 If a situation is encountered that rcu-walk cannot handle, return
458 -ECHILD and it will be called again in ref-walk mode.
947 d_revalidate may be called in rcu-walk mode (flags & LOOKUP_RCU).
948 If in rcu-walk mode, the filesystem must revalidate the dentry without
953 If a situation is encountered that rcu-walk cannot handle, return
954 -ECHILD and it will be called again in ref-walk mode.
957 This is called when a path-walk ends at dentry that was not acquired by
[all …]
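In filesystem code the rcu-walk contract quoted above boils down to one idiom: return -ECHILD the moment the operation would have to sleep, and do the real work when called again in ref-walk mode. A hedged sketch against the d_revalidate signature of this kernel:

    static int example_d_revalidate(struct dentry *dentry, unsigned int flags)
    {
            if (flags & LOOKUP_RCU)
                    return -ECHILD; /* may not block; retried in ref-walk mode */

            /* blocking revalidation (I/O, network round trip, ...) goes here */
            return 1;               /* dentry still valid */
    }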
Locking 25 rename_lock ->d_lock may block rcu-walk
26 d_revalidate: no no yes (ref-walk) maybe
36 d_manage: no no yes (ref-walk) maybe
89 permission: no (may not block if called in rcu-walk mode)
autofs4-mount-control.txt 23 needs to walk back up the mount tree to construct a path, such as
65 trigger. So when we walk on the path we mount shark:/autofs/export1 "on
xfs-self-describing-metadata.txt 105 object, we don't know what inode it belongs to and hence have to walk the entire
xfs-delayed-logging-design.txt 402 it. The fact that we walk the log items (in the CIL) just to chain the log
668 sequencing also requires the same lock, list walk, and blocking mechanism to
/linux-4.1.27/fs/
select.c 801 struct poll_list *walk; in do_poll() local
804 for (walk = list; walk != NULL; walk = walk->next) { in do_poll()
807 pfd = walk->entries; in do_poll()
808 pfd_end = pfd + walk->len; in do_poll()
880 struct poll_list *walk = head; in do_sys_poll() local
888 walk->next = NULL; in do_sys_poll()
889 walk->len = len; in do_sys_poll()
893 if (copy_from_user(walk->entries, ufds + nfds-todo, in do_sys_poll()
894 sizeof(struct pollfd) * walk->len)) in do_sys_poll()
897 todo -= walk->len; in do_sys_poll()
[all …]
/linux-4.1.27/security/
device_cgroup.c 96 struct dev_exception_item *excopy, *walk; in dev_exception_add() local
104 list_for_each_entry(walk, &dev_cgroup->exceptions, list) { in dev_exception_add()
105 if (walk->type != ex->type) in dev_exception_add()
107 if (walk->major != ex->major) in dev_exception_add()
109 if (walk->minor != ex->minor) in dev_exception_add()
112 walk->access |= ex->access; in dev_exception_add()
128 struct dev_exception_item *walk, *tmp; in dev_exception_rm() local
132 list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) { in dev_exception_rm()
133 if (walk->type != ex->type) in dev_exception_rm()
135 if (walk->major != ex->major) in dev_exception_rm()
[all …]
/linux-4.1.27/fs/proc/
task_mmu.c 484 struct mm_walk *walk) in smaps_pte_entry() argument
486 struct mem_size_stats *mss = walk->private; in smaps_pte_entry()
487 struct vm_area_struct *vma = walk->vma; in smaps_pte_entry()
508 struct mm_walk *walk) in smaps_pmd_entry() argument
510 struct mem_size_stats *mss = walk->private; in smaps_pmd_entry()
511 struct vm_area_struct *vma = walk->vma; in smaps_pmd_entry()
524 struct mm_walk *walk) in smaps_pmd_entry() argument
530 struct mm_walk *walk) in smaps_pte_range() argument
532 struct vm_area_struct *vma = walk->vma; in smaps_pte_range()
537 smaps_pmd_entry(pmd, addr, walk); in smaps_pte_range()
[all …]
/linux-4.1.27/arch/openrisc/kernel/
dma.c 33 unsigned long next, struct mm_walk *walk) in page_set_nocache() argument
54 unsigned long next, struct mm_walk *walk) in page_clear_nocache() argument
90 struct mm_walk walk = { in or1k_dma_alloc() local
109 if (walk_page_range(va, va + size, &walk)) { in or1k_dma_alloc()
123 struct mm_walk walk = { in or1k_dma_free() local
130 WARN_ON(walk_page_range(va, va + size, &walk)); in or1k_dma_free()
/linux-4.1.27/fs/fat/
namei_msdos.c 26 unsigned char *walk; in msdos_format_name() local
43 for (walk = res; len && walk - res < 8; walk++) { in msdos_format_name()
62 if ((res == walk) && (c == 0xE5)) in msdos_format_name()
67 *walk = (!opts->nocase && c >= 'a' && c <= 'z') ? c - 32 : c; in msdos_format_name()
80 while (walk - res < 8) in msdos_format_name()
81 *walk++ = ' '; in msdos_format_name()
82 while (len > 0 && walk - res < MSDOS_NAME) { in msdos_format_name()
101 *walk++ = c - 32; in msdos_format_name()
103 *walk++ = c; in msdos_format_name()
110 while (walk - res < MSDOS_NAME) in msdos_format_name()
[all …]
inode.c 426 unsigned char exe_extensions[] = "EXECOMBAT", *walk; in is_exec() local
428 for (walk = exe_extensions; *walk; walk += 3) in is_exec()
429 if (!strncmp(extension, walk, 3)) in is_exec()
/linux-4.1.27/kernel/locking/
rtmutex.h 33 enum rtmutex_chainwalk walk) in debug_rt_mutex_detect_deadlock() argument
35 return walk == RT_MUTEX_FULL_CHAINWALK; in debug_rt_mutex_detect_deadlock()
rtmutex-debug.h 31 enum rtmutex_chainwalk walk) in debug_rt_mutex_detect_deadlock() argument
/linux-4.1.27/drivers/crypto/nx/
nx.c 167 struct scatter_walk walk; in nx_walk_and_build() local
174 scatterwalk_start(&walk, sg_src); in nx_walk_and_build()
185 scatterwalk_advance(&walk, start - offset); in nx_walk_and_build()
188 n = scatterwalk_clamp(&walk, len); in nx_walk_and_build()
192 scatterwalk_start(&walk, sg_next(walk.sg)); in nx_walk_and_build()
193 n = scatterwalk_clamp(&walk, len); in nx_walk_and_build()
195 dst = scatterwalk_map(&walk); in nx_walk_and_build()
201 scatterwalk_advance(&walk, n); in nx_walk_and_build()
202 scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len); in nx_walk_and_build()
nx-aes-gcm.c 130 struct scatter_walk walk; in nx_gca() local
137 scatterwalk_start(&walk, req->assoc); in nx_gca()
138 scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG); in nx_gca()
139 scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0); in nx_gca()
/linux-4.1.27/net/xfrm/
xfrm_policy.c 186 if (unlikely(xp->walk.dead)) in xfrm_policy_timer()
250 if (unlikely(pol->walk.dead)) in xfrm_policy_flo_get()
262 return !pol->walk.dead; in xfrm_policy_flo_check()
288 INIT_LIST_HEAD(&policy->walk.all); in xfrm_policy_alloc()
308 BUG_ON(!policy->walk.dead); in xfrm_policy_destroy()
332 policy->walk.dead = 1; in xfrm_policy_kill()
627 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) { in xfrm_hash_rebuild()
996 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, in xfrm_policy_walk() argument
1004 if (walk->type >= XFRM_POLICY_TYPE_MAX && in xfrm_policy_walk()
1005 walk->type != XFRM_POLICY_TYPE_ANY) in xfrm_policy_walk()
[all …]
xfrm_state.c 1614 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk, in xfrm_state_walk() argument
1622 if (walk->seq != 0 && list_empty(&walk->all)) in xfrm_state_walk()
1626 if (list_empty(&walk->all)) in xfrm_state_walk()
1629 x = list_entry(&walk->all, struct xfrm_state_walk, all); in xfrm_state_walk()
1634 if (!xfrm_id_proto_match(state->id.proto, walk->proto)) in xfrm_state_walk()
1636 if (!__xfrm_state_filter_match(state, walk->filter)) in xfrm_state_walk()
1638 err = func(state, walk->seq, data); in xfrm_state_walk()
1640 list_move_tail(&walk->all, &x->all); in xfrm_state_walk()
1643 walk->seq++; in xfrm_state_walk()
1645 if (walk->seq == 0) { in xfrm_state_walk()
[all …]
xfrm_user.c 869 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1]; in xfrm_dump_sa_done() local
873 xfrm_state_walk_done(walk, net); in xfrm_dump_sa_done()
881 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1]; in xfrm_dump_sa() local
917 xfrm_state_walk_init(walk, proto, filter); in xfrm_dump_sa()
920 (void) xfrm_state_walk(net, walk, dump_one_state, &info); in xfrm_dump_sa()
1473 xp->walk.dead = 1; in xfrm_policy_construct()
1625 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; in xfrm_dump_policy_done() local
1628 xfrm_policy_walk_done(walk, net); in xfrm_dump_policy_done()
1635 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; in xfrm_dump_policy() local
1648 xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY); in xfrm_dump_policy()
[all …]
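All three files drive the same walk lifecycle, which af_key.c's gen_reqid() (further down) shows end to end: init, iterate with a callback, done. A condensed sketch with a hypothetical counting callback:

    static int example_count_one(struct xfrm_policy *xp, int dir, int count,
                                 void *ptr)
    {
            (*(int *)ptr)++;        /* visit one policy */
            return 0;               /* non-zero stops the walk early */
    }

    static int example_count_policies(struct net *net)
    {
            struct xfrm_policy_walk walk;
            int n = 0;

            xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_MAIN);
            xfrm_policy_walk(net, &walk, example_count_one, &n);
            xfrm_policy_walk_done(&walk, net);
            return n;
    }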
/linux-4.1.27/drivers/staging/lustre/lustre/ldlm/
interval_tree.c 217 struct interval_node *walk = root; in interval_find() local
220 while (walk) { in interval_find()
221 rc = extent_compare(ex, &walk->in_extent); in interval_find()
225 walk = walk->in_left; in interval_find()
227 walk = walk->in_right; in interval_find()
230 return walk; in interval_find()
/linux-4.1.27/arch/x86/crypto/sha-mb/
sha1_mb.c 388 nbytes = crypto_ahash_walk_done(&rctx->walk, 0); in sha_finish_walk()
394 if (crypto_ahash_walk_last(&rctx->walk)) { in sha_finish_walk()
402 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag); in sha_finish_walk()
513 nbytes = crypto_ahash_walk_first(req, &rctx->walk); in sha1_mb_update()
520 if (crypto_ahash_walk_last(&rctx->walk)) in sha1_mb_update()
527 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, HASH_UPDATE); in sha1_mb_update()
571 nbytes = crypto_ahash_walk_first(req, &rctx->walk); in sha1_mb_finup()
578 if (crypto_ahash_walk_last(&rctx->walk)) { in sha1_mb_finup()
590 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag); in sha1_mb_finup()
/linux-4.1.27/net/sched/
cls_tcindex.c 141 struct tcindex_filter __rcu **walk; in tcindex_delete() local
152 walk = p->h + i; in tcindex_delete()
153 for (f = rtnl_dereference(*walk); f; in tcindex_delete()
154 walk = &f->next, f = rtnl_dereference(*walk)) { in tcindex_delete()
162 rcu_assign_pointer(*walk, rtnl_dereference(f->next)); in tcindex_delete()
561 .walk = tcindex_walk,
sch_ingress.c 124 .walk = ingress_walk,
act_api.c 297 if (!act->walk) in tcf_register_action()
298 act->walk = tcf_generic_walker; in tcf_register_action()
804 err = a.ops->walk(skb, &dcb, RTM_DELACTION, &a); in tca_action_flush()
1063 ret = a_o->walk(skb, cb, RTM_GETACTION, &a); in tc_dump_action()
cls_cgroup.c 216 .walk = cls_cgroup_walk,
cls_api.c 486 if (tp->ops->walk == NULL) in tc_dump_tfilter()
494 tp->ops->walk(tp, &arg.w); in tc_dump_tfilter()
sch_mq.c 232 .walk = mq_walk,
cls_basic.c 294 .walk = basic_walk,
sch_prio.c 370 .walk = prio_walk,
sch_red.c 357 .walk = red_walk,
sch_multiq.c 406 .walk = multiq_walk,
act_police.c 356 .walk = tcf_act_police_walker
cls_bpf.c 480 .walk = cls_bpf_walk,
cls_fw.c 421 .walk = fw_walk,
sch_mqprio.c 399 .walk = mqprio_walk,
sch_dsmark.c 477 .walk = dsmark_walk,
sch_tbf.c 547 .walk = tbf_walk,
sch_api.c 164 if (!(cops->get && cops->put && cops->walk && cops->leaf)) in register_qdisc()
1084 q->ops->cl_ops->walk(q, &arg.w); in check_loop()
1753 q->ops->cl_ops->walk(q, &arg.w); in tc_dump_tclass_qdisc()
sch_drr.c 500 .walk = drr_walk,
sch_choke.c 605 .walk = choke_walk,
sch_fq_codel.c 591 .walk = fq_codel_walk,
cls_route.c 657 .walk = route4_walk,
sch_atm.c 659 .walk = atm_tc_walk,
cls_flow.c 676 .walk = flow_walk,
sch_sfb.c 689 .walk = sfb_walk,
cls_rsvp.h 716 .walk = rsvp_walk,
sch_sfq.c 910 .walk = sfq_walk,
sch_netem.c 1084 .walk = netem_walk,
cls_u32.c 1061 .walk = u32_walk,
sch_qfq.c 1558 .walk = qfq_walk,
sch_htb.c 1596 .walk = htb_walk,
sch_hfsc.c 1721 .walk = hfsc_walk
sch_cbq.c 2026 .walk = cbq_walk,
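Every .walk hook above implements the visitor contract of struct Qdisc_class_ops (declared in sch_generic.h below): honour arg->stop and arg->skip, count visited classes, and hand each class handle to arg->fn. A hedged sketch patterned after the fixed-band walkers such as prio_walk:

    static void example_walk(struct Qdisc *sch, struct qdisc_walker *arg)
    {
            unsigned int band, bands = 3;   /* illustrative class count */

            if (arg->stop)
                    return;

            for (band = 0; band < bands; band++) {
                    if (arg->count < arg->skip) {   /* resume a partial dump */
                            arg->count++;
                            continue;
                    }
                    if (arg->fn(sch, band + 1, arg) < 0) {
                            arg->stop = 1;
                            break;
                    }
                    arg->count++;
            }
    }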
/linux-4.1.27/drivers/vfio/pci/
vfio_pci.c 402 struct vfio_pci_walk_info *walk = data; in vfio_pci_walk_wrapper() local
404 if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot)) in vfio_pci_walk_wrapper()
405 walk->ret = walk->fn(pdev, walk->data); in vfio_pci_walk_wrapper()
407 return walk->ret; in vfio_pci_walk_wrapper()
415 struct vfio_pci_walk_info walk = { in vfio_pci_for_each_slot_or_bus() local
419 pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk); in vfio_pci_for_each_slot_or_bus()
421 return walk.ret; in vfio_pci_for_each_slot_or_bus()
/linux-4.1.27/ipc/
sem.c 790 struct list_head *walk; in wake_const_ops() local
799 walk = pending_list->next; in wake_const_ops()
800 while (walk != pending_list) { in wake_const_ops()
803 q = container_of(walk, struct sem_queue, list); in wake_const_ops()
804 walk = walk->next; in wake_const_ops()
891 struct list_head *walk; in update_queue() local
901 walk = pending_list->next; in update_queue()
902 while (walk != pending_list) { in update_queue()
905 q = container_of(walk, struct sem_queue, list); in update_queue()
906 walk = walk->next; in update_queue()
mqueue.c 544 struct ext_wait_queue *walk; in wq_add() local
548 list_for_each_entry(walk, &info->e_wait_q[sr].list, list) { in wq_add()
549 if (walk->task->static_prio <= current->static_prio) { in wq_add()
550 list_add_tail(&ewp->list, &walk->list); in wq_add()
/linux-4.1.27/net/atm/
clip.c 88 struct clip_vcc **walk; in unlink_clip_vcc() local
96 for (walk = &entry->vccs; *walk; walk = &(*walk)->next) in unlink_clip_vcc()
97 if (*walk == clip_vcc) { in unlink_clip_vcc()
100 *walk = clip_vcc->next; /* atomic */ in unlink_clip_vcc()
common.c 321 struct atm_vcc *walk; in check_ci() local
324 walk = atm_sk(s); in check_ci()
325 if (walk->dev != vcc->dev) in check_ci()
327 if (test_bit(ATM_VF_ADDR, &walk->flags) && walk->vpi == vpi && in check_ci()
328 walk->vci == vci && ((walk->qos.txtp.traffic_class != in check_ci()
330 (walk->qos.rxtp.traffic_class != ATM_NONE && in check_ci()
/linux-4.1.27/net/l2tp/
l2tp_debugfs.c 107 struct hlist_node *walk; in l2tp_dfs_seq_tunnel_show() local
112 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { in l2tp_dfs_seq_tunnel_show()
115 session = hlist_entry(walk, struct l2tp_session, hlist); in l2tp_dfs_seq_tunnel_show()
l2tp_core.c 1241 struct hlist_node *walk; in l2tp_tunnel_closeall() local
1253 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { in l2tp_tunnel_closeall()
1254 session = hlist_entry(walk, struct l2tp_session, hlist); in l2tp_tunnel_closeall()
/linux-4.1.27/arch/s390/mm/
pgtable.c 1230 unsigned long next, struct mm_walk *walk) in __s390_enable_skey() argument
1242 ptep_flush_direct(walk->mm, addr, pte); in __s390_enable_skey()
1257 struct mm_walk walk = { .pte_entry = __s390_enable_skey }; in s390_enable_skey() local
1277 walk.mm = mm; in s390_enable_skey()
1278 walk_page_range(0, TASK_SIZE, &walk); in s390_enable_skey()
1290 unsigned long next, struct mm_walk *walk) in __s390_reset_cmma() argument
1302 struct mm_walk walk = { .pte_entry = __s390_reset_cmma }; in s390_reset_cmma() local
1305 walk.mm = mm; in s390_reset_cmma()
1306 walk_page_range(0, TASK_SIZE, &walk); in s390_reset_cmma()
/linux-4.1.27/drivers/crypto/ux500/cryp/
cryp_core.c 883 struct ablkcipher_walk walk; in ablk_crypt() local
898 ablkcipher_walk_init(&walk, areq->dst, areq->src, areq->nbytes); in ablk_crypt()
899 ret = ablkcipher_walk_phys(areq, &walk); in ablk_crypt()
907 while ((nbytes = walk.nbytes) > 0) { in ablk_crypt()
908 ctx->iv = walk.iv; in ablk_crypt()
909 src_paddr = (page_to_phys(walk.src.page) + walk.src.offset); in ablk_crypt()
912 dst_paddr = (page_to_phys(walk.dst.page) + walk.dst.offset); in ablk_crypt()
922 ret = ablkcipher_walk_done(areq, &walk, nbytes); in ablk_crypt()
926 ablkcipher_walk_complete(&walk); in ablk_crypt()
/linux-4.1.27/arch/powerpc/mm/
subpage-prot.c 135 unsigned long end, struct mm_walk *walk) in subpage_walk_pmd_entry() argument
137 struct vm_area_struct *vma = walk->vma; in subpage_walk_pmd_entry()
/linux-4.1.27/tools/testing/selftests/net/
psock_tpacket.c 83 void (*walk)(int sock, struct ring *ring); member
600 ring->walk = walk_v1_v2; in __v1_v2_fill()
620 ring->walk = walk_v3; in __v3_fill()
704 ring->walk(sock, ring); in walk_ring()
/linux-4.1.27/include/net/
xfrm.h 511 struct xfrm_policy_walk_entry walk; member
540 struct xfrm_policy_walk_entry walk; member
1425 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
1427 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1429 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
1576 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
1577 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1580 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
act_api.h 97 int (*walk)(struct sk_buff *, struct netlink_callback *, int, struct tc_action *); member
sch_generic.h 167 void (*walk)(struct Qdisc *, struct qdisc_walker * arg); member
227 void (*walk)(struct tcf_proto*, struct tcf_walker *arg); member
/linux-4.1.27/include/linux/
Dmm.h1134 unsigned long next, struct mm_walk *walk);
1136 unsigned long next, struct mm_walk *walk);
1138 struct mm_walk *walk);
1141 struct mm_walk *walk);
1143 struct mm_walk *walk);
1150 struct mm_walk *walk);
1151 int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
/linux-4.1.27/scripts/gdb/linux/
symbols.py 79 for root, dirs, files in os.walk(path):
/linux-4.1.27/Documentation/networking/
fib_trie.txt 76 it is run to optimize and reorganize. It will walk the trie upwards
103 slower than the corresponding fib_hash function, as we have to walk the
tcp.txt 102 On a timer we walk the retransmit list to send any retransmits, update the
packet_mmap.txt 1038 frames to be updated resp. the frame handed over to the application, iv) walk
filter.txt 422 will walk through the pcap file continuing from the current packet and
/linux-4.1.27/fs/jffs2/
README.Locking 88 erase_completion_lock. So you can walk the list only while holding the
89 erase_completion_lock, and can drop the lock temporarily mid-walk as
/linux-4.1.27/net/netfilter/
nft_rbtree.c 260 .walk = nft_rbtree_walk,
nft_hash.c 374 .walk = nft_hash_walk,
nf_tables_api.c 2879 set->ops->walk(ctx, set, &iter); in nf_tables_bind_set()
3120 set->ops->walk(&ctx, set, &args.iter); in nf_tables_dump_set()
4174 set->ops->walk(ctx, set, &iter); in nf_tables_check_loops()
/linux-4.1.27/drivers/crypto/ux500/hash/
hash_core.c 1087 struct crypto_hash_walk walk; in hash_hw_update() local
1088 int msg_length = crypto_hash_walk_first(req, &walk); in hash_hw_update()
1111 data_buffer = walk.data; in hash_hw_update()
1121 msg_length = crypto_hash_walk_done(&walk, 0); in hash_hw_update()
/linux-4.1.27/arch/ia64/kernel/
efi.c 305 walk (efi_freemem_callback_t callback, void *arg, u64 attr) in walk() function
329 walk(callback, arg, EFI_MEMORY_WB); in efi_memmap_walk()
339 walk(callback, arg, EFI_MEMORY_UC); in efi_memmap_walk_uc()
/linux-4.1.27/Documentation/
robust-futex-ABI.txt 87 the kernel will walk this list, mark any such locks with a bit
118 list 'head' is, and to walk the list on thread exit, handling locks
BUG-HUNTING 122 And then walk through that file, one routine at a time and
clk.txt 116 Let's walk through enabling this clk from driver code:
sysfs-rules.txt 160 by its subsystem value. You need to walk up the chain until you find
/linux-4.1.27/Documentation/virtual/kvm/
mmu.txt 285 - walk shadow page table
293 - if needed, walk the guest page tables to determine the guest translation
299 - walk the shadow page table to find the spte for the translation,
311 - walk the shadow page hierarchy and drop affected translations
/linux-4.1.27/drivers/net/ethernet/sun/
sungem.c 663 int walk = entry; in gem_tx() local
668 walk = NEXT_TX(walk); in gem_tx()
669 if (walk == limit) in gem_tx()
671 if (walk == last) in gem_tx()
/linux-4.1.27/Documentation/cgroups/
freezer-subsystem.txt 13 walk /proc or invoke a kernel interface to gather information about the
/linux-4.1.27/Documentation/block/
biovecs.txt 53 it had to walk two different bios at the same time, keeping both bi_idx and
/linux-4.1.27/Documentation/locking/
rt-mutex-design.txt 401 High level overview of the PI chain walk
404 The PI chain walk is implemented by the function rt_mutex_adjust_prio_chain.
449 walk is only needed when a new top pi waiter is made to a task.
555 Taking of a mutex (The walk through)
558 OK, now let's take a look at the detailed walk through of what happens when
/linux-4.1.27/net/key/
af_key.c 1867 struct xfrm_policy_walk walk; in gen_reqid() local
1877 xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_MAIN); in gen_reqid()
1878 rc = xfrm_policy_walk(net, &walk, check_reqid, (void*)&reqid); in gen_reqid()
1879 xfrm_policy_walk_done(&walk, net); in gen_reqid()
2298 xp->walk.dead = 1; in pfkey_spdadd()
3270 xp->walk.dead = 1; in pfkey_compile_policy()
/linux-4.1.27/include/net/netfilter/
nf_tables.h 259 void (*walk)(const struct nft_ctx *ctx, member
/linux-4.1.27/Documentation/timers/
hrtimers.txt 97 queued timers, without having to walk the rbtree.
/linux-4.1.27/Documentation/vm/
unevictable-lru.txt 507 for the munlock case, calls __munlock_vma_pages_range() to walk the page table
621 processing. Again, these functions walk the respective reverse maps looking
639 Note that try_to_munlock()'s reverse map walk must visit every VMA in a page's
transhuge.txt 347 pagetable walk). If the second pmd_trans_huge returns false, you
/linux-4.1.27/fs/btrfs/
raid56.c 664 int walk = 0; in lock_stripe_add() local
668 walk++; in lock_stripe_add()
/linux-4.1.27/scripts/
analyze_suspend.py 2833 for dirname, dirnames, filenames in os.walk('/sys/devices'):
2903 for dirname, dirnames, filenames in os.walk('/sys/devices'):
3330 for dirname, dirnames, filenames in os.walk(subdir):
/linux-4.1.27/net/ipv4/
tcp_input.c 1790 goto walk; in tcp_sacktag_write_queue()
1807 walk: in tcp_sacktag_write_queue()
/linux-4.1.27/drivers/scsi/aic7xxx/
aic79xx.seq 556 * manually walk the list counting MAXCMDCNT elements
722 * Brute force walk.
/linux-4.1.27/arch/arm/
Kconfig 1161 r3p*) erratum. A speculative memory access may cause a page table walk
/linux-4.1.27/Documentation/scsi/
ChangeLog.1992-1997 829 * scsi.c: When incrementing usage count, walk block linked list
/linux-4.1.27/Documentation/virtual/uml/
UserModeLinux-HOWTO.txt 2757 is at module_list. If it's not, walk down the next links, looking at