/linux-4.1.27/crypto/ |
D | blkcipher.c | 40 struct blkcipher_walk *walk); 42 struct blkcipher_walk *walk); 44 static inline void blkcipher_map_src(struct blkcipher_walk *walk) in blkcipher_map_src() argument 46 walk->src.virt.addr = scatterwalk_map(&walk->in); in blkcipher_map_src() 49 static inline void blkcipher_map_dst(struct blkcipher_walk *walk) in blkcipher_map_dst() argument 51 walk->dst.virt.addr = scatterwalk_map(&walk->out); in blkcipher_map_dst() 54 static inline void blkcipher_unmap_src(struct blkcipher_walk *walk) in blkcipher_unmap_src() argument 56 scatterwalk_unmap(walk->src.virt.addr); in blkcipher_unmap_src() 59 static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk) in blkcipher_unmap_dst() argument 61 scatterwalk_unmap(walk->dst.virt.addr); in blkcipher_unmap_dst() [all …]
|
D | scatterwalk.c | 33 void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg) in scatterwalk_start() argument 35 walk->sg = sg; in scatterwalk_start() 39 walk->offset = sg->offset; in scatterwalk_start() 43 void *scatterwalk_map(struct scatter_walk *walk) in scatterwalk_map() argument 45 return kmap_atomic(scatterwalk_page(walk)) + in scatterwalk_map() 46 offset_in_page(walk->offset); in scatterwalk_map() 50 static void scatterwalk_pagedone(struct scatter_walk *walk, int out, in scatterwalk_pagedone() argument 56 page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT); in scatterwalk_pagedone() 62 walk->offset += PAGE_SIZE - 1; in scatterwalk_pagedone() 63 walk->offset &= PAGE_MASK; in scatterwalk_pagedone() [all …]
|
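The helpers above are the whole scatter_walk surface: start, map, advance, done. A minimal sketch of how a consumer drives them to pull bytes out of a scatterlist, mirroring the loop in crypto/ccm.c's get_data_to_compute() further down (sg_walk_copy is an illustrative name, not an in-tree helper):

    #include <crypto/scatterwalk.h>
    #include <linux/scatterlist.h>
    #include <linux/string.h>

    static void sg_walk_copy(u8 *dst, struct scatterlist *sg, unsigned int len)
    {
        struct scatter_walk walk;
        unsigned int n;
        u8 *src;

        scatterwalk_start(&walk, sg);
        while (len) {
            n = scatterwalk_clamp(&walk, len);   /* bytes usable in this page */
            if (!n) {                            /* current sg entry exhausted */
                scatterwalk_start(&walk, sg_next(walk.sg));
                n = scatterwalk_clamp(&walk, len);
            }
            src = scatterwalk_map(&walk);        /* kmap_atomic + page offset */
            memcpy(dst, src, n);
            scatterwalk_unmap(src);
            dst += n;
            len -= n;
            scatterwalk_advance(&walk, n);
            scatterwalk_done(&walk, 0, len);     /* 0 = reading; flush/unpin on page crossing */
        }
    }

The in-tree scatterwalk_copychunks() (used by omap-aes.c and nx-aes-gcm.c below) wraps essentially this loop.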
D | ahash.c | 43 static int hash_walk_next(struct crypto_hash_walk *walk) in hash_walk_next() argument 45 unsigned int alignmask = walk->alignmask; in hash_walk_next() 46 unsigned int offset = walk->offset; in hash_walk_next() 47 unsigned int nbytes = min(walk->entrylen, in hash_walk_next() 50 if (walk->flags & CRYPTO_ALG_ASYNC) in hash_walk_next() 51 walk->data = kmap(walk->pg); in hash_walk_next() 53 walk->data = kmap_atomic(walk->pg); in hash_walk_next() 54 walk->data += offset; in hash_walk_next() 63 walk->entrylen -= nbytes; in hash_walk_next() 67 static int hash_walk_new_entry(struct crypto_hash_walk *walk) in hash_walk_new_entry() argument [all …]
|
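The hash walk built here is consumed with a first/done loop; shash.c further down shows the canonical form, reproduced as a self-contained sketch (demo_hash_update is a stand-in name for what is shash_ahash_update() in the tree):

    #include <crypto/internal/hash.h>

    static int demo_hash_update(struct ahash_request *req,
                                struct shash_desc *desc)
    {
        struct crypto_hash_walk walk;
        int nbytes;

        /* walk_first maps the first span of the request's scatterlist;
         * walk_done unmaps it, folds in any error, and maps the next.
         */
        for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
             nbytes = crypto_hash_walk_done(&walk, nbytes))
            nbytes = crypto_shash_update(desc, walk.data, nbytes);

        return nbytes;   /* 0 when the walk is exhausted, negative on error */
    }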
D | ablkcipher.c | 47 void __ablkcipher_walk_complete(struct ablkcipher_walk *walk) in __ablkcipher_walk_complete() argument 51 list_for_each_entry_safe(p, tmp, &walk->buffers, entry) { in __ablkcipher_walk_complete() 59 static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk, in ablkcipher_queue_write() argument 62 p->dst = walk->out; in ablkcipher_queue_write() 63 list_add_tail(&p->entry, &walk->buffers); in ablkcipher_queue_write() 76 static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk, in ablkcipher_done_slow() argument 82 unsigned int len_this_page = scatterwalk_pagelen(&walk->out); in ablkcipher_done_slow() 86 scatterwalk_advance(&walk->out, n); in ablkcipher_done_slow() 90 scatterwalk_start(&walk->out, sg_next(walk->out.sg)); in ablkcipher_done_slow() 96 static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk, in ablkcipher_done_fast() argument [all …]
|
D | cbc.c | 43 struct blkcipher_walk *walk, in crypto_cbc_encrypt_segment() argument 49 unsigned int nbytes = walk->nbytes; in crypto_cbc_encrypt_segment() 50 u8 *src = walk->src.virt.addr; in crypto_cbc_encrypt_segment() 51 u8 *dst = walk->dst.virt.addr; in crypto_cbc_encrypt_segment() 52 u8 *iv = walk->iv; in crypto_cbc_encrypt_segment() 67 struct blkcipher_walk *walk, in crypto_cbc_encrypt_inplace() argument 73 unsigned int nbytes = walk->nbytes; in crypto_cbc_encrypt_inplace() 74 u8 *src = walk->src.virt.addr; in crypto_cbc_encrypt_inplace() 75 u8 *iv = walk->iv; in crypto_cbc_encrypt_inplace() 85 memcpy(walk->iv, iv, bsize); in crypto_cbc_encrypt_inplace() [all …]
|
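The excerpt elides the loop body of crypto_cbc_encrypt_segment(). A hedged reconstruction of the elided span, simplified to the public single-block cipher API (the in-tree code calls the cia_encrypt hook through a function pointer instead):

    #include <crypto/algapi.h>
    #include <linux/crypto.h>
    #include <linux/string.h>

    static int cbc_encrypt_segment_sketch(struct blkcipher_walk *walk,
                                          struct crypto_cipher *tfm)
    {
        int bsize = crypto_cipher_blocksize(tfm);
        unsigned int nbytes = walk->nbytes;
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        u8 *iv = walk->iv;

        do {
            crypto_xor(iv, src, bsize);              /* iv ^= P[i]       */
            crypto_cipher_encrypt_one(tfm, dst, iv); /* C[i] = E_k(iv)   */
            memcpy(iv, dst, bsize);                  /* chain: iv = C[i] */
            src += bsize;
            dst += bsize;
        } while ((nbytes -= bsize) >= bsize);

        return nbytes;                               /* leftover < bsize */
    }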
D | pcbc.c | 46 struct blkcipher_walk *walk, in crypto_pcbc_encrypt_segment() argument 52 unsigned int nbytes = walk->nbytes; in crypto_pcbc_encrypt_segment() 53 u8 *src = walk->src.virt.addr; in crypto_pcbc_encrypt_segment() 54 u8 *dst = walk->dst.virt.addr; in crypto_pcbc_encrypt_segment() 55 u8 *iv = walk->iv; in crypto_pcbc_encrypt_segment() 71 struct blkcipher_walk *walk, in crypto_pcbc_encrypt_inplace() argument 77 unsigned int nbytes = walk->nbytes; in crypto_pcbc_encrypt_inplace() 78 u8 *src = walk->src.virt.addr; in crypto_pcbc_encrypt_inplace() 79 u8 *iv = walk->iv; in crypto_pcbc_encrypt_inplace() 92 memcpy(walk->iv, iv, bsize); in crypto_pcbc_encrypt_inplace() [all …]
|
D | salsa20_generic.c | 181 struct blkcipher_walk walk; in encrypt() local 186 blkcipher_walk_init(&walk, dst, src, nbytes); in encrypt() 187 err = blkcipher_walk_virt_block(desc, &walk, 64); in encrypt() 189 salsa20_ivsetup(ctx, walk.iv); in encrypt() 191 if (likely(walk.nbytes == nbytes)) in encrypt() 193 salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, in encrypt() 194 walk.src.virt.addr, nbytes); in encrypt() 195 return blkcipher_walk_done(desc, &walk, 0); in encrypt() 198 while (walk.nbytes >= 64) { in encrypt() 199 salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, in encrypt() [all …]
|
D | ctr.c | 55 static void crypto_ctr_crypt_final(struct blkcipher_walk *walk, in crypto_ctr_crypt_final() argument 60 u8 *ctrblk = walk->iv; in crypto_ctr_crypt_final() 63 u8 *src = walk->src.virt.addr; in crypto_ctr_crypt_final() 64 u8 *dst = walk->dst.virt.addr; in crypto_ctr_crypt_final() 65 unsigned int nbytes = walk->nbytes; in crypto_ctr_crypt_final() 74 static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk, in crypto_ctr_crypt_segment() argument 80 u8 *ctrblk = walk->iv; in crypto_ctr_crypt_segment() 81 u8 *src = walk->src.virt.addr; in crypto_ctr_crypt_segment() 82 u8 *dst = walk->dst.virt.addr; in crypto_ctr_crypt_segment() 83 unsigned int nbytes = walk->nbytes; in crypto_ctr_crypt_segment() [all …]
|
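crypto_ctr_crypt_final() above handles the trailing partial block: encrypt the counter once to get keystream, XOR only the bytes that remain, and bump the counter. A sketch of its body, assuming a block size of at most 16 bytes (the real code sizes and aligns a stack buffer instead):

    #include <crypto/algapi.h>
    #include <linux/crypto.h>
    #include <linux/string.h>

    static void ctr_final_sketch(struct blkcipher_walk *walk,
                                 struct crypto_cipher *tfm)
    {
        unsigned int bsize = crypto_cipher_blocksize(tfm);
        u8 keystream[16];                    /* assumption: bsize <= 16 */
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        unsigned int nbytes = walk->nbytes;  /* < bsize at this point */

        crypto_cipher_encrypt_one(tfm, keystream, walk->iv);
        crypto_xor(keystream, src, nbytes);  /* XOR only the tail bytes */
        memcpy(dst, keystream, nbytes);
        crypto_inc(walk->iv, bsize);         /* big-endian counter bump */
    }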
D | ecb.c | 42 struct blkcipher_walk *walk, in crypto_ecb_crypt() argument 50 err = blkcipher_walk_virt(desc, walk); in crypto_ecb_crypt() 52 while ((nbytes = walk->nbytes)) { in crypto_ecb_crypt() 53 u8 *wsrc = walk->src.virt.addr; in crypto_ecb_crypt() 54 u8 *wdst = walk->dst.virt.addr; in crypto_ecb_crypt() 63 err = blkcipher_walk_done(desc, walk, nbytes); in crypto_ecb_crypt() 73 struct blkcipher_walk walk; in crypto_ecb_encrypt() local 78 blkcipher_walk_init(&walk, dst, src, nbytes); in crypto_ecb_encrypt() 79 return crypto_ecb_crypt(desc, &walk, child, in crypto_ecb_encrypt() 87 struct blkcipher_walk walk; in crypto_ecb_decrypt() local [all …]
|
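ecb.c shows the canonical blkcipher walk: init the walk over the src/dst scatterlists, let blkcipher_walk_virt() map a contiguous chunk, process whole blocks, then hand the remainder back to blkcipher_walk_done(), which maps the next chunk. Self-contained sketch (demo_ecb_encrypt is a stand-in; the in-tree version takes a function pointer so encrypt and decrypt share one loop):

    #include <crypto/algapi.h>
    #include <linux/crypto.h>

    static int demo_ecb_encrypt(struct blkcipher_desc *desc,
                                struct scatterlist *dst, struct scatterlist *src,
                                unsigned int nbytes, struct crypto_cipher *child)
    {
        unsigned int bsize = crypto_cipher_blocksize(child);
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        while ((nbytes = walk.nbytes)) {
            u8 *wsrc = walk.src.virt.addr;   /* mapped, contiguous chunk */
            u8 *wdst = walk.dst.virt.addr;

            do {                             /* whole blocks in this chunk */
                crypto_cipher_encrypt_one(child, wdst, wsrc);
                wsrc += bsize;
                wdst += bsize;
            } while ((nbytes -= bsize) >= bsize);

            /* leftover < bsize goes back; done() maps the next chunk */
            err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
    }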
D | crypto_null.c | 77 struct blkcipher_walk walk; in skcipher_null_crypt() local 80 blkcipher_walk_init(&walk, dst, src, nbytes); in skcipher_null_crypt() 81 err = blkcipher_walk_virt(desc, &walk); in skcipher_null_crypt() 83 while (walk.nbytes) { in skcipher_null_crypt() 84 if (walk.src.virt.addr != walk.dst.virt.addr) in skcipher_null_crypt() 85 memcpy(walk.dst.virt.addr, walk.src.virt.addr, in skcipher_null_crypt() 86 walk.nbytes); in skcipher_null_crypt() 87 err = blkcipher_walk_done(desc, &walk, 0); in skcipher_null_crypt()
|
D | arc4.c | 99 struct blkcipher_walk walk; in ecb_arc4_crypt() local 102 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_arc4_crypt() 104 err = blkcipher_walk_virt(desc, &walk); in ecb_arc4_crypt() 106 while (walk.nbytes > 0) { in ecb_arc4_crypt() 107 u8 *wsrc = walk.src.virt.addr; in ecb_arc4_crypt() 108 u8 *wdst = walk.dst.virt.addr; in ecb_arc4_crypt() 110 arc4_crypt(ctx, wdst, wsrc, walk.nbytes); in ecb_arc4_crypt() 112 err = blkcipher_walk_done(desc, &walk, 0); in ecb_arc4_crypt()
|
D | xts.c | 175 struct blkcipher_walk walk; in xts_crypt() local 183 blkcipher_walk_init(&walk, sdst, ssrc, nbytes); in xts_crypt() 185 err = blkcipher_walk_virt(desc, &walk); in xts_crypt() 186 nbytes = walk.nbytes; in xts_crypt() 191 src = (be128 *)walk.src.virt.addr; in xts_crypt() 192 dst = (be128 *)walk.dst.virt.addr; in xts_crypt() 195 req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv); in xts_crypt() 225 *(be128 *)walk.iv = *t; in xts_crypt() 227 err = blkcipher_walk_done(desc, &walk, nbytes); in xts_crypt() 228 nbytes = walk.nbytes; in xts_crypt() [all …]
|
D | lrw.c | 224 struct blkcipher_walk walk; in lrw_crypt() local 232 blkcipher_walk_init(&walk, sdst, ssrc, nbytes); in lrw_crypt() 234 err = blkcipher_walk_virt(desc, &walk); in lrw_crypt() 235 nbytes = walk.nbytes; in lrw_crypt() 239 nblocks = min(walk.nbytes / bsize, max_blks); in lrw_crypt() 240 src = (be128 *)walk.src.virt.addr; in lrw_crypt() 241 dst = (be128 *)walk.dst.virt.addr; in lrw_crypt() 244 iv = (be128 *)walk.iv; in lrw_crypt() 282 err = blkcipher_walk_done(desc, &walk, nbytes); in lrw_crypt() 283 nbytes = walk.nbytes; in lrw_crypt() [all …]
|
D | shash.c | 222 struct crypto_hash_walk walk; in shash_ahash_update() local 225 for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0; in shash_ahash_update() 226 nbytes = crypto_hash_walk_done(&walk, nbytes)) in shash_ahash_update() 227 nbytes = crypto_shash_update(desc, walk.data, nbytes); in shash_ahash_update() 245 struct crypto_hash_walk walk; in shash_ahash_finup() local 248 nbytes = crypto_hash_walk_first(req, &walk); in shash_ahash_finup() 253 nbytes = crypto_hash_walk_last(&walk) ? in shash_ahash_finup() 254 crypto_shash_finup(desc, walk.data, nbytes, in shash_ahash_finup() 256 crypto_shash_update(desc, walk.data, nbytes); in shash_ahash_finup() 257 nbytes = crypto_hash_walk_done(&walk, nbytes); in shash_ahash_finup() [all …]
|
D | ccm.c | 207 struct scatter_walk walk; in get_data_to_compute() local 211 scatterwalk_start(&walk, sg); in get_data_to_compute() 214 n = scatterwalk_clamp(&walk, len); in get_data_to_compute() 216 scatterwalk_start(&walk, sg_next(walk.sg)); in get_data_to_compute() 217 n = scatterwalk_clamp(&walk, len); in get_data_to_compute() 219 data_src = scatterwalk_map(&walk); in get_data_to_compute() 225 scatterwalk_advance(&walk, n); in get_data_to_compute() 226 scatterwalk_done(&walk, 0, len); in get_data_to_compute()
|
/linux-4.1.27/mm/ |
D | pagewalk.c | 7 struct mm_walk *walk) in walk_pte_range() argument 14 err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk); in walk_pte_range() 28 struct mm_walk *walk) in walk_pmd_range() argument 38 if (pmd_none(*pmd) || !walk->vma) { in walk_pmd_range() 39 if (walk->pte_hole) in walk_pmd_range() 40 err = walk->pte_hole(addr, next, walk); in walk_pmd_range() 49 if (walk->pmd_entry) in walk_pmd_range() 50 err = walk->pmd_entry(pmd, addr, next, walk); in walk_pmd_range() 58 if (!walk->pte_entry) in walk_pmd_range() 61 split_huge_page_pmd_mm(walk->mm, addr, pmd); in walk_pmd_range() [all …]
|
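walk_page_range() drives the callbacks in struct mm_walk; mincore.c and madvise.c below are real users. A minimal sketch of the contract (count_pages/count_pte are illustrative, not in-tree helpers; the walker expects mmap_sem held):

    #include <linux/mm.h>
    #include <linux/rwsem.h>

    static int count_pte(pte_t *pte, unsigned long addr,
                         unsigned long next, struct mm_walk *walk)
    {
        unsigned long *count = walk->private;

        if (pte_present(*pte))
            (*count)++;
        return 0;                       /* non-zero aborts the walk */
    }

    static unsigned long count_pages(struct mm_struct *mm,
                                     unsigned long start, unsigned long end)
    {
        unsigned long count = 0;
        struct mm_walk walk = {
            .pte_entry = count_pte,     /* called per present-level PTE */
            .mm        = mm,
            .private   = &count,
        };

        down_read(&mm->mmap_sem);       /* caller must hold mmap_sem */
        walk_page_range(start, end, &walk);
        up_read(&mm->mmap_sem);
        return count;
    }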
D | mincore.c | 23 unsigned long end, struct mm_walk *walk) in mincore_hugetlb() argument 27 unsigned char *vec = walk->private; in mincore_hugetlb() 36 walk->private = vec; in mincore_hugetlb() 104 struct mm_walk *walk) in mincore_unmapped_range() argument 106 walk->private += __mincore_unmapped_range(addr, end, in mincore_unmapped_range() 107 walk->vma, walk->private); in mincore_unmapped_range() 112 struct mm_walk *walk) in mincore_pte_range() argument 115 struct vm_area_struct *vma = walk->vma; in mincore_pte_range() 117 unsigned char *vec = walk->private; in mincore_pte_range() 131 ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in mincore_pte_range() [all …]
|
D | madvise.c | 139 unsigned long end, struct mm_walk *walk) in swapin_walk_pmd_entry() argument 142 struct vm_area_struct *vma = walk->private; in swapin_walk_pmd_entry() 176 struct mm_walk walk = { in force_swapin_readahead() local 182 walk_page_range(start, end, &walk); in force_swapin_readahead()
|
D | mempolicy.c | 486 unsigned long end, struct mm_walk *walk) in queue_pages_pte_range() argument 488 struct vm_area_struct *vma = walk->vma; in queue_pages_pte_range() 490 struct queue_pages *qp = walk->private; in queue_pages_pte_range() 500 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in queue_pages_pte_range() 527 struct mm_walk *walk) in queue_pages_hugetlb() argument 530 struct queue_pages *qp = walk->private; in queue_pages_hugetlb() 537 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); in queue_pages_hugetlb() 587 struct mm_walk *walk) in queue_pages_test_walk() argument 589 struct vm_area_struct *vma = walk->vma; in queue_pages_test_walk() 590 struct queue_pages *qp = walk->private; in queue_pages_test_walk()
|
D | memcontrol.c | 4893 struct mm_walk *walk) in mem_cgroup_count_precharge_pte_range() argument 4895 struct vm_area_struct *vma = walk->vma; in mem_cgroup_count_precharge_pte_range() 5057 struct mm_walk *walk) in mem_cgroup_move_charge_pte_range() argument 5060 struct vm_area_struct *vma = walk->vma; in mem_cgroup_move_charge_pte_range()
|
/linux-4.1.27/arch/x86/crypto/ |
D | glue_helper.c | 37 struct blkcipher_walk *walk) in __glue_ecb_crypt_128bit() argument 45 err = blkcipher_walk_virt(desc, walk); in __glue_ecb_crypt_128bit() 47 while ((nbytes = walk->nbytes)) { in __glue_ecb_crypt_128bit() 48 u8 *wsrc = walk->src.virt.addr; in __glue_ecb_crypt_128bit() 49 u8 *wdst = walk->dst.virt.addr; in __glue_ecb_crypt_128bit() 74 err = blkcipher_walk_done(desc, walk, nbytes); in __glue_ecb_crypt_128bit() 85 struct blkcipher_walk walk; in glue_ecb_crypt_128bit() local 87 blkcipher_walk_init(&walk, dst, src, nbytes); in glue_ecb_crypt_128bit() 88 return __glue_ecb_crypt_128bit(gctx, desc, &walk); in glue_ecb_crypt_128bit() 94 struct blkcipher_walk *walk) in __glue_cbc_encrypt_128bit() argument [all …]
|
D | blowfish_glue.c | 80 static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, in ecb_crypt() argument 89 err = blkcipher_walk_virt(desc, walk); in ecb_crypt() 91 while ((nbytes = walk->nbytes)) { in ecb_crypt() 92 u8 *wsrc = walk->src.virt.addr; in ecb_crypt() 93 u8 *wdst = walk->dst.virt.addr; in ecb_crypt() 119 err = blkcipher_walk_done(desc, walk, nbytes); in ecb_crypt() 128 struct blkcipher_walk walk; in ecb_encrypt() local 130 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_encrypt() 131 return ecb_crypt(desc, &walk, blowfish_enc_blk, blowfish_enc_blk_4way); in ecb_encrypt() 137 struct blkcipher_walk walk; in ecb_decrypt() local [all …]
|
D | cast5_avx_glue.c | 60 static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, in ecb_crypt() argument 72 err = blkcipher_walk_virt(desc, walk); in ecb_crypt() 75 while ((nbytes = walk->nbytes)) { in ecb_crypt() 76 u8 *wsrc = walk->src.virt.addr; in ecb_crypt() 77 u8 *wdst = walk->dst.virt.addr; in ecb_crypt() 107 err = blkcipher_walk_done(desc, walk, nbytes); in ecb_crypt() 117 struct blkcipher_walk walk; in ecb_encrypt() local 119 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_encrypt() 120 return ecb_crypt(desc, &walk, true); in ecb_encrypt() 126 struct blkcipher_walk walk; in ecb_decrypt() local [all …]
|
D | des3_ede_glue.c | 86 static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, in ecb_crypt() argument 93 err = blkcipher_walk_virt(desc, walk); in ecb_crypt() 95 while ((nbytes = walk->nbytes)) { in ecb_crypt() 96 u8 *wsrc = walk->src.virt.addr; in ecb_crypt() 97 u8 *wdst = walk->dst.virt.addr; in ecb_crypt() 124 err = blkcipher_walk_done(desc, walk, nbytes); in ecb_crypt() 134 struct blkcipher_walk walk; in ecb_encrypt() local 136 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_encrypt() 137 return ecb_crypt(desc, &walk, ctx->enc_expkey); in ecb_encrypt() 144 struct blkcipher_walk walk; in ecb_decrypt() local [all …]
|
D | salsa20_glue.c | 52 struct blkcipher_walk walk; in encrypt() local 57 blkcipher_walk_init(&walk, dst, src, nbytes); in encrypt() 58 err = blkcipher_walk_virt_block(desc, &walk, 64); in encrypt() 60 salsa20_ivsetup(ctx, walk.iv); in encrypt() 62 if (likely(walk.nbytes == nbytes)) in encrypt() 64 salsa20_encrypt_bytes(ctx, walk.src.virt.addr, in encrypt() 65 walk.dst.virt.addr, nbytes); in encrypt() 66 return blkcipher_walk_done(desc, &walk, 0); in encrypt() 69 while (walk.nbytes >= 64) { in encrypt() 70 salsa20_encrypt_bytes(ctx, walk.src.virt.addr, in encrypt() [all …]
|
D | aesni-intel_glue.c | 378 struct blkcipher_walk walk; in ecb_encrypt() local 381 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_encrypt() 382 err = blkcipher_walk_virt(desc, &walk); in ecb_encrypt() 386 while ((nbytes = walk.nbytes)) { in ecb_encrypt() 387 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, in ecb_encrypt() 390 err = blkcipher_walk_done(desc, &walk, nbytes); in ecb_encrypt() 402 struct blkcipher_walk walk; in ecb_decrypt() local 405 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_decrypt() 406 err = blkcipher_walk_virt(desc, &walk); in ecb_decrypt() 410 while ((nbytes = walk.nbytes)) { in ecb_decrypt() [all …]
|
/linux-4.1.27/arch/arm/crypto/ |
D | aesbs-glue.c | 109 struct blkcipher_walk walk; in aesbs_cbc_encrypt() local 112 blkcipher_walk_init(&walk, dst, src, nbytes); in aesbs_cbc_encrypt() 113 err = blkcipher_walk_virt(desc, &walk); in aesbs_cbc_encrypt() 115 while (walk.nbytes) { in aesbs_cbc_encrypt() 116 u32 blocks = walk.nbytes / AES_BLOCK_SIZE; in aesbs_cbc_encrypt() 117 u8 *src = walk.src.virt.addr; in aesbs_cbc_encrypt() 119 if (walk.dst.virt.addr == walk.src.virt.addr) { in aesbs_cbc_encrypt() 120 u8 *iv = walk.iv; in aesbs_cbc_encrypt() 128 memcpy(walk.iv, iv, AES_BLOCK_SIZE); in aesbs_cbc_encrypt() 130 u8 *dst = walk.dst.virt.addr; in aesbs_cbc_encrypt() [all …]
|
D | aes-ce-glue.c | 170 struct blkcipher_walk walk; in ecb_encrypt() local 175 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_encrypt() 176 err = blkcipher_walk_virt(desc, &walk); in ecb_encrypt() 179 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { in ecb_encrypt() 180 ce_aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr, in ecb_encrypt() 182 err = blkcipher_walk_done(desc, &walk, in ecb_encrypt() 183 walk.nbytes % AES_BLOCK_SIZE); in ecb_encrypt() 193 struct blkcipher_walk walk; in ecb_decrypt() local 198 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_decrypt() 199 err = blkcipher_walk_virt(desc, &walk); in ecb_decrypt() [all …]
|
/linux-4.1.27/arch/arm64/crypto/ |
D | aes-glue.c | 104 struct blkcipher_walk walk; in ecb_encrypt() local 108 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_encrypt() 109 err = blkcipher_walk_virt(desc, &walk); in ecb_encrypt() 112 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { in ecb_encrypt() 113 aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr, in ecb_encrypt() 115 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); in ecb_encrypt() 126 struct blkcipher_walk walk; in ecb_decrypt() local 130 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_decrypt() 131 err = blkcipher_walk_virt(desc, &walk); in ecb_decrypt() 134 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { in ecb_decrypt() [all …]
|
D | aes-ce-ccm-glue.c | 111 struct scatter_walk walk; in ccm_calculate_auth_mac() local 127 scatterwalk_start(&walk, req->assoc); in ccm_calculate_auth_mac() 130 u32 n = scatterwalk_clamp(&walk, len); in ccm_calculate_auth_mac() 134 scatterwalk_start(&walk, sg_next(walk.sg)); in ccm_calculate_auth_mac() 135 n = scatterwalk_clamp(&walk, len); in ccm_calculate_auth_mac() 137 p = scatterwalk_map(&walk); in ccm_calculate_auth_mac() 143 scatterwalk_advance(&walk, n); in ccm_calculate_auth_mac() 144 scatterwalk_done(&walk, 0, len); in ccm_calculate_auth_mac() 153 struct blkcipher_walk walk; in ccm_encrypt() local 171 blkcipher_walk_init(&walk, req->dst, req->src, len); in ccm_encrypt() [all …]
|
/linux-4.1.27/arch/sparc/mm/ |
D | extable.c | 19 const struct exception_table_entry *walk; in search_extable() local 39 for (walk = start; walk <= last; walk++) { in search_extable() 40 if (walk->fixup == 0) { in search_extable() 42 walk++; in search_extable() 47 if (walk->fixup == -1) in search_extable() 50 if (walk->insn == value) in search_extable() 51 return walk; in search_extable() 55 for (walk = start; walk <= (last - 1); walk++) { in search_extable() 56 if (walk->fixup) in search_extable() 59 if (walk[0].insn <= value && walk[1].insn > value) in search_extable() [all …]
|
/linux-4.1.27/include/crypto/ |
D | scatterwalk.h | 58 static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk) in scatterwalk_pagelen() argument 60 unsigned int len = walk->sg->offset + walk->sg->length - walk->offset; in scatterwalk_pagelen() 61 unsigned int len_this_page = offset_in_page(~walk->offset) + 1; in scatterwalk_pagelen() 65 static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk, in scatterwalk_clamp() argument 68 unsigned int len_this_page = scatterwalk_pagelen(walk); in scatterwalk_clamp() 72 static inline void scatterwalk_advance(struct scatter_walk *walk, in scatterwalk_advance() argument 75 walk->offset += nbytes; in scatterwalk_advance() 78 static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk, in scatterwalk_aligned() argument 81 return !(walk->offset & alignmask); in scatterwalk_aligned() 84 static inline struct page *scatterwalk_page(struct scatter_walk *walk) in scatterwalk_page() argument [all …]
|
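The one subtle line above is scatterwalk_pagelen()'s offset_in_page(~walk->offset) + 1, a branch-free spelling of PAGE_SIZE - offset_in_page(walk->offset), i.e. the distance from the current offset to the end of its page:

    /* Worked example with PAGE_SIZE = 4096 (low mask 0xfff) and
     * walk->offset = 0x12f0:
     *
     *   offset_in_page(~0x12f0) + 1 = (~0x12f0 & 0xfff) + 1
     *                               = 0xd0f + 1 = 0xd10   (3344 bytes)
     *                               = 4096 - 0x2f0
     *
     * so len_this_page is exactly the bytes left in the current page,
     * and scatterwalk_pagelen() returns the smaller of that and what
     * remains in the current sg entry.
     */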
D | algapi.h | 190 struct blkcipher_walk *walk, int err); 192 struct blkcipher_walk *walk); 194 struct blkcipher_walk *walk); 196 struct blkcipher_walk *walk, 199 struct blkcipher_walk *walk, 204 struct ablkcipher_walk *walk, int err); 206 struct ablkcipher_walk *walk); 207 void __ablkcipher_walk_complete(struct ablkcipher_walk *walk); 309 static inline void blkcipher_walk_init(struct blkcipher_walk *walk, in blkcipher_walk_init() argument 314 walk->in.sg = src; in blkcipher_walk_init() [all …]
|
D | mcryptd.h | 59 struct crypto_hash_walk walk; member
|
/linux-4.1.27/arch/s390/crypto/ |
D | des_s390.c | 86 u8 *key, struct blkcipher_walk *walk) in ecb_desall_crypt() argument 88 int ret = blkcipher_walk_virt(desc, walk); in ecb_desall_crypt() 91 while ((nbytes = walk->nbytes)) { in ecb_desall_crypt() 94 u8 *out = walk->dst.virt.addr; in ecb_desall_crypt() 95 u8 *in = walk->src.virt.addr; in ecb_desall_crypt() 102 ret = blkcipher_walk_done(desc, walk, nbytes); in ecb_desall_crypt() 109 struct blkcipher_walk *walk) in cbc_desall_crypt() argument 112 int ret = blkcipher_walk_virt(desc, walk); in cbc_desall_crypt() 113 unsigned int nbytes = walk->nbytes; in cbc_desall_crypt() 122 memcpy(param.iv, walk->iv, DES_BLOCK_SIZE); in cbc_desall_crypt() [all …]
|
D | aes_s390.c | 316 struct blkcipher_walk *walk) in ecb_aes_crypt() argument 318 int ret = blkcipher_walk_virt(desc, walk); in ecb_aes_crypt() 321 while ((nbytes = walk->nbytes)) { in ecb_aes_crypt() 324 u8 *out = walk->dst.virt.addr; in ecb_aes_crypt() 325 u8 *in = walk->src.virt.addr; in ecb_aes_crypt() 332 ret = blkcipher_walk_done(desc, walk, nbytes); in ecb_aes_crypt() 343 struct blkcipher_walk walk; in ecb_aes_encrypt() local 348 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_aes_encrypt() 349 return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk); in ecb_aes_encrypt() 357 struct blkcipher_walk walk; in ecb_aes_decrypt() local [all …]
|
/linux-4.1.27/arch/powerpc/crypto/ |
D | aes-spe-glue.c | 183 struct blkcipher_walk walk; in ppc_ecb_encrypt() local 188 blkcipher_walk_init(&walk, dst, src, nbytes); in ppc_ecb_encrypt() 189 err = blkcipher_walk_virt(desc, &walk); in ppc_ecb_encrypt() 191 while ((nbytes = walk.nbytes)) { in ppc_ecb_encrypt() 197 ppc_encrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr, in ppc_ecb_encrypt() 201 err = blkcipher_walk_done(desc, &walk, ubytes); in ppc_ecb_encrypt() 211 struct blkcipher_walk walk; in ppc_ecb_decrypt() local 216 blkcipher_walk_init(&walk, dst, src, nbytes); in ppc_ecb_decrypt() 217 err = blkcipher_walk_virt(desc, &walk); in ppc_ecb_decrypt() 219 while ((nbytes = walk.nbytes)) { in ppc_ecb_decrypt() [all …]
|
/linux-4.1.27/arch/sparc/crypto/ |
D | aes_glue.c | 220 struct blkcipher_walk walk; in ecb_encrypt() local 223 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_encrypt() 224 err = blkcipher_walk_virt(desc, &walk); in ecb_encrypt() 228 while ((nbytes = walk.nbytes)) { in ecb_encrypt() 233 (const u64 *)walk.src.virt.addr, in ecb_encrypt() 234 (u64 *) walk.dst.virt.addr, in ecb_encrypt() 238 err = blkcipher_walk_done(desc, &walk, nbytes); in ecb_encrypt() 249 struct blkcipher_walk walk; in ecb_decrypt() local 253 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_decrypt() 254 err = blkcipher_walk_virt(desc, &walk); in ecb_decrypt() [all …]
|
D | des_glue.c | 98 struct blkcipher_walk walk; in __ecb_crypt() local 101 blkcipher_walk_init(&walk, dst, src, nbytes); in __ecb_crypt() 102 err = blkcipher_walk_virt(desc, &walk); in __ecb_crypt() 109 while ((nbytes = walk.nbytes)) { in __ecb_crypt() 113 des_sparc64_ecb_crypt((const u64 *)walk.src.virt.addr, in __ecb_crypt() 114 (u64 *) walk.dst.virt.addr, in __ecb_crypt() 118 err = blkcipher_walk_done(desc, &walk, nbytes); in __ecb_crypt() 146 struct blkcipher_walk walk; in cbc_encrypt() local 149 blkcipher_walk_init(&walk, dst, src, nbytes); in cbc_encrypt() 150 err = blkcipher_walk_virt(desc, &walk); in cbc_encrypt() [all …]
|
D | camellia_glue.c | 90 struct blkcipher_walk walk; in __ecb_crypt() local 99 blkcipher_walk_init(&walk, dst, src, nbytes); in __ecb_crypt() 100 err = blkcipher_walk_virt(desc, &walk); in __ecb_crypt() 108 while ((nbytes = walk.nbytes)) { in __ecb_crypt() 115 src64 = (const u64 *)walk.src.virt.addr; in __ecb_crypt() 116 dst64 = (u64 *) walk.dst.virt.addr; in __ecb_crypt() 120 err = blkcipher_walk_done(desc, &walk, nbytes); in __ecb_crypt() 153 struct blkcipher_walk walk; in cbc_encrypt() local 162 blkcipher_walk_init(&walk, dst, src, nbytes); in cbc_encrypt() 163 err = blkcipher_walk_virt(desc, &walk); in cbc_encrypt() [all …]
|
/linux-4.1.27/drivers/crypto/vmx/ |
D | aes_ctr.c | 91 struct blkcipher_walk *walk) in p8_aes_ctr_final() argument 93 u8 *ctrblk = walk->iv; in p8_aes_ctr_final() 95 u8 *src = walk->src.virt.addr; in p8_aes_ctr_final() 96 u8 *dst = walk->dst.virt.addr; in p8_aes_ctr_final() 97 unsigned int nbytes = walk->nbytes; in p8_aes_ctr_final() 115 struct blkcipher_walk walk; in p8_aes_ctr_crypt() local 127 blkcipher_walk_init(&walk, dst, src, nbytes); in p8_aes_ctr_crypt() 128 ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); in p8_aes_ctr_crypt() 129 while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { in p8_aes_ctr_crypt() 133 aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, walk.dst.virt.addr, in p8_aes_ctr_crypt() [all …]
|
D | aes_cbc.c | 98 struct blkcipher_walk walk; in p8_aes_cbc_encrypt() local 114 blkcipher_walk_init(&walk, dst, src, nbytes); in p8_aes_cbc_encrypt() 115 ret = blkcipher_walk_virt(desc, &walk); in p8_aes_cbc_encrypt() 116 while ((nbytes = walk.nbytes)) { in p8_aes_cbc_encrypt() 117 aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, in p8_aes_cbc_encrypt() 118 nbytes & AES_BLOCK_MASK, &ctx->enc_key, walk.iv, 1); in p8_aes_cbc_encrypt() 120 ret = blkcipher_walk_done(desc, &walk, nbytes); in p8_aes_cbc_encrypt() 134 struct blkcipher_walk walk; in p8_aes_cbc_decrypt() local 150 blkcipher_walk_init(&walk, dst, src, nbytes); in p8_aes_cbc_decrypt() 151 ret = blkcipher_walk_virt(desc, &walk); in p8_aes_cbc_decrypt() [all …]
|
/linux-4.1.27/drivers/atm/ |
D | idt77105.c | 85 struct idt77105_priv *walk; in idt77105_stats_timer_func() local 90 for (walk = idt77105_all; walk; walk = walk->next) { in idt77105_stats_timer_func() 91 dev = walk->dev; in idt77105_stats_timer_func() 93 stats = &walk->stats; in idt77105_stats_timer_func() 114 struct idt77105_priv *walk; in idt77105_restart_timer_func() local 119 for (walk = idt77105_all; walk; walk = walk->next) { in idt77105_restart_timer_func() 120 dev = walk->dev; in idt77105_restart_timer_func() 134 PUT( walk->old_mcr ,MCR); in idt77105_restart_timer_func() 326 struct idt77105_priv *walk, *prev; in idt77105_stop() local 334 for (prev = NULL, walk = idt77105_all ; in idt77105_stop() [all …]
|
D | suni.c | 58 struct suni_priv *walk; in suni_hz() local 62 for (walk = sunis; walk; walk = walk->next) { in suni_hz() 63 dev = walk->dev; in suni_hz() 64 stats = &walk->sonet_stats; in suni_hz() 344 struct suni_priv **walk; in suni_stop() local 349 for (walk = &sunis; *walk != PRIV(dev); in suni_stop() 350 walk = &PRIV((*walk)->dev)->next); in suni_stop() 351 *walk = PRIV((*walk)->dev)->next; in suni_stop()
|
/linux-4.1.27/drivers/crypto/ |
D | padlock-aes.c | 347 struct blkcipher_walk walk; in ecb_aes_encrypt() local 353 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_aes_encrypt() 354 err = blkcipher_walk_virt(desc, &walk); in ecb_aes_encrypt() 357 while ((nbytes = walk.nbytes)) { in ecb_aes_encrypt() 358 padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, in ecb_aes_encrypt() 362 err = blkcipher_walk_done(desc, &walk, nbytes); in ecb_aes_encrypt() 376 struct blkcipher_walk walk; in ecb_aes_decrypt() local 382 blkcipher_walk_init(&walk, dst, src, nbytes); in ecb_aes_decrypt() 383 err = blkcipher_walk_virt(desc, &walk); in ecb_aes_decrypt() 386 while ((nbytes = walk.nbytes)) { in ecb_aes_decrypt() [all …]
|
D | geode-aes.c | 309 struct blkcipher_walk walk; in geode_cbc_decrypt() local 315 blkcipher_walk_init(&walk, dst, src, nbytes); in geode_cbc_decrypt() 316 err = blkcipher_walk_virt(desc, &walk); in geode_cbc_decrypt() 317 op->iv = walk.iv; in geode_cbc_decrypt() 319 while ((nbytes = walk.nbytes)) { in geode_cbc_decrypt() 320 op->src = walk.src.virt.addr, in geode_cbc_decrypt() 321 op->dst = walk.dst.virt.addr; in geode_cbc_decrypt() 329 err = blkcipher_walk_done(desc, &walk, nbytes); in geode_cbc_decrypt() 341 struct blkcipher_walk walk; in geode_cbc_encrypt() local 347 blkcipher_walk_init(&walk, dst, src, nbytes); in geode_cbc_encrypt() [all …]
|
D | n2_core.c | 513 struct crypto_hash_walk walk; in n2_do_async_digest() local 536 nbytes = crypto_hash_walk_first(req, &walk); in n2_do_async_digest() 555 ent->src_addr = __pa(walk.data); in n2_do_async_digest() 563 nbytes = crypto_hash_walk_done(&walk, 0); in n2_do_async_digest() 568 ent->src_addr = __pa(walk.data); in n2_do_async_digest() 576 nbytes = crypto_hash_walk_done(&walk, 0); in n2_do_async_digest() 672 struct ablkcipher_walk walk; member 711 struct ablkcipher_walk walk; member 876 struct ablkcipher_walk *walk = &rctx->walk; in n2_compute_chunks() local 883 ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes); in n2_compute_chunks() [all …]
|
D | hifn_795x.c | 660 struct hifn_cipher_walk walk; member 1392 t = &rctx->walk.cache[0]; in hifn_setup_dma() 1395 if (t->length && rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { in hifn_setup_dma() 1588 rctx->walk.flags = 0; in hifn_setup_session() 1596 rctx->walk.flags |= ASYNC_FLAGS_MISALIGNED; in hifn_setup_session() 1602 if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { in hifn_setup_session() 1603 err = hifn_cipher_walk_init(&rctx->walk, idx, GFP_ATOMIC); in hifn_setup_session() 1608 sg_num = hifn_cipher_walk(req, &rctx->walk); in hifn_setup_session() 1668 rctx.walk.cache[0].length = 0; in hifn_test() 1774 if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { in hifn_process_ready() [all …]
|
D | omap-des.c | 389 struct scatter_walk walk; in sg_copy_buf() local 394 scatterwalk_start(&walk, sg); in sg_copy_buf() 395 scatterwalk_advance(&walk, start); in sg_copy_buf() 396 scatterwalk_copychunks(buf, &walk, nbytes, out); in sg_copy_buf() 397 scatterwalk_done(&walk, out, 0); in sg_copy_buf()
|
D | omap-aes.c | 405 struct scatter_walk walk; in sg_copy_buf() local 410 scatterwalk_start(&walk, sg); in sg_copy_buf() 411 scatterwalk_advance(&walk, start); in sg_copy_buf() 412 scatterwalk_copychunks(buf, &walk, nbytes, out); in sg_copy_buf() 413 scatterwalk_done(&walk, out, 0); in sg_copy_buf()
|
/linux-4.1.27/include/crypto/internal/ |
D | hash.h | 55 int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err); 57 struct crypto_hash_walk *walk); 59 struct crypto_hash_walk *walk); 61 struct crypto_hash_walk *walk, 64 static inline int crypto_ahash_walk_done(struct crypto_hash_walk *walk, in crypto_ahash_walk_done() argument 67 return crypto_hash_walk_done(walk, err); in crypto_ahash_walk_done() 70 static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk) in crypto_hash_walk_last() argument 72 return !(walk->entrylen | walk->total); in crypto_hash_walk_last() 75 static inline int crypto_ahash_walk_last(struct crypto_hash_walk *walk) in crypto_ahash_walk_last() argument 77 return crypto_hash_walk_last(walk); in crypto_ahash_walk_last()
|
/linux-4.1.27/Documentation/filesystems/ |
D | path-lookup.txt | 5 performing a path walk. Typically, for every open(), stat() etc., the path name 17 thus in every component during path look-up. Since 2.5.10 onwards, fast-walk 30 are path-walk intensive tend to do path lookups starting from a common dentry 34 Since 2.6.38, RCU is used to make a significant part of the entire path walk 36 even stores into cachelines of common dentries). This is known as "rcu-walk" 56 permissions on the parent inode to be able to walk into it. 67 - find the start point of the walk; 92 point to perform the next step of our path walk against. 162 still at 2. Now when it follows 2's 'next' pointer, it will walk off into 180 start the next part of the path walk from). [all …]
|
D | porting | 354 via rcu-walk path walk (basically, if the file can have had a path name in the 364 vfs now tries to do path walking in "rcu-walk mode", which avoids 368 filesystem callbacks, the vfs drops out of rcu-walk mode before the fs call, so 370 the benefits of rcu-walk mode. We will begin to add filesystem callbacks that 371 are rcu-walk aware, shown below. Filesystems should take advantage of this 377 the filesystem provides it), which requires dropping out of rcu-walk mode. This 378 may now be called in rcu-walk mode (nd->flags & LOOKUP_RCU). -ECHILD should be 379 returned if the filesystem cannot handle rcu-walk. See 383 on many or all directory inodes on the way down a path walk (to check for 384 exec permission). These must now be rcu-walk aware (flags & IPERM_FLAG_RCU).
|
D | vfs.txt | 446 (i.e. page that was installed when the symbolic link walk 448 walk). 453 May be called in rcu-walk mode (mask & MAY_NOT_BLOCK). If in rcu-walk 457 If a situation is encountered that rcu-walk cannot handle, return 458 -ECHILD and it will be called again in ref-walk mode. 947 d_revalidate may be called in rcu-walk mode (flags & LOOKUP_RCU). 948 If in rcu-walk mode, the filesystem must revalidate the dentry without 953 If a situation is encountered that rcu-walk cannot handle, return 954 -ECHILD and it will be called again in ref-walk mode. 957 This is called when a path-walk ends at dentry that was not acquired by [all …]
|
D | Locking | 25 rename_lock ->d_lock may block rcu-walk 26 d_revalidate: no no yes (ref-walk) maybe 36 d_manage: no no yes (ref-walk) maybe 89 permission: no (may not block if called in rcu-walk mode)
|
D | autofs4-mount-control.txt | 23 needs to walk back up the mount tree to construct a path, such as 65 trigger. So when we walk on the path we mount shark:/autofs/export1 "on
|
D | xfs-self-describing-metadata.txt | 105 object, we don't know what inode it belongs to and hence have to walk the entire
|
D | xfs-delayed-logging-design.txt | 402 it. The fact that we walk the log items (in the CIL) just to chain the log 668 sequencing also requires the same lock, list walk, and blocking mechanism to
|
/linux-4.1.27/fs/ |
D | select.c | 801 struct poll_list *walk; in do_poll() local 804 for (walk = list; walk != NULL; walk = walk->next) { in do_poll() 807 pfd = walk->entries; in do_poll() 808 pfd_end = pfd + walk->len; in do_poll() 880 struct poll_list *walk = head; in do_sys_poll() local 888 walk->next = NULL; in do_sys_poll() 889 walk->len = len; in do_sys_poll() 893 if (copy_from_user(walk->entries, ufds + nfds-todo, in do_sys_poll() 894 sizeof(struct pollfd) * walk->len)) in do_sys_poll() 897 todo -= walk->len; in do_sys_poll() [all …]
|
/linux-4.1.27/security/ |
D | device_cgroup.c | 96 struct dev_exception_item *excopy, *walk; in dev_exception_add() local 104 list_for_each_entry(walk, &dev_cgroup->exceptions, list) { in dev_exception_add() 105 if (walk->type != ex->type) in dev_exception_add() 107 if (walk->major != ex->major) in dev_exception_add() 109 if (walk->minor != ex->minor) in dev_exception_add() 112 walk->access |= ex->access; in dev_exception_add() 128 struct dev_exception_item *walk, *tmp; in dev_exception_rm() local 132 list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) { in dev_exception_rm() 133 if (walk->type != ex->type) in dev_exception_rm() 135 if (walk->major != ex->major) in dev_exception_rm() [all …]
|
/linux-4.1.27/fs/proc/ |
D | task_mmu.c | 484 struct mm_walk *walk) in smaps_pte_entry() argument 486 struct mem_size_stats *mss = walk->private; in smaps_pte_entry() 487 struct vm_area_struct *vma = walk->vma; in smaps_pte_entry() 508 struct mm_walk *walk) in smaps_pmd_entry() argument 510 struct mem_size_stats *mss = walk->private; in smaps_pmd_entry() 511 struct vm_area_struct *vma = walk->vma; in smaps_pmd_entry() 524 struct mm_walk *walk) in smaps_pmd_entry() argument 530 struct mm_walk *walk) in smaps_pte_range() argument 532 struct vm_area_struct *vma = walk->vma; in smaps_pte_range() 537 smaps_pmd_entry(pmd, addr, walk); in smaps_pte_range() [all …]
|
/linux-4.1.27/arch/openrisc/kernel/ |
D | dma.c | 33 unsigned long next, struct mm_walk *walk) in page_set_nocache() argument 54 unsigned long next, struct mm_walk *walk) in page_clear_nocache() argument 90 struct mm_walk walk = { in or1k_dma_alloc() local 109 if (walk_page_range(va, va + size, &walk)) { in or1k_dma_alloc() 123 struct mm_walk walk = { in or1k_dma_free() local 130 WARN_ON(walk_page_range(va, va + size, &walk)); in or1k_dma_free()
|
/linux-4.1.27/fs/fat/ |
D | namei_msdos.c | 26 unsigned char *walk; in msdos_format_name() local 43 for (walk = res; len && walk - res < 8; walk++) { in msdos_format_name() 62 if ((res == walk) && (c == 0xE5)) in msdos_format_name() 67 *walk = (!opts->nocase && c >= 'a' && c <= 'z') ? c - 32 : c; in msdos_format_name() 80 while (walk - res < 8) in msdos_format_name() 81 *walk++ = ' '; in msdos_format_name() 82 while (len > 0 && walk - res < MSDOS_NAME) { in msdos_format_name() 101 *walk++ = c - 32; in msdos_format_name() 103 *walk++ = c; in msdos_format_name() 110 while (walk - res < MSDOS_NAME) in msdos_format_name() [all …]
|
D | inode.c | 426 unsigned char exe_extensions[] = "EXECOMBAT", *walk; in is_exec() local 428 for (walk = exe_extensions; *walk; walk += 3) in is_exec() 429 if (!strncmp(extension, walk, 3)) in is_exec()
|
/linux-4.1.27/kernel/locking/ |
D | rtmutex.h | 33 enum rtmutex_chainwalk walk) in debug_rt_mutex_detect_deadlock() argument 35 return walk == RT_MUTEX_FULL_CHAINWALK; in debug_rt_mutex_detect_deadlock()
|
D | rtmutex-debug.h | 31 enum rtmutex_chainwalk walk) in debug_rt_mutex_detect_deadlock() argument
|
/linux-4.1.27/drivers/crypto/nx/ |
D | nx.c | 167 struct scatter_walk walk; in nx_walk_and_build() local 174 scatterwalk_start(&walk, sg_src); in nx_walk_and_build() 185 scatterwalk_advance(&walk, start - offset); in nx_walk_and_build() 188 n = scatterwalk_clamp(&walk, len); in nx_walk_and_build() 192 scatterwalk_start(&walk, sg_next(walk.sg)); in nx_walk_and_build() 193 n = scatterwalk_clamp(&walk, len); in nx_walk_and_build() 195 dst = scatterwalk_map(&walk); in nx_walk_and_build() 201 scatterwalk_advance(&walk, n); in nx_walk_and_build() 202 scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len); in nx_walk_and_build()
|
D | nx-aes-gcm.c | 130 struct scatter_walk walk; in nx_gca() local 137 scatterwalk_start(&walk, req->assoc); in nx_gca() 138 scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG); in nx_gca() 139 scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0); in nx_gca()
|
/linux-4.1.27/net/xfrm/ |
D | xfrm_policy.c | 186 if (unlikely(xp->walk.dead)) in xfrm_policy_timer() 250 if (unlikely(pol->walk.dead)) in xfrm_policy_flo_get() 262 return !pol->walk.dead; in xfrm_policy_flo_check() 288 INIT_LIST_HEAD(&policy->walk.all); in xfrm_policy_alloc() 308 BUG_ON(!policy->walk.dead); in xfrm_policy_destroy() 332 policy->walk.dead = 1; in xfrm_policy_kill() 627 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) { in xfrm_hash_rebuild() 996 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, in xfrm_policy_walk() argument 1004 if (walk->type >= XFRM_POLICY_TYPE_MAX && in xfrm_policy_walk() 1005 walk->type != XFRM_POLICY_TYPE_ANY) in xfrm_policy_walk() [all …]
|
D | xfrm_state.c | 1614 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk, in xfrm_state_walk() argument 1622 if (walk->seq != 0 && list_empty(&walk->all)) in xfrm_state_walk() 1626 if (list_empty(&walk->all)) in xfrm_state_walk() 1629 x = list_entry(&walk->all, struct xfrm_state_walk, all); in xfrm_state_walk() 1634 if (!xfrm_id_proto_match(state->id.proto, walk->proto)) in xfrm_state_walk() 1636 if (!__xfrm_state_filter_match(state, walk->filter)) in xfrm_state_walk() 1638 err = func(state, walk->seq, data); in xfrm_state_walk() 1640 list_move_tail(&walk->all, &x->all); in xfrm_state_walk() 1643 walk->seq++; in xfrm_state_walk() 1645 if (walk->seq == 0) { in xfrm_state_walk() [all …]
|
D | xfrm_user.c | 869 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1]; in xfrm_dump_sa_done() local 873 xfrm_state_walk_done(walk, net); in xfrm_dump_sa_done() 881 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1]; in xfrm_dump_sa() local 917 xfrm_state_walk_init(walk, proto, filter); in xfrm_dump_sa() 920 (void) xfrm_state_walk(net, walk, dump_one_state, &info); in xfrm_dump_sa() 1473 xp->walk.dead = 1; in xfrm_policy_construct() 1625 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; in xfrm_dump_policy_done() local 1628 xfrm_policy_walk_done(walk, net); in xfrm_dump_policy_done() 1635 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; in xfrm_dump_policy() local 1648 xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY); in xfrm_dump_policy() [all …]
|
/linux-4.1.27/drivers/staging/lustre/lustre/ldlm/ |
D | interval_tree.c | 217 struct interval_node *walk = root; in interval_find() local 220 while (walk) { in interval_find() 221 rc = extent_compare(ex, &walk->in_extent); in interval_find() 225 walk = walk->in_left; in interval_find() 227 walk = walk->in_right; in interval_find() 230 return walk; in interval_find()
|
/linux-4.1.27/arch/x86/crypto/sha-mb/ |
D | sha1_mb.c | 388 nbytes = crypto_ahash_walk_done(&rctx->walk, 0); in sha_finish_walk() 394 if (crypto_ahash_walk_last(&rctx->walk)) { in sha_finish_walk() 402 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag); in sha_finish_walk() 513 nbytes = crypto_ahash_walk_first(req, &rctx->walk); in sha1_mb_update() 520 if (crypto_ahash_walk_last(&rctx->walk)) in sha1_mb_update() 527 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, HASH_UPDATE); in sha1_mb_update() 571 nbytes = crypto_ahash_walk_first(req, &rctx->walk); in sha1_mb_finup() 578 if (crypto_ahash_walk_last(&rctx->walk)) { in sha1_mb_finup() 590 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag); in sha1_mb_finup()
|
/linux-4.1.27/net/sched/ |
D | cls_tcindex.c | 141 struct tcindex_filter __rcu **walk; in tcindex_delete() local 152 walk = p->h + i; in tcindex_delete() 153 for (f = rtnl_dereference(*walk); f; in tcindex_delete() 154 walk = &f->next, f = rtnl_dereference(*walk)) { in tcindex_delete() 162 rcu_assign_pointer(*walk, rtnl_dereference(f->next)); in tcindex_delete() 561 .walk = tcindex_walk,
|
D | sch_ingress.c | 124 .walk = ingress_walk,
|
D | act_api.c | 297 if (!act->walk) in tcf_register_action() 298 act->walk = tcf_generic_walker; in tcf_register_action() 804 err = a.ops->walk(skb, &dcb, RTM_DELACTION, &a); in tca_action_flush() 1063 ret = a_o->walk(skb, cb, RTM_GETACTION, &a); in tc_dump_action()
|
D | cls_cgroup.c | 216 .walk = cls_cgroup_walk,
|
D | cls_api.c | 486 if (tp->ops->walk == NULL) in tc_dump_tfilter() 494 tp->ops->walk(tp, &arg.w); in tc_dump_tfilter()
|
D | sch_mq.c | 232 .walk = mq_walk,
|
D | cls_basic.c | 294 .walk = basic_walk,
|
D | sch_prio.c | 370 .walk = prio_walk,
|
D | sch_red.c | 357 .walk = red_walk,
|
D | sch_multiq.c | 406 .walk = multiq_walk,
|
D | act_police.c | 356 .walk = tcf_act_police_walker
|
D | cls_bpf.c | 480 .walk = cls_bpf_walk,
|
D | cls_fw.c | 421 .walk = fw_walk,
|
D | sch_mqprio.c | 399 .walk = mqprio_walk,
|
D | sch_dsmark.c | 477 .walk = dsmark_walk,
|
D | sch_tbf.c | 547 .walk = tbf_walk,
|
D | sch_api.c | 164 if (!(cops->get && cops->put && cops->walk && cops->leaf)) in register_qdisc() 1084 q->ops->cl_ops->walk(q, &arg.w); in check_loop() 1753 q->ops->cl_ops->walk(q, &arg.w); in tc_dump_tclass_qdisc()
|
D | sch_drr.c | 500 .walk = drr_walk,
|
D | sch_choke.c | 605 .walk = choke_walk,
|
D | sch_fq_codel.c | 591 .walk = fq_codel_walk,
|
D | cls_route.c | 657 .walk = route4_walk,
|
D | sch_atm.c | 659 .walk = atm_tc_walk,
|
D | cls_flow.c | 676 .walk = flow_walk,
|
D | sch_sfb.c | 689 .walk = sfb_walk,
|
D | cls_rsvp.h | 716 .walk = rsvp_walk,
|
D | sch_sfq.c | 910 .walk = sfq_walk,
|
D | sch_netem.c | 1084 .walk = netem_walk,
|
D | cls_u32.c | 1061 .walk = u32_walk,
|
D | sch_qfq.c | 1558 .walk = qfq_walk,
|
D | sch_htb.c | 1596 .walk = htb_walk,
|
D | sch_hfsc.c | 1721 .walk = hfsc_walk
|
D | sch_cbq.c | 2026 .walk = cbq_walk,
|
/linux-4.1.27/drivers/vfio/pci/ |
D | vfio_pci.c | 402 struct vfio_pci_walk_info *walk = data; in vfio_pci_walk_wrapper() local 404 if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot)) in vfio_pci_walk_wrapper() 405 walk->ret = walk->fn(pdev, walk->data); in vfio_pci_walk_wrapper() 407 return walk->ret; in vfio_pci_walk_wrapper() 415 struct vfio_pci_walk_info walk = { in vfio_pci_for_each_slot_or_bus() local 419 pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk); in vfio_pci_for_each_slot_or_bus() 421 return walk.ret; in vfio_pci_for_each_slot_or_bus()
|
/linux-4.1.27/ipc/ |
D | sem.c | 790 struct list_head *walk; in wake_const_ops() local 799 walk = pending_list->next; in wake_const_ops() 800 while (walk != pending_list) { in wake_const_ops() 803 q = container_of(walk, struct sem_queue, list); in wake_const_ops() 804 walk = walk->next; in wake_const_ops() 891 struct list_head *walk; in update_queue() local 901 walk = pending_list->next; in update_queue() 902 while (walk != pending_list) { in update_queue() 905 q = container_of(walk, struct sem_queue, list); in update_queue() 906 walk = walk->next; in update_queue()
|
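Both loops above (wake_const_ops and update_queue) advance the cursor before acting on the entry, because the per-entry work may unlink the very node the cursor sits on. The idiom, reduced to a sketch against the file's own types (handle_and_maybe_unlink is a placeholder for that per-entry work):

    static void drain_pending(struct list_head *pending_list)
    {
        struct list_head *walk = pending_list->next;

        while (walk != pending_list) {
            struct sem_queue *q = container_of(walk, struct sem_queue, list);

            walk = walk->next;              /* step first ...             */
            handle_and_maybe_unlink(q);     /* ... q may be unlinked now  */
        }
    }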
D | mqueue.c | 544 struct ext_wait_queue *walk; in wq_add() local 548 list_for_each_entry(walk, &info->e_wait_q[sr].list, list) { in wq_add() 549 if (walk->task->static_prio <= current->static_prio) { in wq_add() 550 list_add_tail(&ewp->list, &walk->list); in wq_add()
|
/linux-4.1.27/net/atm/ |
D | clip.c | 88 struct clip_vcc **walk; in unlink_clip_vcc() local 96 for (walk = &entry->vccs; *walk; walk = &(*walk)->next) in unlink_clip_vcc() 97 if (*walk == clip_vcc) { in unlink_clip_vcc() 100 *walk = clip_vcc->next; /* atomic */ in unlink_clip_vcc()
|
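unlink_clip_vcc() above (and suni_stop() below) walk with a pointer to the next field itself, so removal needs no prev bookkeeping. Reduced to a self-contained example (struct node and unlink_node are illustrative):

    struct node { struct node *next; };

    static void unlink_node(struct node **head, struct node *victim)
    {
        struct node **walk;

        /* walk points at the slot that holds the current element:
         * first &head, then &prev->next, so splicing is one store.
         */
        for (walk = head; *walk; walk = &(*walk)->next) {
            if (*walk == victim) {
                *walk = victim->next;   /* splice it out */
                break;
            }
        }
    }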
D | common.c | 321 struct atm_vcc *walk; in check_ci() local 324 walk = atm_sk(s); in check_ci() 325 if (walk->dev != vcc->dev) in check_ci() 327 if (test_bit(ATM_VF_ADDR, &walk->flags) && walk->vpi == vpi && in check_ci() 328 walk->vci == vci && ((walk->qos.txtp.traffic_class != in check_ci() 330 (walk->qos.rxtp.traffic_class != ATM_NONE && in check_ci()
|
/linux-4.1.27/net/l2tp/ |
D | l2tp_debugfs.c | 107 struct hlist_node *walk; in l2tp_dfs_seq_tunnel_show() local 112 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { in l2tp_dfs_seq_tunnel_show() 115 session = hlist_entry(walk, struct l2tp_session, hlist); in l2tp_dfs_seq_tunnel_show()
|
D | l2tp_core.c | 1241 struct hlist_node *walk; in l2tp_tunnel_closeall() local 1253 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { in l2tp_tunnel_closeall() 1254 session = hlist_entry(walk, struct l2tp_session, hlist); in l2tp_tunnel_closeall()
|
/linux-4.1.27/arch/s390/mm/ |
D | pgtable.c | 1230 unsigned long next, struct mm_walk *walk) in __s390_enable_skey() argument 1242 ptep_flush_direct(walk->mm, addr, pte); in __s390_enable_skey() 1257 struct mm_walk walk = { .pte_entry = __s390_enable_skey }; in s390_enable_skey() local 1277 walk.mm = mm; in s390_enable_skey() 1278 walk_page_range(0, TASK_SIZE, &walk); in s390_enable_skey() 1290 unsigned long next, struct mm_walk *walk) in __s390_reset_cmma() argument 1302 struct mm_walk walk = { .pte_entry = __s390_reset_cmma }; in s390_reset_cmma() local 1305 walk.mm = mm; in s390_reset_cmma() 1306 walk_page_range(0, TASK_SIZE, &walk); in s390_reset_cmma()
|
/linux-4.1.27/drivers/crypto/ux500/cryp/ |
D | cryp_core.c | 883 struct ablkcipher_walk walk; in ablk_crypt() local 898 ablkcipher_walk_init(&walk, areq->dst, areq->src, areq->nbytes); in ablk_crypt() 899 ret = ablkcipher_walk_phys(areq, &walk); in ablk_crypt() 907 while ((nbytes = walk.nbytes) > 0) { in ablk_crypt() 908 ctx->iv = walk.iv; in ablk_crypt() 909 src_paddr = (page_to_phys(walk.src.page) + walk.src.offset); in ablk_crypt() 912 dst_paddr = (page_to_phys(walk.dst.page) + walk.dst.offset); in ablk_crypt() 922 ret = ablkcipher_walk_done(areq, &walk, nbytes); in ablk_crypt() 926 ablkcipher_walk_complete(&walk); in ablk_crypt()
|
/linux-4.1.27/arch/powerpc/mm/ |
D | subpage-prot.c | 135 unsigned long end, struct mm_walk *walk) in subpage_walk_pmd_entry() argument 137 struct vm_area_struct *vma = walk->vma; in subpage_walk_pmd_entry()
|
/linux-4.1.27/tools/testing/selftests/net/ |
D | psock_tpacket.c | 83 void (*walk)(int sock, struct ring *ring); member 600 ring->walk = walk_v1_v2; in __v1_v2_fill() 620 ring->walk = walk_v3; in __v3_fill() 704 ring->walk(sock, ring); in walk_ring()
|
/linux-4.1.27/include/net/ |
D | xfrm.h | 511 struct xfrm_policy_walk_entry walk; member 540 struct xfrm_policy_walk_entry walk; member 1425 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto, 1427 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk, 1429 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net); 1576 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type); 1577 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, 1580 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
|
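Usage pattern for the walker API declared above, following net/key/af_key.c's gen_reqid() further up: init, iterate with a callback, done. check_one and walk_all_policies are stand-in names:

    #include <net/xfrm.h>

    /* Returning non-zero aborts the walk; xfrm_policy_walk()
     * propagates that value back to the caller.
     */
    static int check_one(struct xfrm_policy *xp, int dir, int count, void *ptr)
    {
        return 0;
    }

    static void walk_all_policies(struct net *net)
    {
        struct xfrm_policy_walk walk;

        xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_MAIN);
        (void) xfrm_policy_walk(net, &walk, check_one, NULL);
        xfrm_policy_walk_done(&walk, net);
    }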
D | act_api.h | 97 int (*walk)(struct sk_buff *, struct netlink_callback *, int, struct tc_action *); member
|
D | sch_generic.h | 167 void (*walk)(struct Qdisc *, struct qdisc_walker * arg); member 227 void (*walk)(struct tcf_proto*, struct tcf_walker *arg); member
|
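The .walk hooks filling the net/sched tables above all follow the qdisc_walker protocol: skip the first walker->skip classes, call walker->fn on the rest, stop on a negative return. Sketch for a single-class qdisc, modeled on sch_ingress.c's ingress_walk() (demo_walk is a stand-in; struct qdisc_walker lives in the pkt_sched/sch_generic headers):

    #include <net/sch_generic.h>
    #include <net/pkt_sched.h>

    static void demo_walk(struct Qdisc *sch, struct qdisc_walker *walker)
    {
        if (walker->stop)
            return;

        if (walker->count >= walker->skip)      /* resumable dumps skip ahead */
            if (walker->fn(sch, 1, walker) < 0) {  /* 1 = the lone class handle */
                walker->stop = 1;
                return;
            }
        walker->count++;
    }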
/linux-4.1.27/include/linux/ |
D | mm.h | 1134 unsigned long next, struct mm_walk *walk); 1136 unsigned long next, struct mm_walk *walk); 1138 struct mm_walk *walk); 1141 struct mm_walk *walk); 1143 struct mm_walk *walk); 1150 struct mm_walk *walk); 1151 int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
|
/linux-4.1.27/scripts/gdb/linux/ |
D | symbols.py | 79 for root, dirs, files in os.walk(path):
|
/linux-4.1.27/Documentation/networking/ |
D | fib_trie.txt | 76 it is run to optimize and reorganize. It will walk the trie upwards 103 slower than the corresponding fib_hash function, as we have to walk the
|
D | tcp.txt | 102 On a timer we walk the retransmit list to send any retransmits, update the
|
D | packet_mmap.txt | 1038 frames to be updated resp. the frame handed over to the application, iv) walk
|
D | filter.txt | 422 will walk through the pcap file continuing from the current packet and
|
/linux-4.1.27/fs/jffs2/ |
D | README.Locking | 88 erase_completion_lock. So you can walk the list only while holding the 89 erase_completion_lock, and can drop the lock temporarily mid-walk as
|
/linux-4.1.27/net/netfilter/ |
D | nft_rbtree.c | 260 .walk = nft_rbtree_walk,
|
D | nft_hash.c | 374 .walk = nft_hash_walk,
|
D | nf_tables_api.c | 2879 set->ops->walk(ctx, set, &iter); in nf_tables_bind_set() 3120 set->ops->walk(&ctx, set, &args.iter); in nf_tables_dump_set() 4174 set->ops->walk(ctx, set, &iter); in nf_tables_check_loops()
|
/linux-4.1.27/drivers/crypto/ux500/hash/ |
D | hash_core.c | 1087 struct crypto_hash_walk walk; in hash_hw_update() local 1088 int msg_length = crypto_hash_walk_first(req, &walk); in hash_hw_update() 1111 data_buffer = walk.data; in hash_hw_update() 1121 msg_length = crypto_hash_walk_done(&walk, 0); in hash_hw_update()
|
/linux-4.1.27/arch/ia64/kernel/ |
D | efi.c | 305 walk (efi_freemem_callback_t callback, void *arg, u64 attr) in walk() function 329 walk(callback, arg, EFI_MEMORY_WB); in efi_memmap_walk() 339 walk(callback, arg, EFI_MEMORY_UC); in efi_memmap_walk_uc()
|
/linux-4.1.27/Documentation/ |
D | robust-futex-ABI.txt | 87 the kernel will walk this list, mark any such locks with a bit 118 list 'head' is, and to walk the list on thread exit, handling locks
|
D | BUG-HUNTING | 122 And then walk through that file, one routine at a time and
|
D | clk.txt | 116 Let's walk through enabling this clk from driver code:
|
D | sysfs-rules.txt | 160 by its subsystem value. You need to walk up the chain until you find
|
/linux-4.1.27/Documentation/virtual/kvm/ |
D | mmu.txt | 285 - walk shadow page table 293 - if needed, walk the guest page tables to determine the guest translation 299 - walk the shadow page table to find the spte for the translation, 311 - walk the shadow page hierarchy and drop affected translations
|
/linux-4.1.27/drivers/net/ethernet/sun/ |
D | sungem.c | 663 int walk = entry; in gem_tx() local 668 walk = NEXT_TX(walk); in gem_tx() 669 if (walk == limit) in gem_tx() 671 if (walk == last) in gem_tx()
|
/linux-4.1.27/Documentation/cgroups/ |
D | freezer-subsystem.txt | 13 walk /proc or invoke a kernel interface to gather information about the
|
/linux-4.1.27/Documentation/block/ |
D | biovecs.txt | 53 it had to walk two different bios at the same time, keeping both bi_idx and
|
/linux-4.1.27/Documentation/locking/ |
D | rt-mutex-design.txt | 401 High level overview of the PI chain walk 404 The PI chain walk is implemented by the function rt_mutex_adjust_prio_chain. 449 walk is only needed when a new top pi waiter is made to a task. 555 Taking of a mutex (The walk through) 558 OK, now let's take a look at the detailed walk through of what happens when
|
/linux-4.1.27/net/key/ |
D | af_key.c | 1867 struct xfrm_policy_walk walk; in gen_reqid() local 1877 xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_MAIN); in gen_reqid() 1878 rc = xfrm_policy_walk(net, &walk, check_reqid, (void*)&reqid); in gen_reqid() 1879 xfrm_policy_walk_done(&walk, net); in gen_reqid() 2298 xp->walk.dead = 1; in pfkey_spdadd() 3270 xp->walk.dead = 1; in pfkey_compile_policy()
|
/linux-4.1.27/include/net/netfilter/ |
D | nf_tables.h | 259 void (*walk)(const struct nft_ctx *ctx, member
|
/linux-4.1.27/Documentation/timers/ |
D | hrtimers.txt | 97 queued timers, without having to walk the rbtree.
|
/linux-4.1.27/Documentation/vm/ |
D | unevictable-lru.txt | 507 for the munlock case, calls __munlock_vma_pages_range() to walk the page table 621 processing. Again, these functions walk the respective reverse maps looking 639 Note that try_to_munlock()'s reverse map walk must visit every VMA in a page's
|
D | transhuge.txt | 347 pagetable walk). If the second pmd_trans_huge returns false, you
|
/linux-4.1.27/fs/btrfs/ |
D | raid56.c | 664 int walk = 0; in lock_stripe_add() local 668 walk++; in lock_stripe_add()
|
/linux-4.1.27/scripts/ |
D | analyze_suspend.py | 2833 for dirname, dirnames, filenames in os.walk('/sys/devices'): 2903 for dirname, dirnames, filenames in os.walk('/sys/devices'): 3330 for dirname, dirnames, filenames in os.walk(subdir):
|
/linux-4.1.27/net/ipv4/ |
D | tcp_input.c | 1790 goto walk; in tcp_sacktag_write_queue() 1807 walk: in tcp_sacktag_write_queue()
|
/linux-4.1.27/drivers/scsi/aic7xxx/ |
D | aic79xx.seq | 556 * manually walk the list counting MAXCMDCNT elements 722 * Brute force walk.
|
/linux-4.1.27/arch/arm/ |
D | Kconfig | 1161 r3p*) erratum. A speculative memory access may cause a page table walk
|
/linux-4.1.27/Documentation/scsi/ |
D | ChangeLog.1992-1997 | 829 * scsi.c: When incrementing usage count, walk block linked list
|
/linux-4.1.27/Documentation/virtual/uml/ |
D | UserModeLinux-HOWTO.txt | 2757 is at module_list. If it's not, walk down the next links, looking at
|