/linux-4.1.27/fs/minix/ |
D | itree_common.c | 151 Indirect *partial; in get_block() local 159 partial = get_branch(inode, depth, offsets, chain, &err); in get_block() 162 if (!partial) { in get_block() 166 partial = chain+depth-1; /* the whole chain */ in get_block() 173 while (partial > chain) { in get_block() 174 brelse(partial->bh); in get_block() 175 partial--; in get_block() 189 left = (chain + depth) - partial; in get_block() 190 err = alloc_branch(inode, left, offsets+(partial-chain), partial); in get_block() 194 if (splice_branch(inode, chain, partial, left) < 0) in get_block() [all …]
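The minix get_block() hits above all follow one indirect-block idiom: get_branch() walks the chain of Indirect entries and returns NULL when the whole path is already mapped, or a pointer (partial) to the first missing link; the caller then allocates (chain + depth) - partial levels and, on cleanup, releases buffer heads while walking partial back down to chain. The standalone C sketch below only illustrates that pointer arithmetic; the struct indirect type and get_branch() here are simplified stand-ins, not the kernel code.

#include <stdio.h>

/* Simplified stand-in for the kernel's Indirect chain entry. */
struct indirect {
    int key;              /* 0 means "not mapped yet" */
};

/*
 * Return a pointer to the first unmapped entry in the chain, or NULL if
 * every level is already mapped (mirrors the get_branch() contract).
 */
static struct indirect *get_branch(struct indirect *chain, int depth)
{
    for (int i = 0; i < depth; i++)
        if (chain[i].key == 0)
            return &chain[i];
    return NULL;
}

int main(void)
{
    enum { DEPTH = 3 };
    struct indirect chain[DEPTH] = { { 11 }, { 0 }, { 0 } };

    struct indirect *partial = get_branch(chain, DEPTH);
    if (!partial) {
        puts("whole chain mapped - nothing to allocate");
        return 0;
    }

    /* Same arithmetic as "left = (chain + depth) - partial". */
    int left = (int)((chain + DEPTH) - partial);
    printf("need to allocate %d missing level(s), starting at offset %ld\n",
           left, (long)(partial - chain));

    /* On cleanup the kernel walks back: while (partial > chain) { brelse(partial->bh); partial--; } */
    return 0;
}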
|
/linux-4.1.27/fs/ext4/ |
D | indirect.c | 243 Indirect *partial) in ext4_find_goal() argument 251 goal = ext4_find_near(inode, partial); in ext4_find_goal() 521 Indirect *partial; in ext4_ind_map_blocks() local 537 partial = ext4_get_branch(inode, depth, offsets, chain, &err); in ext4_ind_map_blocks() 540 if (!partial) { in ext4_ind_map_blocks() 580 ar.goal = ext4_find_goal(inode, map->m_lblk, partial); in ext4_ind_map_blocks() 583 indirect_blks = (chain + depth) - partial - 1; in ext4_ind_map_blocks() 589 ar.len = ext4_blks_to_allocate(partial, indirect_blks, in ext4_ind_map_blocks() 596 offsets + (partial - chain), partial); in ext4_ind_map_blocks() 606 err = ext4_splice_branch(handle, &ar, partial, indirect_blks); in ext4_ind_map_blocks() [all …]
|
D | move_extent.c | 186 int i, err, nr = 0, partial = 0; in mext_page_mkuptodate() local 204 partial = 1; in mext_page_mkuptodate() 237 if (!partial) in mext_page_mkuptodate()
|
D | inode.c | 833 int *partial, in ext4_walk_page_buffers() argument 849 if (partial && !buffer_uptodate(bh)) in ext4_walk_page_buffers() 850 *partial = 1; in ext4_walk_page_buffers() 1220 int partial = 0; in ext4_journalled_write_end() local 1241 to, &partial, write_end_fn); in ext4_journalled_write_end() 1242 if (!partial) in ext4_journalled_write_end()
|
D | ext4.h | 2288 int *partial,
|
/linux-4.1.27/include/crypto/ |
D | sha1_base.h | 40 unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; in sha1_base_do_update() local 44 if (unlikely((partial + len) >= SHA1_BLOCK_SIZE)) { in sha1_base_do_update() 47 if (partial) { in sha1_base_do_update() 48 int p = SHA1_BLOCK_SIZE - partial; in sha1_base_do_update() 50 memcpy(sctx->buffer + partial, data, p); in sha1_base_do_update() 64 partial = 0; in sha1_base_do_update() 67 memcpy(sctx->buffer + partial, data, len); in sha1_base_do_update() 78 unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; in sha1_base_do_finalize() local 80 sctx->buffer[partial++] = 0x80; in sha1_base_do_finalize() 81 if (partial > bit_offset) { in sha1_base_do_finalize() [all …]
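sha1_base_do_update() above is the canonical partial-block buffering scheme: partial = count % SHA1_BLOCK_SIZE is how much of the context buffer is already occupied; if the new data completes a block, the code tops the buffer up and processes it, runs any whole blocks straight from the input, and stashes the remaining tail back into the buffer. The same structure repeats in sha256_base.h, sha512_base.h and the ghash glue below. Here is a standalone, simplified sketch of that flow (toy_ctx, toy_update and process_block are invented names; no real hashing is done):

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64

struct toy_ctx {
    unsigned long long count;           /* total bytes fed in so far */
    unsigned char buffer[BLOCK_SIZE];   /* holds the trailing partial block */
};

static void process_block(struct toy_ctx *ctx, const unsigned char *block)
{
    /* A real implementation would run the compression function here. */
    printf("processing one %d-byte block\n", BLOCK_SIZE);
    (void)ctx;
    (void)block;
}

static void toy_update(struct toy_ctx *ctx, const unsigned char *data, size_t len)
{
    unsigned int partial = ctx->count % BLOCK_SIZE;

    ctx->count += len;

    if (partial + len >= BLOCK_SIZE) {
        if (partial) {
            unsigned int p = BLOCK_SIZE - partial;

            /* Top up the buffered partial block and consume it. */
            memcpy(ctx->buffer + partial, data, p);
            process_block(ctx, ctx->buffer);
            data += p;
            len -= p;
        }

        /* Run whole blocks straight from the caller's data. */
        while (len >= BLOCK_SIZE) {
            process_block(ctx, data);
            data += BLOCK_SIZE;
            len -= BLOCK_SIZE;
        }
        partial = 0;
    }

    /* Whatever is left (< BLOCK_SIZE) waits in the buffer. */
    if (len)
        memcpy(ctx->buffer + partial, data, len);
}

int main(void)
{
    struct toy_ctx ctx = { 0 };
    unsigned char data[150] = { 0 };

    toy_update(&ctx, data, 30);    /* buffered only */
    toy_update(&ctx, data, 150);   /* tops up one block, one more full block, 52 bytes buffered */
    return 0;
}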
|
D | sha256_base.h | 61 unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; in sha256_base_do_update() local 65 if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) { in sha256_base_do_update() 68 if (partial) { in sha256_base_do_update() 69 int p = SHA256_BLOCK_SIZE - partial; in sha256_base_do_update() 71 memcpy(sctx->buf + partial, data, p); in sha256_base_do_update() 85 partial = 0; in sha256_base_do_update() 88 memcpy(sctx->buf + partial, data, len); in sha256_base_do_update() 99 unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; in sha256_base_do_finalize() local 101 sctx->buf[partial++] = 0x80; in sha256_base_do_finalize() 102 if (partial > bit_offset) { in sha256_base_do_finalize() [all …]
|
D | sha512_base.h | 61 unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; in sha512_base_do_update() local 67 if (unlikely((partial + len) >= SHA512_BLOCK_SIZE)) { in sha512_base_do_update() 70 if (partial) { in sha512_base_do_update() 71 int p = SHA512_BLOCK_SIZE - partial; in sha512_base_do_update() 73 memcpy(sctx->buf + partial, data, p); in sha512_base_do_update() 87 partial = 0; in sha512_base_do_update() 90 memcpy(sctx->buf + partial, data, len); in sha512_base_do_update() 101 unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; in sha512_base_do_finalize() local 103 sctx->buf[partial++] = 0x80; in sha512_base_do_finalize() 104 if (partial > bit_offset) { in sha512_base_do_finalize() [all …]
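The matching *_do_finalize() step appends the 0x80 padding byte at the current partial offset; if that already overruns bit_offset (the slot reserved at the end of the block for the message length), the block is padded out and processed, and the length goes into a fresh block. A simplified standalone sketch of that decision follows (toy names again; it reserves 8 length bytes, whereas the real SHA-512 code reserves 16 and stores the count big-endian):

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64
/* Last 8 bytes of the final block hold the message length in bits. */
#define BIT_OFFSET (BLOCK_SIZE - sizeof(unsigned long long))

struct toy_ctx {
    unsigned long long count;
    unsigned char buffer[BLOCK_SIZE];
};

static void process_block(struct toy_ctx *ctx, const unsigned char *block)
{
    printf("processing final-path block\n");
    (void)ctx;
    (void)block;
}

static void toy_finalize(struct toy_ctx *ctx)
{
    unsigned int partial = ctx->count % BLOCK_SIZE;

    ctx->buffer[partial++] = 0x80;          /* mandatory padding marker */

    if (partial > BIT_OFFSET) {
        /* No room left for the length: pad, process, start a clean block. */
        memset(ctx->buffer + partial, 0, BLOCK_SIZE - partial);
        process_block(ctx, ctx->buffer);
        partial = 0;
    }

    /* Zero-fill up to the length field, then store the bit count. */
    memset(ctx->buffer + partial, 0, BIT_OFFSET - partial);
    unsigned long long bits = ctx->count * 8;
    memcpy(ctx->buffer + BIT_OFFSET, &bits, sizeof(bits)); /* real code stores big-endian */
    process_block(ctx, ctx->buffer);
}

int main(void)
{
    struct toy_ctx ctx = { .count = 61 };   /* 61 bytes pending: 0x80 lands past BIT_OFFSET */
    toy_finalize(&ctx);
    return 0;
}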
|
D | vmac.h | 59 u8 partial[VMAC_NHBYTES]; /* partial block */ member
|
D | public_key.h | 105 bool partial);
|
/linux-4.1.27/fs/sysv/ |
D | itree.c | 208 Indirect *partial; in get_block() local 217 partial = get_branch(inode, depth, offsets, chain, &err); in get_block() 221 if (!partial) { in get_block() 226 partial = chain+depth-1; /* the whole chain */ in get_block() 233 while (partial > chain) { in get_block() 234 brelse(partial->bh); in get_block() 235 partial--; in get_block() 249 left = (chain + depth) - partial; in get_block() 250 err = alloc_branch(inode, left, offsets+(partial-chain), partial); in get_block() 254 if (splice_branch(inode, chain, partial, left) < 0) in get_block() [all …]
|
/linux-4.1.27/arch/arm64/crypto/ |
D | ghash-ce-glue.c | 51 unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; in ghash_update() local 55 if ((partial + len) >= GHASH_BLOCK_SIZE) { in ghash_update() 59 if (partial) { in ghash_update() 60 int p = GHASH_BLOCK_SIZE - partial; in ghash_update() 62 memcpy(ctx->buf + partial, src, p); in ghash_update() 72 partial ? ctx->buf : NULL); in ghash_update() 75 partial = 0; in ghash_update() 78 memcpy(ctx->buf + partial, src, len); in ghash_update() 85 unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; in ghash_final() local 87 if (partial) { in ghash_final() [all …]
|
/linux-4.1.27/drivers/crypto/ |
D | padlock-sha.c | 309 unsigned int partial, done; in padlock_sha1_update_nano() local 317 partial = sctx->count & 0x3f; in padlock_sha1_update_nano() 323 if ((partial + len) >= SHA1_BLOCK_SIZE) { in padlock_sha1_update_nano() 326 if (partial) { in padlock_sha1_update_nano() 327 done = -partial; in padlock_sha1_update_nano() 328 memcpy(sctx->buffer + partial, data, in padlock_sha1_update_nano() 351 partial = 0; in padlock_sha1_update_nano() 354 memcpy(sctx->buffer + partial, src, len - done); in padlock_sha1_update_nano() 362 unsigned int partial, padlen; in padlock_sha1_final_nano() local 369 partial = state->count & 0x3f; in padlock_sha1_final_nano() [all …]
|
/linux-4.1.27/arch/powerpc/crypto/ |
D | sha1.c | 46 unsigned int partial, done; in sha1_update() local 49 partial = sctx->count & 0x3f; in sha1_update() 54 if ((partial + len) > 63) { in sha1_update() 57 if (partial) { in sha1_update() 58 done = -partial; in sha1_update() 59 memcpy(sctx->buffer + partial, data, done + 64); in sha1_update() 70 partial = 0; in sha1_update() 72 memcpy(sctx->buffer + partial, src, len - done); in sha1_update()
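The powerpc sha1_update() (like the padlock and octeon variants above) expresses the same buffering with an offset that starts negative: done = -partial makes memcpy(sctx->buffer + partial, data, done + 64) copy exactly 64 - partial bytes to complete the buffered block, and after each processed block done += 64 leaves data + done at the next unconsumed input byte. A minimal standalone check of that index arithmetic (no hashing, 64-byte blocks assumed):

#include <stdio.h>
#include <string.h>

int main(void)
{
    unsigned char buffer[64];
    unsigned char data[200];
    unsigned int len = sizeof(data);
    unsigned int partial = 20;     /* pretend 20 bytes were already buffered */
    int done = 0;

    for (unsigned int i = 0; i < sizeof(data); i++)
        data[i] = (unsigned char)i;

    if (partial + len > 63) {
        if (partial) {
            done = -(int)partial;
            /* done + 64 == 64 - partial bytes complete the buffered block */
            memcpy(buffer + partial, data, done + 64);
            /* process_block(buffer) would run here */
            done += 64;            /* data + done is the next unread input byte */
        }
        while (done + 63 < (int)len) {
            /* process_block(data + done) would run here */
            done += 64;
        }
        partial = 0;
    }
    /* Tail goes back into the buffer. */
    memcpy(buffer + partial, data + done, len - done);

    printf("consumed input up to offset %d, %u byte(s) left buffered\n",
           done, len - done);
    return 0;
}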
|
/linux-4.1.27/arch/sparc/crypto/ |
D | md5_glue.c | 46 unsigned int len, unsigned int partial) in __md5_sparc64_update() argument 51 if (partial) { in __md5_sparc64_update() 52 done = MD5_HMAC_BLOCK_SIZE - partial; in __md5_sparc64_update() 53 memcpy((u8 *)sctx->block + partial, data, done); in __md5_sparc64_update() 70 unsigned int partial = sctx->byte_count % MD5_HMAC_BLOCK_SIZE; in md5_sparc64_update() local 73 if (partial + len < MD5_HMAC_BLOCK_SIZE) { in md5_sparc64_update() 75 memcpy((u8 *)sctx->block + partial, data, len); in md5_sparc64_update() 77 __md5_sparc64_update(sctx, data, len, partial); in md5_sparc64_update()
|
D | sha1_glue.c | 41 unsigned int len, unsigned int partial) in __sha1_sparc64_update() argument 46 if (partial) { in __sha1_sparc64_update() 47 done = SHA1_BLOCK_SIZE - partial; in __sha1_sparc64_update() 48 memcpy(sctx->buffer + partial, data, done); in __sha1_sparc64_update() 65 unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; in sha1_sparc64_update() local 68 if (partial + len < SHA1_BLOCK_SIZE) { in sha1_sparc64_update() 70 memcpy(sctx->buffer + partial, data, len); in sha1_sparc64_update() 72 __sha1_sparc64_update(sctx, data, len, partial); in sha1_sparc64_update()
|
D | sha256_glue.c | 62 unsigned int len, unsigned int partial) in __sha256_sparc64_update() argument 67 if (partial) { in __sha256_sparc64_update() 68 done = SHA256_BLOCK_SIZE - partial; in __sha256_sparc64_update() 69 memcpy(sctx->buf + partial, data, done); in __sha256_sparc64_update() 86 unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; in sha256_sparc64_update() local 89 if (partial + len < SHA256_BLOCK_SIZE) { in sha256_sparc64_update() 91 memcpy(sctx->buf + partial, data, len); in sha256_sparc64_update() 93 __sha256_sparc64_update(sctx, data, len, partial); in sha256_sparc64_update()
|
D | sha512_glue.c | 61 unsigned int len, unsigned int partial) in __sha512_sparc64_update() argument 67 if (partial) { in __sha512_sparc64_update() 68 done = SHA512_BLOCK_SIZE - partial; in __sha512_sparc64_update() 69 memcpy(sctx->buf + partial, data, done); in __sha512_sparc64_update() 86 unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; in sha512_sparc64_update() local 89 if (partial + len < SHA512_BLOCK_SIZE) { in sha512_sparc64_update() 92 memcpy(sctx->buf + partial, data, len); in sha512_sparc64_update() 94 __sha512_sparc64_update(sctx, data, len, partial); in sha512_sparc64_update()
|
/linux-4.1.27/fs/ext2/ |
D | inode.c | 327 Indirect *partial) in ext2_find_goal() argument 342 return ext2_find_near(inode, partial); in ext2_find_goal() 625 Indirect *partial; in ext2_get_blocks() local 641 partial = ext2_get_branch(inode, depth, offsets, chain, &err); in ext2_get_blocks() 643 if (!partial) { in ext2_get_blocks() 689 if (err == -EAGAIN || !verify_chain(chain, partial)) { in ext2_get_blocks() 690 while (partial > chain) { in ext2_get_blocks() 691 brelse(partial->bh); in ext2_get_blocks() 692 partial--; in ext2_get_blocks() 694 partial = ext2_get_branch(inode, depth, offsets, chain, &err); in ext2_get_blocks() [all …]
|
/linux-4.1.27/arch/arm/crypto/ |
D | ghash-ce-glue.c | 58 unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; in ghash_update() local 62 if ((partial + len) >= GHASH_BLOCK_SIZE) { in ghash_update() 66 if (partial) { in ghash_update() 67 int p = GHASH_BLOCK_SIZE - partial; in ghash_update() 69 memcpy(ctx->buf + partial, src, p); in ghash_update() 79 partial ? ctx->buf : NULL); in ghash_update() 82 partial = 0; in ghash_update() 85 memcpy(ctx->buf + partial, src, len); in ghash_update() 92 unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; in ghash_final() local 94 if (partial) { in ghash_final() [all …]
|
D | sha512_neon_glue.c | 97 unsigned int len, unsigned int partial) in __sha512_neon_update() argument 106 if (partial) { in __sha512_neon_update() 107 done = SHA512_BLOCK_SIZE - partial; in __sha512_neon_update() 108 memcpy(sctx->buf + partial, data, done); in __sha512_neon_update() 130 unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; in sha512_neon_update() local 134 if (partial + len < SHA512_BLOCK_SIZE) { in sha512_neon_update() 138 memcpy(sctx->buf + partial, data, len); in sha512_neon_update() 147 res = __sha512_neon_update(desc, data, len, partial); in sha512_neon_update()
|
/linux-4.1.27/arch/mips/cavium-octeon/crypto/ |
D | octeon-sha1.c | 95 unsigned int partial; in __octeon_sha1_update() local 99 partial = sctx->count % SHA1_BLOCK_SIZE; in __octeon_sha1_update() 104 if ((partial + len) >= SHA1_BLOCK_SIZE) { in __octeon_sha1_update() 105 if (partial) { in __octeon_sha1_update() 106 done = -partial; in __octeon_sha1_update() 107 memcpy(sctx->buffer + partial, data, in __octeon_sha1_update() 118 partial = 0; in __octeon_sha1_update() 120 memcpy(sctx->buffer + partial, src, len - done); in __octeon_sha1_update()
|
D | octeon-sha256.c | 107 unsigned int partial; in __octeon_sha256_update() local 111 partial = sctx->count % SHA256_BLOCK_SIZE; in __octeon_sha256_update() 116 if ((partial + len) >= SHA256_BLOCK_SIZE) { in __octeon_sha256_update() 117 if (partial) { in __octeon_sha256_update() 118 done = -partial; in __octeon_sha256_update() 119 memcpy(sctx->buf + partial, data, in __octeon_sha256_update() 130 partial = 0; in __octeon_sha256_update() 132 memcpy(sctx->buf + partial, src, len - done); in __octeon_sha256_update()
|
/linux-4.1.27/drivers/usb/storage/ |
D | freecom.c | 227 unsigned int partial; in freecom_transport() local 262 FCM_STATUS_PACKET_LENGTH, &partial); in freecom_transport() 263 usb_stor_dbg(us, "foo Status result %d %u\n", result, partial); in freecom_transport() 267 US_DEBUG(pdump(us, (void *)fst, partial)); in freecom_transport() 302 FCM_STATUS_PACKET_LENGTH, &partial); in freecom_transport() 304 usb_stor_dbg(us, "bar Status result %d %u\n", result, partial); in freecom_transport() 308 US_DEBUG(pdump(us, (void *)fst, partial)); in freecom_transport() 311 if (partial != 4) in freecom_transport() 364 FCM_PACKET_LENGTH, &partial); in freecom_transport() 365 US_DEBUG(pdump(us, (void *)fst, partial)); in freecom_transport() [all …]
|
D | initializers.c | 67 unsigned int partial; in usb_stor_ucr61s2b_init() local 81 US_BULK_CB_WRAP_LEN, &partial); in usb_stor_ucr61s2b_init() 87 US_BULK_CS_WRAP_LEN, &partial); in usb_stor_ucr61s2b_init()
|
D | karma.c | 104 int result, partial; in rio_karma_send_command() local 118 us->iobuf, RIO_SEND_LEN, &partial); in rio_karma_send_command() 123 data->recv, RIO_RECV_LEN, &partial); in rio_karma_send_command()
|
D | transport.c | 265 unsigned int length, int result, unsigned int partial) in interpret_urb_result() argument 268 result, partial, length); in interpret_urb_result() 273 if (partial != length) { in interpret_urb_result() 468 unsigned int partial; in usb_stor_bulk_srb() local 471 &partial); in usb_stor_bulk_srb() 473 scsi_set_resid(srb, scsi_bufflen(srb) - partial); in usb_stor_bulk_srb() 491 unsigned int partial; in usb_stor_bulk_transfer_sg() local 498 length_left, &partial); in usb_stor_bulk_transfer_sg() 499 length_left -= partial; in usb_stor_bulk_transfer_sg() 503 length_left, &partial); in usb_stor_bulk_transfer_sg() [all …]
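In these USB storage paths, partial is an output parameter of the bulk-transfer helpers: the number of bytes actually moved, which can be less than requested. usb_stor_bulk_srb() converts it into the SCSI residue via scsi_set_resid(srb, scsi_bufflen(srb) - partial), and interpret_urb_result() treats partial != length as a short transfer. A trivial standalone illustration of that residue bookkeeping (fake_srb and fake_bulk_transfer are hypothetical names, not driver APIs):

#include <stdio.h>

/* Hypothetical stand-ins for the driver's request/transfer state. */
struct fake_srb {
    unsigned int bufflen;   /* bytes the SCSI layer asked for */
    unsigned int resid;     /* bytes NOT transferred (residue) */
};

static int fake_bulk_transfer(unsigned int requested, unsigned int *partial)
{
    /* Pretend the device returned a short transfer. */
    *partial = requested > 512 ? requested - 512 : requested;
    return 0;
}

int main(void)
{
    struct fake_srb srb = { .bufflen = 4096, .resid = 0 };
    unsigned int partial;

    fake_bulk_transfer(srb.bufflen, &partial);

    /* Mirrors scsi_set_resid(srb, scsi_bufflen(srb) - partial). */
    srb.resid = srb.bufflen - partial;

    if (partial != srb.bufflen)
        printf("short transfer: got %u of %u bytes, residue %u\n",
               partial, srb.bufflen, srb.resid);
    return 0;
}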
|
D | ene_ub6250.c | 504 unsigned int cswlen = 0, partial = 0; in ene_send_scsi_cmd() local 529 transfer_length, 0, &partial); in ene_send_scsi_cmd()
|
/linux-4.1.27/drivers/usb/misc/ |
D | rio500.c | 279 unsigned int partial; in write_rio() local 320 obuf, thistime, &partial, 5000); in write_rio() 324 result, thistime, partial); in write_rio() 335 } else if (!result && partial) { in write_rio() 336 obuf += partial; in write_rio() 337 thistime -= partial; in write_rio() 367 unsigned int partial; in read_rio() local 401 ibuf, this_read, &partial, in read_rio() 406 result, this_read, partial); in read_rio() 408 if (partial) { in read_rio() [all …]
|
/linux-4.1.27/fs/ext3/ |
D | inode.c | 505 Indirect *partial) in ext3_find_goal() argument 520 return ext3_find_near(inode, partial); in ext3_find_goal() 865 Indirect *partial; in ext3_get_blocks_handle() local 882 partial = ext3_get_branch(inode, depth, offsets, chain, &err); in ext3_get_blocks_handle() 885 if (!partial) { in ext3_get_blocks_handle() 937 if (err == -EAGAIN || !verify_chain(chain, partial)) { in ext3_get_blocks_handle() 938 while (partial > chain) { in ext3_get_blocks_handle() 939 brelse(partial->bh); in ext3_get_blocks_handle() 940 partial--; in ext3_get_blocks_handle() 942 partial = ext3_get_branch(inode, depth, offsets, chain, &err); in ext3_get_blocks_handle() [all …]
|
/linux-4.1.27/fs/adfs/ |
D | dir_fplus.c | 141 unsigned int buffer, partial, remainder; in dir_memcpy() local 146 partial = sb->s_blocksize - offset; in dir_memcpy() 148 if (partial >= len) in dir_memcpy() 153 remainder = len - partial; in dir_memcpy() 157 partial); in dir_memcpy() 159 memcpy(c + partial, in dir_memcpy()
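dir_memcpy() above handles a copy that may straddle a directory block boundary: partial = s_blocksize - offset is what still fits in the current block; if the request fits, it is a single memcpy, otherwise the remaining len - partial bytes come from the next block. A standalone sketch of that split, with two plain byte arrays standing in for adjacent blocks:

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16

int main(void)
{
    /* Two consecutive "blocks" of backing data. */
    unsigned char block0[BLOCK_SIZE] = "0123456789abcdef";
    unsigned char block1[BLOCK_SIZE] = "ghijklmnopqrstuv";
    unsigned char out[32];

    unsigned int offset = 12;                   /* copy starts 12 bytes into block 0 */
    unsigned int len = 9;
    unsigned int partial = BLOCK_SIZE - offset; /* bytes available in the first block */

    if (partial >= len) {
        memcpy(out, block0 + offset, len);
    } else {
        unsigned int remainder = len - partial;

        memcpy(out, block0 + offset, partial);      /* tail of block 0 */
        memcpy(out + partial, block1, remainder);   /* head of block 1 */
    }

    out[len] = '\0';
    printf("copied: %s\n", (char *)out);   /* prints "cdefghijk" */
    return 0;
}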
|
/linux-4.1.27/fs/ |
D | splice.c | 210 buf->offset = spd->partial[page_nr].offset; in splice_to_pipe() 211 buf->len = spd->partial[page_nr].len; in splice_to_pipe() 212 buf->private = spd->partial[page_nr].private; in splice_to_pipe() 286 spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL); in splice_grow_spd() 288 if (spd->pages && spd->partial) in splice_grow_spd() 292 kfree(spd->partial); in splice_grow_spd() 302 kfree(spd->partial); in splice_shrink_spd() 313 struct partial_page partial[PIPE_DEF_BUFFERS]; in __generic_file_splice_read() local 320 .partial = partial, in __generic_file_splice_read() 485 spd.partial[page_nr].offset = loff; in __generic_file_splice_read() [all …]
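In splice.c, partial is the array of struct partial_page running in parallel with spd->pages: each entry carries the offset, length and private cookie for its page, splice_to_pipe() copies them into pipe buffers, and splice_grow_spd()/splice_shrink_spd() switch to a kmalloc'ed array when more than PIPE_DEF_BUFFERS entries are needed. The sketch below only shows the parallel-arrays layout with plain C types (toy_spd and toy_partial_page are invented names; no pipe is involved):

#include <stdio.h>
#include <stdlib.h>

#define PIPE_DEF_BUFFERS 16

/* Simplified shape of the descriptor: pages[] and partial[] run in parallel. */
struct toy_partial_page {
    unsigned int offset;
    unsigned int len;
    unsigned long private_cookie;
};

struct toy_spd {
    void **pages;
    struct toy_partial_page *partial;
    int nr_pages;
};

int main(void)
{
    void *page_slots[PIPE_DEF_BUFFERS] = { 0 };
    struct toy_partial_page partial_slots[PIPE_DEF_BUFFERS] = { { 0, 0, 0 } };
    struct toy_spd spd = {
        .pages = page_slots,
        .partial = partial_slots,
        .nr_pages = 0,
    };

    /* Queue one "page": 300 bytes starting 100 bytes into it. */
    spd.pages[spd.nr_pages] = malloc(4096);
    spd.partial[spd.nr_pages].offset = 100;
    spd.partial[spd.nr_pages].len = 300;
    spd.nr_pages++;

    /* A consumer (splice_to_pipe() in the kernel) reads the pair back out. */
    for (int i = 0; i < spd.nr_pages; i++)
        printf("page %d: offset=%u len=%u\n",
               i, spd.partial[i].offset, spd.partial[i].len);

    free(spd.pages[0]);
    return 0;
}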
|
D | Kconfig.binfmt | 47 bool "Write ELF core dumps with partial segments"
|
D | buffer.c | 1968 int partial = 0; in __block_commit_write() local 1980 partial = 1; in __block_commit_write() 1997 if (!partial) in __block_commit_write()
|
/linux-4.1.27/Documentation/ABI/testing/ |
D | sysfs-kernel-slab | 95 allocation from a partial or new slab. It can be written to 178 The deactivate_to_head file shows how many times a partial cpu 179 slab was deactivated and added to the head of its node's partial 189 The deactivate_to_tail file shows how many times a partial cpu 190 slab was deactivated and added to the tail of its node's partial 211 partial list. It can be written to clear the current count. 254 its node's partial list. It can be written to clear the current 276 using the slow path (i.e. to a full or partial slab). It can 296 remain on a node's partial list to avoid the overhead of 325 objects are on partial slabs and from which nodes they are [all …]
|
D | sysfs-gpio | 11 Kernel code may export it for complete or partial access.
|
D | dev-kmsg | 37 there are never partial messages received by read().
|
D | sysfs-block-bcache | 29 counted per bio. A partial cache hit counts as a miss.
|
D | sysfs-fs-nilfs2 | 134 Show offset of next partial segment in the current
|
/linux-4.1.27/tools/vm/ |
D | slabinfo.c | 35 unsigned long partial, objects, slabs, objects_partial, objects_total; member 524 s->slab_size, s->slabs - s->partial - s->cpu_slabs, in report() 527 page_size << s->order, s->partial, onoff(s->poison), in report() 566 s->partial, s->cpu_slabs); in slabcache() 610 s->slabs ? (s->partial * 100) / s->slabs : 100, in slabcache() 817 percentage_partial_slabs = s->partial * 100 / s->slabs; in totals() 829 if (s->partial < min_partial) in totals() 830 min_partial = s->partial; in totals() 854 if (s->partial > max_partial) in totals() 855 max_partial = s->partial; in totals() [all …]
|
/linux-4.1.27/fs/reiserfs/ |
D | file.c | 180 int partial = 0; in reiserfs_commit_page() local 210 partial = 1; in reiserfs_commit_page() 240 if (!partial) in reiserfs_commit_page()
|
D | inode.c | 2532 int partial = 0; in reiserfs_write_full_page() local 2689 partial = 1; in reiserfs_write_full_page() 2694 if (!partial) in reiserfs_write_full_page()
|
/linux-4.1.27/arch/x86/purgatory/ |
D | sha256.c | 228 unsigned int partial, done; in sha256_update() local 231 partial = sctx->count & 0x3f; in sha256_update() 236 if ((partial + len) > 63) { in sha256_update() 237 if (partial) { in sha256_update() 238 done = -partial; in sha256_update() 239 memcpy(sctx->buf + partial, data, done + 64); in sha256_update() 249 partial = 0; in sha256_update() 251 memcpy(sctx->buf + partial, src, len - done); in sha256_update()
|
/linux-4.1.27/net/netfilter/ |
D | xt_dccp.c | 59 goto partial; in dccp_find_option() 77 partial: in dccp_find_option()
|
/linux-4.1.27/Documentation/ABI/stable/ |
D | sysfs-driver-ib_srp | 42 * allow_ext_sg, whether ib_srp is allowed to include a partial 44 list. If a partial memory descriptor list has been included 50 implementations support partial memory descriptor lists the 55 only safe with partial memory descriptor list support enabled 87 Description: Whether ib_srp is allowed to include a partial memory
|
/linux-4.1.27/fs/ocfs2/ |
D | move_extents.c | 46 int partial; member 230 int ret, credits = 0, extra_blocks = 0, partial = context->partial; in ocfs2_defrag_extent() local 313 if (!partial) { in ocfs2_defrag_extent() 328 if (partial && (new_len != *len)) in ocfs2_defrag_extent() 1043 context->partial = 1; in ocfs2_ioctl_move_extents()
|
D | aops.h | 42 int *partial,
|
D | aops.c | 429 int *partial, in walk_page_buffers() argument 446 if (partial && !buffer_uptodate(bh)) in walk_page_buffers() 447 *partial = 1; in walk_page_buffers()
|
D | refcounttree.c | 2929 int ret = 0, partial; in ocfs2_duplicate_clusters_by_page() local 2990 from, to, &partial, in ocfs2_duplicate_clusters_by_page()
|
D | alloc.c | 6597 int ret, partial = 0; in ocfs2_map_and_dirty_page() local 6612 from, to, &partial, in ocfs2_map_and_dirty_page() 6622 if (!partial) in ocfs2_map_and_dirty_page()
|
/linux-4.1.27/include/linux/ |
D | splice.h | 55 struct partial_page *partial; /* pages[] may not be contig */ member
|
D | slub_def.h | 44 struct page *partial; /* Partially allocated frozen slabs */ member
|
D | shdma-base.h | 53 size_t partial; member
|
/linux-4.1.27/Documentation/filesystems/ |
D | ncpfs.txt | 9 Related products are linware and mars_nwe, which will give Linux partial
|
D | directory-locking | 51 First of all, at any moment we have a partial ordering of the
|
D | logfs.txt | 34 only a partial segment has been written, the segment number, the
|
D | sysfs.txt | 188 This allows userspace to do partial reads and forward seeks
|
D | relay.txt | 114 memory space. Note that you can't do a partial mmap - you
|
D | coda.txt | 205 manager in Windows 95. The VFS is responsible for partial processing
|
/linux-4.1.27/mm/ |
D | slub.c | 1538 list_add_tail(&page->lru, &n->partial); in __add_partial() 1540 list_add(&page->lru, &n->partial); in __add_partial() 1634 list_for_each_entry_safe(page, page2, &n->partial, lru) { in get_partial_node() 1973 while ((page = c->partial)) { in unfreeze_partials() 1977 c->partial = page->next; in unfreeze_partials() 2047 oldpage = this_cpu_read(s->cpu_slab->partial); in put_cpu_partial() 2075 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) in put_cpu_partial() 2127 return c->page || c->partial; in has_cpu_slab() 2169 list_for_each_entry(page, &n->partial, lru) in count_partial() 2383 if (c->partial) { in __slab_alloc() [all …]
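put_cpu_partial() in slub.c pushes a just-emptied frozen slab onto the per-cpu partial list: read the current head, link the new page in front via page->next, and publish it with this_cpu_cmpxchg(), retrying if the head changed underneath; unfreeze_partials() later drains the list by following the same next pointers. The standalone sketch below keeps only that singly-linked push/drain discipline in plain C; the lock-free this_cpu_cmpxchg() loop and the frozen-slab state handling are deliberately left out.

#include <stdio.h>

/* Toy stand-in for struct page used as a partial-slab list node. */
struct toy_slab {
    int id;
    struct toy_slab *next;
};

/* Push in front of the current head (the kernel publishes this with this_cpu_cmpxchg). */
static void put_partial(struct toy_slab **head, struct toy_slab *slab)
{
    slab->next = *head;
    *head = slab;
}

/* Drain the list, like unfreeze_partials() walking c->partial via page->next. */
static void drain_partials(struct toy_slab **head)
{
    struct toy_slab *slab;

    while ((slab = *head)) {
        *head = slab->next;
        printf("moving slab %d back to the node partial list\n", slab->id);
    }
}

int main(void)
{
    struct toy_slab slabs[3] = { { 0, NULL }, { 1, NULL }, { 2, NULL } };
    struct toy_slab *cpu_partial = NULL;

    for (int i = 0; i < 3; i++)
        put_partial(&cpu_partial, &slabs[i]);
    drain_partials(&cpu_partial);
    return 0;
}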
|
D | slab.h | 350 struct list_head partial; member
|
D | shmem.c | 1634 struct partial_page partial[PIPE_DEF_BUFFERS]; in shmem_file_splice_read() local 1641 .partial = partial, in shmem_file_splice_read() 1717 spd.partial[page_nr].offset = loff; in shmem_file_splice_read() 1718 spd.partial[page_nr].len = this_len; in shmem_file_splice_read()
|
/linux-4.1.27/Documentation/video4linux/ |
D | CARDLIST.ivtv | 15 15 -> GOTVIEW PCI DVD (partial support only) [12ab:0600]
|
D | omap3isp.txt | 60 partial frames.
|
/linux-4.1.27/Documentation/i2c/ |
D | i2c-protocol | 67 point. For example, setting I2C_M_NOSTART on the second partial message 70 If you set the I2C_M_NOSTART variable for the first partial message,
|
D | i2c-stub | 27 SMBus block operations. Writes can be partial. Block read commands always
|
/linux-4.1.27/kernel/ |
D | relay.c | 1228 struct partial_page partial[PIPE_DEF_BUFFERS]; in subbuf_splice_actor() local 1233 .partial = partial, in subbuf_splice_actor() 1267 spd.partial[spd.nr_pages].offset = poff; in subbuf_splice_actor() 1274 spd.partial[spd.nr_pages].len = this_len; in subbuf_splice_actor() 1275 spd.partial[spd.nr_pages].private = private; in subbuf_splice_actor()
|
/linux-4.1.27/crypto/asymmetric_keys/ |
D | x509_public_key.c | 77 bool partial) in x509_request_asymmetric_key() argument 87 if (partial) { in x509_request_asymmetric_key()
|
/linux-4.1.27/drivers/crypto/qat/qat_common/ |
D | icp_qat_fw_la.h | 125 ciph_iv, ciphcfg, partial) \ argument 144 ((partial & QAT_LA_PARTIAL_MASK) << \
|
/linux-4.1.27/fs/ntfs/ |
D | Kconfig | 7 Saying Y or M here enables read support. There is partial, but 55 This enables the partial, but safe, write support in the NTFS driver.
|
D | file.c | 1421 bool partial; in ntfs_commit_pages_after_non_resident_write() local 1426 partial = false; in ntfs_commit_pages_after_non_resident_write() 1433 partial = true; in ntfs_commit_pages_after_non_resident_write() 1443 if (!partial && !PageUptodate(page)) in ntfs_commit_pages_after_non_resident_write()
|
/linux-4.1.27/crypto/ |
D | vmac.c | 575 memcpy(ctx->partial + ctx->partial_size, p, min); in vmac_update() 581 vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx); in vmac_update() 588 memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES), in vmac_update() 611 memset(ctx->partial + ctx->partial_size, 0, in vmac_final() 614 mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx); in vmac_final()
|
/linux-4.1.27/arch/c6x/lib/ |
D | mpyll.S | 23 ;; First compute partial results using 32-bit parts of x and y:
|
/linux-4.1.27/Documentation/sound/alsa/soc/ |
D | codec.txt | 149 SNDRV_CTL_POWER_D1: /* partial On */ 150 SNDRV_CTL_POWER_D2: /* partial On */
|
/linux-4.1.27/Documentation/sh/ |
D | register-banks.txt | 7 The SH-3 and SH-4 CPU families traditionally include a single partial register
|
/linux-4.1.27/arch/alpha/lib/ |
D | ev6-memset.S | 80 ldq_u $4,0($16) # L : Fetch first partial 257 ldq_u $4,0($16) # L : Fetch first partial 444 ldq_u $4,0($16) # L : Fetch first partial
|
D | memchr.S | 121 # last quad may or may not be partial).
|
D | stxncpy.S | 174 cmpbge zero, t2, t8 # e0 : find nulls in second partial 230 or t0, t1, t0 # e0 : first (partial) source word complete
|
D | ev6-stxncpy.S | 216 cmpbge zero, t2, t8 # E : find nulls in second partial 274 or t0, t1, t0 # E : first (partial) source word complete
|
D | ev6-memchr.S | 139 # last quad may or may not be partial).
|
D | stxcpy.S | 199 or t0, t1, t1 # e1 : first (partial) source word complete
|
D | ev6-stxcpy.S | 227 or t0, t1, t1 # E : first (partial) source word complete (stall)
|
/linux-4.1.27/include/trace/events/ |
D | ext4.h | 2068 __field( long long, partial ) 2079 __entry->partial = partial_cluster; 2094 (long long) __entry->partial) 2107 __field( long long, partial ) 2117 __entry->partial = partial_cluster; 2132 (long long) __entry->partial) 2190 int depth, long long partial, __le16 eh_entries), 2192 TP_ARGS(inode, start, end, depth, partial, eh_entries), 2200 __field( long long, partial ) 2210 __entry->partial = partial; [all …]
|
/linux-4.1.27/drivers/dma/ |
D | qcom_bam_dma.c | 923 u32 partial = MAX_DESCRIPTORS - bchan->tail; in bam_start_dma() local 926 partial * sizeof(struct bam_desc_hw)); in bam_start_dma() 927 memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) * in bam_start_dma()
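bam_start_dma() above writes a batch of descriptors into the hardware FIFO and may wrap past its end: partial = MAX_DESCRIPTORS - tail descriptors fit before the wrap, the rest are copied to the start of the ring. A standalone sketch of that two-memcpy wraparound (toy descriptor type and ring size):

#include <stdio.h>
#include <string.h>

#define MAX_DESCRIPTORS 8

struct toy_desc {
    int data;
};

/* Copy xfer_len descriptors into a ring starting at 'tail', wrapping if needed. */
static void ring_write(struct toy_desc *fifo, unsigned int tail,
                       const struct toy_desc *desc, unsigned int xfer_len)
{
    if (tail + xfer_len > MAX_DESCRIPTORS) {
        unsigned int partial = MAX_DESCRIPTORS - tail;

        memcpy(&fifo[tail], desc, partial * sizeof(*desc));
        memcpy(fifo, &desc[partial], (xfer_len - partial) * sizeof(*desc));
    } else {
        memcpy(&fifo[tail], desc, xfer_len * sizeof(*desc));
    }
}

int main(void)
{
    struct toy_desc fifo[MAX_DESCRIPTORS] = { { 0 } };
    struct toy_desc batch[5] = { { 10 }, { 11 }, { 12 }, { 13 }, { 14 } };

    ring_write(fifo, 6, batch, 5);   /* 2 fit at the end, 3 wrap to the front */

    for (int i = 0; i < MAX_DESCRIPTORS; i++)
        printf("fifo[%d] = %d\n", i, fifo[i].data);
    return 0;
}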
|
/linux-4.1.27/arch/mips/kernel/ |
D | entry.S | 99 restore_partial: # restore partial frame
|
/linux-4.1.27/drivers/staging/lustre/lustre/osc/ |
D | osc_io.c | 378 int partial; in osc_trunc_check() local 383 partial = cl_offset(clob, start) < size; in osc_trunc_check() 388 cl_page_gang_lookup(env, clob, io, start + partial, CL_PAGE_EOF, in osc_trunc_check()
|
D | osc_cache.c | 946 bool partial) in osc_extent_truncate() argument 987 (sub->cp_index == trunc_index && partial)) { in osc_extent_truncate() 1015 EASSERTF(ergo(ext->oe_start >= trunc_index + !!partial, in osc_extent_truncate() 1017 ext, "trunc_index %lu, partial %d\n", trunc_index, partial); in osc_extent_truncate() 2632 bool partial; in osc_cache_truncate_start() local 2636 partial = size > cl_offset(osc2cl(obj), index); in osc_cache_truncate_start() 2696 rc = osc_extent_truncate(ext, index, partial); in osc_cache_truncate_start() 2710 "trunc index = %lu/%d.\n", index, partial); in osc_cache_truncate_start() 2713 partial = false; in osc_cache_truncate_start()
|
/linux-4.1.27/Documentation/networking/ |
D | dccp.txt | 108 partial checksum coverage (RFC 4340, sec. 9.2). The default is that checksums 115 values between 1..15 indicate partial coverage. 118 of 0 means that all packets with a partial coverage will be discarded.
|
D | udplite.txt | 52 using partial checksum coverage and so emulate UDP mode (full coverage). 54 To make use of the partial checksum coverage facilities requires setting a 73 required to enable traffic with partial checksum coverage. Its function is
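udplite.txt above describes enabling partial checksum coverage through socket options on a UDP-Lite socket. The sketch below shows the usual setsockopt() calls; the UDPLITE_SEND_CSCOV/UDPLITE_RECV_CSCOV names and the IPPROTO_UDPLITE protocol number follow that document, and the fallback numeric values here are an assumption in case the system headers do not define them.

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPPROTO_UDPLITE
#define IPPROTO_UDPLITE 136
#endif
#ifndef SOL_UDPLITE
#define SOL_UDPLITE IPPROTO_UDPLITE
#endif
#ifndef UDPLITE_SEND_CSCOV
#define UDPLITE_SEND_CSCOV 10   /* sender checksum coverage (assumed value) */
#endif
#ifndef UDPLITE_RECV_CSCOV
#define UDPLITE_RECV_CSCOV 11   /* minimum coverage accepted on receive (assumed value) */
#endif

int main(void)
{
    int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
    if (fd < 0) {
        perror("socket(IPPROTO_UDPLITE)");
        return 1;
    }

    /* Checksum only the first 20 bytes of each outgoing datagram. */
    int cov = 20;
    if (setsockopt(fd, SOL_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov)) < 0)
        perror("UDPLITE_SEND_CSCOV");

    /* Refuse incoming datagrams whose coverage is below 20 bytes. */
    if (setsockopt(fd, SOL_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov)) < 0)
        perror("UDPLITE_RECV_CSCOV");

    puts("UDP-Lite socket configured for partial checksum coverage");
    close(fd);
    return 0;
}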
|
D | bonding.txt | 217 802.3ad aggregations when partial failure of the active aggregator 942 or partial support for bonding, then provide information on enabling
|
/linux-4.1.27/sound/usb/line6/ |
D | driver.c | 95 int partial; in line6_send_raw_message() local 104 &partial, LINE6_TIMEOUT * HZ); in line6_send_raw_message()
|
/linux-4.1.27/Documentation/sound/alsa/ |
D | OSS-Emulation.txt | 143 partial-frag write also partial fragments (affects playback only) 177 The partial-frag and no-silence commands have been added recently.
|
D | compress_offload.txt | 174 - partial drain
|
/linux-4.1.27/drivers/scsi/qla2xxx/ |
D | qla_iocb.c | 878 uint32_t *partial) in qla24xx_get_one_block_sg() argument 899 *partial = 0; in qla24xx_get_one_block_sg() 903 *partial = 1; in qla24xx_get_one_block_sg() 931 uint32_t partial; in qla24xx_walk_and_build_sglist_no_difb() local 960 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { in qla24xx_walk_and_build_sglist_no_difb() 1015 if (partial == 0) { in qla24xx_walk_and_build_sglist_no_difb() 1026 partial = 1; /* So as to not re-enter this block */ in qla24xx_walk_and_build_sglist_no_difb() 1668 uint32_t partial; in qla24xx_dif_start_scsi() local 1677 cmd->device->sector_size, &sgx, &partial)) in qla24xx_dif_start_scsi()
|
/linux-4.1.27/drivers/net/usb/ |
D | rndis_host.c | 109 int partial; in rndis_command() local 145 &notification, sizeof(notification), &partial, in rndis_command()
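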
|
/linux-4.1.27/fs/logfs/ |
D | readwrite.c | 621 block->partial = 0; in initialize_block_counters() 637 block->partial++; in initialize_block_counters() 696 block->partial += !!ptr - !!oldptr; in block_set_pointer() 1404 empty1 = block->partial == 0; in ptr_change() 1454 if (child_wc.ofs || logfs_block(ipage)->partial) in __logfs_write_rec() 1813 if (logfs_block(ipage)->partial) in __logfs_truncate_rec()
|
D | logfs.h | 307 int partial; member
|
/linux-4.1.27/Documentation/ |
D | xillybus.txt | 168 partial completion is allowed. 282 that the partial data in buffer will arrive anyhow, despite the buffer not 295 partial DMA buffers is somewhat different, though. The user can tell the 303 Note that the issue of partial buffer flushing is irrelevant for pipes having
|
D | DMA-API.txt | 282 and <size> parameters are provided to do partial page mapping, it is 362 those passed into the single mapping API to do a partial sync. 483 memory or doing partial flushes. 494 Do a partial sync of memory that was allocated by
|
D | kmemleak.txt | 147 kmemleak_free_part - notify of a partial memory block freeing
|
D | memory-barriers.txt | 326 Memory barriers are such interventions. They impose a perceived partial 349 A write barrier is a partial ordering on stores only; it is not required 369 A data dependency barrier is a partial ordering on interdependent loads 403 A read barrier is a partial ordering on loads only; it is not required to 420 A general memory barrier is a partial ordering over both loads and stores. 888 Firstly, write barriers act as partial orderings on store operations. 926 Secondly, data dependency barriers act as partial orderings on data-dependent 1013 And thirdly, a read barrier acts as a partial order on loads. Consider the 1062 then the partial ordering imposed by CPU 1 will be perceived correctly by CPU
|
D | dma-buf-sharing.txt | 302 the partial chunks at the beginning and end but may return stale or bogus 303 data outside of the range (in these partial chunks).
|
D | bcache.txt | 293 partial hit is counted as a miss.
|
/linux-4.1.27/net/core/ |
D | skbuff.c | 1816 (spd->partial[spd->nr_pages - 1].offset + in spd_can_coalesce() 1817 spd->partial[spd->nr_pages - 1].len == offset); in spd_can_coalesce() 1838 spd->partial[spd->nr_pages - 1].len += *len; in spd_fill_page() 1843 spd->partial[spd->nr_pages].len = *len; in spd_fill_page() 1844 spd->partial[spd->nr_pages].offset = offset; in spd_fill_page() 1933 struct partial_page partial[MAX_SKB_FRAGS]; in skb_splice_bits() local 1937 .partial = partial, in skb_splice_bits()
|
/linux-4.1.27/Documentation/filesystems/cifs/ |
D | TODO | 8 is a partial list of the known problems and missing features:
|
D | CHANGES | 200 on smp system corrupts sequence number. Do not reread unnecessarily partial page 528 Fix prepare write of partial pages to read in data from server if possible. 862 Fix data corruption (in partial page after truncate) that caused fsx to
|
D | README | 254 A partial list of the supported mount options follows:
|
/linux-4.1.27/Documentation/firmware_class/ |
D | README | 42 3), kernel: Discard any previous partial load.
|
/linux-4.1.27/Documentation/vm/ |
D | slub.txt | 107 these are in the cpu slabs and the partial slabs. Full slabs are not 114 list_lock once in a while to deal with partial slabs. That overhead is
|
/linux-4.1.27/drivers/dma/sh/ |
D | shdma-base.c | 542 new->partial = 0; in shdma_add_desc() 770 desc->partial = ops->get_partial(schan, desc); in shdma_terminate_all()
|
/linux-4.1.27/net/ipv4/ |
D | tcp_output.c | 1533 static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp, in tcp_nagle_check() argument 1536 return partial && in tcp_nagle_check() 1569 u32 partial, needed, window, max_len; in tcp_mss_split_point() local 1582 partial = needed % mss_now; in tcp_mss_split_point() 1587 if (tcp_nagle_check(partial != 0, tp, nonagle)) in tcp_mss_split_point() 1588 return needed - partial; in tcp_mss_split_point()
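In tcp_mss_split_point(), partial = needed % mss_now is the trailing sub-MSS chunk, and tcp_nagle_check() decides whether sending that partial segment should be deferred; if so the function returns needed - partial so only whole-MSS segments go out now. A toy arithmetic check of that rounding (toy_* names are invented and the deferral condition is heavily simplified; the real check also considers TSO deferral and the nonagle flags):

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-in: defer the partial chunk whenever Nagle-style batching is on. */
static bool toy_nagle_defers_partial(bool partial_exists, bool nagle_on)
{
    return partial_exists && nagle_on;
}

static unsigned int toy_split_point(unsigned int needed, unsigned int mss_now,
                                    bool nagle_on)
{
    unsigned int partial = needed % mss_now;

    if (toy_nagle_defers_partial(partial != 0, nagle_on))
        return needed - partial;   /* send only whole-MSS segments now */
    return needed;
}

int main(void)
{
    unsigned int mss = 1448;
    unsigned int needed = 5000;    /* 3 full segments plus a 656-byte partial */

    printf("nagle on : send %u bytes\n", toy_split_point(needed, mss, true));
    printf("nagle off: send %u bytes\n", toy_split_point(needed, mss, false));
    return 0;
}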
|
/linux-4.1.27/kernel/trace/ |
D | trace.c | 4856 .partial = partial_def, in tracing_splice_read_pipe() 4907 spd.partial[i].offset = 0; in tracing_splice_read_pipe() 4908 spd.partial[i].len = trace_seq_used(&iter->seq); in tracing_splice_read_pipe() 5698 (struct buffer_ref *)spd->partial[i].private; in buffer_spd_release() 5705 spd->partial[i].private = 0; in buffer_spd_release() 5719 .partial = partial_def, in tracing_buffers_splice_read() 5788 spd.partial[i].len = PAGE_SIZE; in tracing_buffers_splice_read() 5789 spd.partial[i].offset = 0; in tracing_buffers_splice_read() 5790 spd.partial[i].private = (unsigned long)ref; in tracing_buffers_splice_read()
|
/linux-4.1.27/fs/ceph/ |
D | mds_client.c | 1435 struct ceph_msg *msg, *partial = NULL; in ceph_add_cap_releases() local 1456 partial = msg; in ceph_add_cap_releases() 1475 if (partial) { in ceph_add_cap_releases() 1476 head = partial->front.iov_base; in ceph_add_cap_releases() 1478 dout(" queueing partial %p with %d/%d\n", partial, num, in ceph_add_cap_releases() 1480 list_move_tail(&partial->list_head, in ceph_add_cap_releases()
|
/linux-4.1.27/Documentation/filesystems/pohmelfs/ |
D | network_protocol.txt | 47 like partial size of the embedded objects or creation flags.
|
/linux-4.1.27/arch/m68k/fpsp040/ |
D | decbin.S | 153 mulul #TEN,%d1 |mul partial product by one digit place
|
D | setox.S | 473 movew 4(%a0),%d0 | ...expo. and partial sig. of |X| 640 movew 4(%a0),%d0 | ...expo. and partial sig. of |X|
|
/linux-4.1.27/Documentation/powerpc/ |
D | transactional_memory.txt | 17 guaranteed to either complete atomically or roll back and undo any partial
|
/linux-4.1.27/Documentation/arm/Samsung-S3C24XX/ |
D | Overview.txt | 16 revive this effort, partial support can be retrieved from earlier Linux
|
/linux-4.1.27/Documentation/scsi/ |
D | FlashPoint.txt | 89 many purchasers of new systems, it was only a partial solution to the
|
/linux-4.1.27/Documentation/devicetree/bindings/ |
D | xilinx.txt | 275 (ICAP). The ICAP enables partial reconfiguration of the FPGA,
|
/linux-4.1.27/Documentation/scheduler/ |
D | sched-design-CFS.txt | 157 This is the (partial) list of the hooks:
|
/linux-4.1.27/Documentation/frv/ |
D | features.txt | 228 correspond to at least a partial page of WorkRAM. So the first device file
|
/linux-4.1.27/Documentation/ide/ |
D | ChangeLog.ide-tape.1995-2002 | 242 * Changed handling of partial data transfers: they do not
|
/linux-4.1.27/Documentation/crypto/ |
D | asymmetric-keys.txt | 53 partial match. The key type may also use other criteria to refer to a key.
|
/linux-4.1.27/drivers/staging/media/davinci_vpfe/ |
D | dm365_resizer.c | 135 void *output_spec, unsigned char partial, in configure_resizer_out_params() argument 154 if (partial) { in configure_resizer_out_params()
|
/linux-4.1.27/Documentation/input/ |
D | alps.txt | 204 The last two bytes represent a partial bitmap packet, with 3 full packets
|
D | multi-touch-protocol.txt | 163 are divided into categories, to allow for partial implementation. The
|
/linux-4.1.27/arch/mips/lib/ |
D | csum_partial.S | 693 move t2, zero # partial word
|
/linux-4.1.27/drivers/tty/serial/ |
D | sh-sci.c | 1444 sh_desc->partial, sh_desc->cookie); in work_fn_rx() 1447 count = sci_dma_rx_push(s, sh_desc->partial); in work_fn_rx()
|
/linux-4.1.27/Documentation/ioctl/ |
D | ioctl-number.txt | 22 many drivers share a partial letter with other drivers.
|
/linux-4.1.27/Documentation/filesystems/caching/ |
D | cachefiles.txt | 76 - The use of bmap() to detect a partial page at the end of the file.
|
D | netfs-api.txt | 738 Note that partial updates may happen automatically at other times, such as when
|
/linux-4.1.27/ |
D | Makefile | 630 $(call cc-option,-fno-partial-inlining)
|
D | CREDITS | 1 This is at least a partial credits-file of people that have
|
/linux-4.1.27/firmware/keyspan_pda/ |
D | keyspan_pda.S | 666 ;; TODO: shut down timer to avoid partial-char glitch
|
D | xircom_pgs.S | 704 ;; TODO: shut down timer to avoid partial-char glitch
|
/linux-4.1.27/Documentation/RCU/ |
D | checklist.txt | 276 is only a partial solution, though.)
|
/linux-4.1.27/drivers/usb/host/ |
D | Kconfig | 673 bool "partial ISO support"
|
/linux-4.1.27/init/ |
D | Kconfig | 1719 bool "SLUB per cpu partial cache" 1721 Per cpu partial caches accellerate objects allocation and freeing
|
/linux-4.1.27/drivers/staging/speakup/ |
D | spkguide.txt | 586 partial Speakup sys system has been created which we can take advantage 961 One final warning. If you try to load a partial map, you will quickly
|
/linux-4.1.27/Documentation/block/ |
D | biodoc.txt | 327 completion of partial transfers. The driver has to modify these fields
|
/linux-4.1.27/Documentation/devicetree/ |
D | booting-without-of.txt | 1232 See appendix A for an example partial SOC node definition for the
|
/linux-4.1.27/arch/m68k/ifpsp060/src/ |
D | fplsp.S | 7113 mov.w 4(%a0),%d1 # expo. and partial sig. of |X| 7278 mov.w 4(%a0),%d1 # expo. and partial sig. of |X|
|