/linux-4.1.27/arch/mips/ar7/ |
D | prom.c |
    160  struct psp_env_chunk *chunks = (struct psp_env_chunk *)psp_env_data;  in parse_psp_env() local
    162  memcpy_fromio(chunks, psp_env_base, PSP_ENV_SIZE);  in parse_psp_env()
    167  if ((chunks[i].num == 0xff) || ((i + chunks[i].len) > n))  in parse_psp_env()
    169  value = chunks[i].data;  in parse_psp_env()
    170  if (chunks[i].num) {  in parse_psp_env()
    171  name = lookup_psp_var_map(chunks[i].num);  in parse_psp_env()
    178  i += chunks[i].len;  in parse_psp_env()
|
/linux-4.1.27/drivers/gpu/drm/radeon/ |
D | radeon_cs.c |
    277  chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);  in radeon_cs_parser_init()
    284  p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);  in radeon_cs_parser_init()
    285  if (p->chunks == NULL) {  in radeon_cs_parser_init()
    298  p->chunks[i].length_dw = user_chunk.length_dw;  in radeon_cs_parser_init()
    300  p->chunk_relocs = &p->chunks[i];  in radeon_cs_parser_init()
    303  p->chunk_ib = &p->chunks[i];  in radeon_cs_parser_init()
    305  if (p->chunks[i].length_dw == 0)  in radeon_cs_parser_init()
    309  p->chunk_const_ib = &p->chunks[i];  in radeon_cs_parser_init()
    311  if (p->chunks[i].length_dw == 0)  in radeon_cs_parser_init()
    315  p->chunk_flags = &p->chunks[i];  in radeon_cs_parser_init()
    [all …]
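The first hit above is a common ioctl portability idiom: userspace hands the kernel an array of chunk descriptors through a u64 field, so the structure layout is identical for 32-bit and 64-bit userspace, and the kernel casts the value back to a user pointer before copying. A minimal sketch of the idiom, with hypothetical names (cs_args, chunks_ptr) rather than the actual radeon structures:

    #include <linux/slab.h>
    #include <linux/uaccess.h>

    /* Hypothetical ioctl argument block; the u64 carries a user address. */
    struct cs_args {
        u64 chunks_ptr;    /* user address of a u64[nchunks] array */
        u32 nchunks;
    };

    static int copy_chunk_handles(struct cs_args *args, u64 **out)
    {
        /* Double cast: u64 -> unsigned long -> __user pointer. */
        u64 __user *uptr = (u64 __user *)(unsigned long)args->chunks_ptr;
        u64 *array = kcalloc(args->nchunks, sizeof(u64), GFP_KERNEL);

        if (!array)
            return -ENOMEM;
        if (copy_from_user(array, uptr, args->nchunks * sizeof(u64))) {
            kfree(array);
            return -EFAULT;
        }
        *out = array;
        return 0;
    }

kcalloc() is used deliberately here, as in the hit at 284: it checks the count * size multiplication for overflow before allocating.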
|
D | r600_cs.c |
    2347  drm_free_large(parser->chunks[i].kdata);  in r600_cs_parser_fini()
    2348  kfree(parser->chunks);  in r600_cs_parser_fini()
|
D | radeon.h | 1075 struct radeon_cs_chunk *chunks; member
|
/linux-4.1.27/lib/ |
D | genalloc.c |
    159  INIT_LIST_HEAD(&pool->chunks);  in gen_pool_create()
    199  list_add_rcu(&chunk->next_chunk, &pool->chunks);  in gen_pool_add_virt()
    219  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {  in gen_pool_virt_to_phys()
    245  list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {  in gen_pool_destroy()
    286  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {  in gen_pool_alloc()
    365  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {  in gen_pool_free()
    398  list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)  in gen_pool_for_each_chunk()
    421  list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {  in addr_in_gen_pool()
    445  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)  in gen_pool_avail()
    464  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)  in gen_pool_size()
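All of these hits walk pool->chunks, the RCU-protected list of backing regions behind a gen_pool. For orientation, a minimal sketch of how a caller typically builds such a pool (the SRAM device, sizes, and granularity here are illustrative assumptions, not taken from the hits):

    #include <linux/genalloc.h>

    static struct gen_pool *sram_pool;

    static int sram_pool_init(unsigned long virt, phys_addr_t phys, size_t size)
    {
        int ret;

        /* Order-8 granularity: allocations are multiples of 256 bytes. */
        sram_pool = gen_pool_create(8, -1);    /* -1: no NUMA node preference */
        if (!sram_pool)
            return -ENOMEM;

        /* Appends one chunk to pool->chunks, the list seen above. */
        ret = gen_pool_add_virt(sram_pool, virt, phys, size, -1);
        if (ret)
            gen_pool_destroy(sram_pool);
        return ret;
    }

gen_pool_alloc() and gen_pool_free() later scan this same list, which is why nearly every function in the file opens with list_for_each_entry_rcu().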
|
D | scatterlist.c |
    369  unsigned int chunks;  in sg_alloc_table_from_pages() local
    376  chunks = 1;  in sg_alloc_table_from_pages()
    379  ++chunks;  in sg_alloc_table_from_pages()
    381  ret = sg_alloc_table(sgt, chunks, gfp_mask);  in sg_alloc_table_from_pages()
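sg_alloc_table_from_pages() has to size the table before filling it, so it first counts how many physically contiguous runs ("chunks") the page array collapses into. A self-contained sketch of that counting step (same logic, simplified):

    #include <linux/mm.h>
    #include <linux/scatterlist.h>

    /* Each break in pfn adjacency starts a new scatterlist entry.
     * Assumes n_pages >= 1. */
    static unsigned int count_contig_chunks(struct page **pages,
                                            unsigned int n_pages)
    {
        unsigned int i, chunks = 1;

        for (i = 1; i < n_pages; i++)
            if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
                ++chunks;
        return chunks;
    }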
|
D | debugobjects.c |
    668  unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;  in __debug_check_no_obj_freed() local
    680  chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));  in __debug_check_no_obj_freed()
    681  chunks >>= ODEBUG_CHUNK_SHIFT;  in __debug_check_no_obj_freed()
    683  for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {  in __debug_check_no_obj_freed()
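The arithmetic in these hits is the usual round-up-then-shift idiom for counting how many fixed-size buckets an address range touches. An illustration with an arbitrary chunk size (not the real ODEBUG constants):

    #define CHUNK_SHIFT 6    /* illustrative: 64-byte chunks */
    #define CHUNK_SIZE (1UL << CHUNK_SHIFT)

    static unsigned long chunks_spanned(unsigned long saddr, unsigned long eaddr)
    {
        unsigned long paddr = saddr & ~(CHUNK_SIZE - 1);    /* align down */

        /* Adding CHUNK_SIZE - 1 before shifting rounds up, so a partial
         * trailing chunk still counts: a 100-byte range from an aligned
         * start gives (100 + 63) >> 6 = 2. */
        return ((eaddr - paddr) + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
    }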
|
/linux-4.1.27/drivers/infiniband/hw/usnic/ |
D | usnic_vnic.c |
    30  struct usnic_vnic_res_chunk chunks[USNIC_VNIC_RES_TYPE_MAX];  member
    103  for (i = 0; i < ARRAY_SIZE(vnic->chunks); i++) {  in usnic_vnic_dump()
    104  chunk = &vnic->chunks[i];  in usnic_vnic_dump()
    208  return vnic->chunks[type].cnt;  in usnic_vnic_res_cnt()
    214  return vnic->chunks[type].free_cnt;  in usnic_vnic_res_free_cnt()
    244  src = &vnic->chunks[type];  in usnic_vnic_get_resources()
    274  vnic->chunks[res->type].free_cnt++;  in usnic_vnic_put_resources()
    367  &vnic->chunks[res_type]);  in usnic_vnic_discover_resources()
    380  usnic_vnic_free_res_chunk(&vnic->chunks[res_type]);  in usnic_vnic_discover_resources()
    416  usnic_vnic_free_res_chunk(&vnic->chunks[res_type]);  in usnic_vnic_release_resources()
|
/linux-4.1.27/net/sctp/ |
D | chunk.c |
    58  INIT_LIST_HEAD(&msg->chunks);  in sctp_datamsg_init()
    80  list_for_each_entry(chunk, &msg->chunks, frag_list)  in sctp_datamsg_free()
    100  list_for_each_safe(pos, temp, &msg->chunks) {  in sctp_datamsg_destroy()
    292  list_add_tail(&chunk->frag_list, &msg->chunks);  in sctp_datamsg_from_user()
    328  list_add_tail(&chunk->frag_list, &msg->chunks);  in sctp_datamsg_from_user()
    337  list_for_each_safe(pos, temp, &msg->chunks) {  in sctp_datamsg_from_user()
|
D | auth.c |
    189  sctp_chunks_param_t *chunks,  in sctp_auth_make_key_vector() argument
    200  if (chunks)  in sctp_auth_make_key_vector()
    201  chunks_len = ntohs(chunks->param_hdr.length);  in sctp_auth_make_key_vector()
    212  if (chunks) {  in sctp_auth_make_key_vector()
    213  memcpy(new->data + offset, chunks, chunks_len);  in sctp_auth_make_key_vector()
    656  switch (param->chunks[i]) {  in __sctp_auth_cid()
    664  if (param->chunks[i] == chunk)  in __sctp_auth_cid()
    780  p->chunks[nchunks] = chunk_id;  in sctp_auth_ep_add_chunkid()
|
D | endpointola.c |
    105  auth_chunks->chunks[0] = SCTP_CID_ASCONF;  in sctp_endpoint_init()
    106  auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;  in sctp_endpoint_init()
|
D | sm_make_chunk.c |
    1538  list_for_each_entry(lchunk, &msg->chunks, frag_list) {  in sctp_chunk_assign_ssn()
    1972  switch (param.ext->chunks[i]) {  in sctp_verify_ext_param()
    2005  switch (param.ext->chunks[i]) {  in sctp_process_ext_param()
|
D | sm_sideeffect.c | 1037 list_for_each_entry(chunk, &msg->chunks, frag_list) { in sctp_cmd_send_msg()
|
D | socket.c |
    1953  list_for_each_entry(chunk, &datamsg->chunks, frag_list) {  in sctp_sendmsg()
    5652  if (copy_to_user(to, ch->chunks, num_chunks))  in sctp_getsockopt_peer_auth_chunks()
    5700  if (copy_to_user(to, ch->chunks, num_chunks))  in sctp_getsockopt_local_auth_chunks()
|
/linux-4.1.27/net/sunrpc/xprtrdma/ |
D | svc_rdma_marshal.c |
    317  void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *rmsgp, int chunks)  in svc_rdma_xdr_encode_write_list() argument
    328  ary->wc_nchunks = htonl(chunks);  in svc_rdma_xdr_encode_write_list()
    331  ary->wc_array[chunks].wc_target.rs_handle = xdr_zero;  in svc_rdma_xdr_encode_write_list()
    334  ary->wc_array[chunks].wc_target.rs_length = xdr_zero;  in svc_rdma_xdr_encode_write_list()
    338  int chunks)  in svc_rdma_xdr_encode_reply_array() argument
    341  ary->wc_nchunks = htonl(chunks);  in svc_rdma_xdr_encode_reply_array()
|
/linux-4.1.27/kernel/ |
D | audit_tree.c |
    15  struct list_head chunks;  member
    82  INIT_LIST_HEAD(&tree->chunks);  in alloc_tree()
    343  list_add(&chunk->owners[0].list, &tree->chunks);  in create_chunk()
    436  list_add(&p->list, &tree->chunks);  in tag_chunk()
    496  while (!list_empty(&victim->chunks)) {  in prune_one()
    499  p = list_entry(victim->chunks.next, struct node, list);  in prune_one()
    518  for (p = tree->chunks.next; p != &tree->chunks; p = q) {  in trim_marked()
    523  list_add(p, &tree->chunks);  in trim_marked()
    527  while (!list_empty(&tree->chunks)) {  in trim_marked()
    530  node = list_entry(tree->chunks.next, struct node, list);  in trim_marked()
    [all …]
|
/linux-4.1.27/mm/ |
D | zbud.c |
    341  int chunks, i, freechunks;  in zbud_alloc() local
    350  chunks = size_to_chunks(size);  in zbud_alloc()
    355  for_each_unbuddied_list(i, chunks) {  in zbud_alloc()
    380  zhdr->first_chunks = chunks;  in zbud_alloc()
    382  zhdr->last_chunks = chunks;  in zbud_alloc()
|
D | Kconfig |
    387  The NOMMU mmap() frequently needs to allocate large contiguous chunks
    389  allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently
|
/linux-4.1.27/drivers/md/ |
D | bitmap.c |
    754  unsigned long chunks, int with_super,  in bitmap_storage_alloc() argument
    761  bytes = DIV_ROUND_UP(chunks, 8);  in bitmap_storage_alloc()
    1023  unsigned long i, chunks, index, oldindex, bit, node_offset = 0;  in bitmap_init_from_disk() local
    1033  chunks = bitmap->counts.chunks;  in bitmap_init_from_disk()
    1040  for (i = 0; i < chunks ; i++) {  in bitmap_init_from_disk()
    1072  for (i = 0; i < chunks; i++) {  in bitmap_init_from_disk()
    1137  bit_cnt, chunks);  in bitmap_init_from_disk()
    1253  for (j = 0; j < counts->chunks; j++) {  in bitmap_daemon_work()
    1887  for (j = 0; j < counts->chunks; j++) {  in bitmap_copy_from_slot()
    1960  unsigned long chunks;  in bitmap_resize() local
    [all …]
|
D | bitmap.h | 188 unsigned long chunks; /* Total number of data member
|
/linux-4.1.27/drivers/net/wireless/ti/wlcore/ |
D | boot.c |
    251  u32 chunks, addr, len;  in wlcore_boot_upload_firmware() local
    256  chunks = be32_to_cpup((__be32 *) fw);  in wlcore_boot_upload_firmware()
    259  wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks);  in wlcore_boot_upload_firmware()
    261  while (chunks--) {  in wlcore_boot_upload_firmware()
    272  chunks, addr, len);  in wlcore_boot_upload_firmware()
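The hits outline a length-prefixed container walk: the firmware blob starts with a big-endian chunk count, and each chunk carries its own device address and length. A hedged reconstruction of the loop's shape (the field layout is a guess for illustration, not the documented wl1271 format):

    #include <linux/kernel.h>

    static void walk_fw_chunks(const u8 *fw)
    {
        u32 chunks = be32_to_cpup((const __be32 *)fw);

        fw += sizeof(__be32);
        while (chunks--) {
            u32 addr = be32_to_cpup((const __be32 *)fw);
            u32 len = be32_to_cpup((const __be32 *)(fw + 4));

            fw += 2 * sizeof(__be32);
            /* ... upload len bytes of payload to device address addr ... */
            fw += len;
        }
    }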
|
/linux-4.1.27/drivers/dma/sh/ |
D | rcar-dmac.c |
    81  struct list_head chunks;  member
    109  struct rcar_dmac_xfer_chunk chunks[0];  member
    117  ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \
    352  chunk = list_first_entry(&desc->chunks,  in rcar_dmac_chan_start_xfer()
    448  desc->running = list_first_entry(&desc->chunks,  in rcar_dmac_tx_submit()
    480  INIT_LIST_HEAD(&desc->chunks);  in rcar_dmac_desc_alloc()
    511  list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);  in rcar_dmac_desc_put()
    604  struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];  in rcar_dmac_xfer_chunk_alloc()
    701  list_for_each_entry(chunk, &desc->chunks, node) {  in rcar_dmac_fill_hwdesc()
    910  list_add_tail(&chunk->node, &desc->chunks);  in rcar_dmac_chan_prep_sg()
    [all …]
|
D | shdma-base.c |
    100  if (chunk->chunks == 1) {  in shdma_tx_submit()
    358  if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {  in __ld_cleanup()
    374  BUG_ON(desc->chunks != 1);  in __ld_cleanup()
    570  int chunks = 0;  in shdma_prep_sg() local
    575  chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);  in shdma_prep_sg()
    615  new->chunks = 1;  in shdma_prep_sg()
    617  new->chunks = chunks--;  in shdma_prep_sg()
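Here desc->chunks is a countdown over the pieces of one logical transfer: shdma_prep_sg() first budgets how many hardware chunks the scatterlist needs, since one entry may exceed the engine's maximum transfer length, and the descriptor carrying chunks == 1 appears to mark the transfer's final piece. A sketch of the budgeting step:

    #include <linux/kernel.h>
    #include <linux/scatterlist.h>

    /* Every sg entry longer than max_xfer_len is split into several
     * hardware chunks; DIV_ROUND_UP counts the splits per entry. */
    static int count_hw_chunks(struct scatterlist *sgl, unsigned int sg_len,
                               size_t max_xfer_len)
    {
        struct scatterlist *sg;
        int i, chunks = 0;

        for_each_sg(sgl, sg, sg_len, i)
            chunks += DIV_ROUND_UP(sg_dma_len(sg), max_xfer_len);
        return chunks;
    }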
|
/linux-4.1.27/Documentation/device-mapper/ |
D | striped.txt |
    5  device across one or more underlying devices. Data is written in "chunks",
    6  with consecutive chunks rotating among the underlying devices. This can
|
D | snapshot.txt |
    14  In the first two cases, dm copies only the chunks of data that get
    35  A snapshot of the <origin> block device is created. Changed chunks of
    56  Creates a merging snapshot that takes control of the changed chunks
    58  procedure, and merges these chunks back into the <origin>. Once merging
|
D | linear.txt | 35 # Split a device into 4M chunks and then join them together in reverse order.
|
/linux-4.1.27/Documentation/x86/x86_64/ |
D | fake-numa-for-cpusets |
    6  you can create fake NUMA nodes that represent contiguous chunks of memory and
    18  four equal chunks of 512M each that we can now use to assign to cpusets. As
|
/linux-4.1.27/include/linux/ |
D | sctp.h |
    311  __u8 chunks[0];  member
    323  __u8 chunks[0];  member
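Both hits are zero-length ("flexible") array members: the list of chunk IDs lives inline, immediately after the parameter header, in a single allocation. A generic sketch of the idiom with hypothetical names (not the real sctp_chunks_param layout):

    #include <linux/slab.h>
    #include <linux/types.h>

    struct chunks_param {
        __u16 param_type;
        __u16 param_len;    /* header plus inline payload */
        __u8 chunks[0];     /* chunk IDs follow the header directly */
    };

    /* One kmalloc() covers the header and n chunk IDs. */
    static struct chunks_param *alloc_chunks_param(unsigned int n)
    {
        struct chunks_param *p = kmalloc(sizeof(*p) + n, GFP_KERNEL);

        if (p)
            p->param_len = sizeof(*p) + n;
        return p;
    }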
|
D | genalloc.h | 57 struct list_head chunks; /* list of chunks in this pool */ member
|
D | shdma-base.h | 55 int chunks; member
|
/linux-4.1.27/drivers/mtd/nand/ |
D | davinci_nand.c |
    776  int chunks = info->mtd.writesize / 512;  in nand_davinci_probe() local
    778  if (!chunks || info->mtd.oobsize < 16) {  in nand_davinci_probe()
    788  if (chunks == 1) {  in nand_davinci_probe()
    794  if (chunks == 4) {  in nand_davinci_probe()
|
/linux-4.1.27/Documentation/spi/ |
D | spi-sc18is602 | 31 similar large accesses have to be split into multiple chunks of no more than
|
/linux-4.1.27/scripts/ |
D | checkpatch.pl |
    1189  my (@chunks);
    1195  push(@chunks, [ $condition, $statement ]);
    1197  return ($level, $linenr, @chunks);
    1208  push(@chunks, [ $condition, $statement ]);
    1211  return ($level, $linenr, @chunks);
    4479  my ($level, $endln, @chunks) =
    4483  if ($#chunks > 0 && $level == 0) {
    4489  for my $chunk (@chunks) {
    4550  my ($level, $endln, @chunks) =
    4554  my ($cond, $block) = @{$chunks[0]};
    [all …]
|
/linux-4.1.27/arch/mn10300/lib/ |
D | memset.S | 45 # we want to transfer as much as we can in chunks of 32 bytes
|
D | memcpy.S | 41 # we want to transfer as much as we can in chunks of 32 bytes
|
D | do_csum.S | 57 # we want to checksum as much as we can in chunks of 32 bytes
|
D | memmove.S | 47 # we want to transfer as much as we can in chunks of 32 bytes
|
/linux-4.1.27/fs/xfs/ |
D | xfs_buf_item.c |
    760  int chunks;  in xfs_buf_item_init() local
    793  chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),  in xfs_buf_item_init()
    795  map_size = DIV_ROUND_UP(chunks, NBWORD);  in xfs_buf_item_init()
|
/linux-4.1.27/drivers/net/wireless/ath/carl9170/ |
D | tx.c |
    193  unsigned int chunks;  in carl9170_alloc_dev_space() local
    198  chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size);  in carl9170_alloc_dev_space()
    199  if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) {  in carl9170_alloc_dev_space()
    200  atomic_add(chunks, &ar->mem_free_blocks);  in carl9170_alloc_dev_space()
    209  atomic_add(chunks, &ar->mem_free_blocks);  in carl9170_alloc_dev_space()
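carl9170_alloc_dev_space() demonstrates a lock-free credit counter: reserve with atomic_sub_return(), and if the counter went negative another path raced past the budget, so roll the reservation back and fail. A standalone sketch of the pattern (budget and names are illustrative):

    #include <linux/atomic.h>
    #include <linux/errno.h>
    #include <linux/kernel.h>

    static atomic_t free_blocks = ATOMIC_INIT(64);    /* illustrative budget */

    static int reserve_blocks(unsigned int len, unsigned int block_size)
    {
        unsigned int chunks = DIV_ROUND_UP(len, block_size);

        if (atomic_sub_return(chunks, &free_blocks) < 0) {
            atomic_add(chunks, &free_blocks);    /* undo the over-reservation */
            return -ENOSPC;
        }
        return 0;
    }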
|
/linux-4.1.27/Documentation/usb/ |
D | ehci.txt |
    192  In typical situations, a usb_bulk_msg() loop writing out 4 KB chunks is
    195  than the I/O. If that same loop used 16 KB chunks, it'd be better; a
    196  sequence of 128 KB chunks would waste a lot less.
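To make the advice concrete, a hedged sketch of such a loop using the 16 KB chunks the text recommends (the device, pipe, and 5-second timeout are placeholder assumptions):

    #include <linux/kernel.h>
    #include <linux/usb.h>

    static int bulk_write_all(struct usb_device *udev, unsigned int pipe,
                              u8 *buf, int total)
    {
        int sent = 0;

        while (sent < total) {
            int len = min(total - sent, 16 * 1024);
            int actual = 0;
            int ret = usb_bulk_msg(udev, pipe, buf + sent, len,
                                   &actual, 5000 /* ms */);

            if (ret)
                return ret;
            sent += actual;    /* larger chunks amortize per-URB overhead */
        }
        return 0;
    }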
|
/linux-4.1.27/Documentation/devicetree/bindings/iommu/ |
D | samsung,sysmmu.txt | 4 physical memory chunks visible as a contiguous region to DMA-capable peripheral
|
/linux-4.1.27/Documentation/mmc/ |
D | mmc-dev-attrs.txt | 43 be desirable to do it in smaller chunks for three reasons:
|
D | mmc-async-req.txt | 61 request in two chunks, prepare the first chunk and start the request,
|
/linux-4.1.27/Documentation/networking/ |
D | i40e.txt | 51 shown that by coalescing Rx traffic into larger chunks of data, CPU
|
D | ixgbe.txt | 227 shown that by coalescing Rx traffic into larger chunks of data, CPU
|
D | ip-sysctl.txt |
    1660  provides the ability to send and receive authenticated chunks and is
    1692  The maximum number of retransmissions of INIT and COOKIE-ECHO chunks
    1740  The interval (in milliseconds) between HEARTBEAT chunks. These chunks
|
/linux-4.1.27/Documentation/ABI/testing/ |
D | sysfs-driver-wacom | 71 1024 byte binary is split up into 16x 64 byte chunks. Each 64
|
/linux-4.1.27/Documentation/filesystems/ |
D | btrfs.txt |
    142  data chunks. Off by default.
    200  The ssd_spread mount option attempts to allocate into big chunks
|
D | sysv-fs.txt | 82 the next free block. Rather, the free blocks are organized in chunks
|
D | ceph.txt | 28 across storage nodes in large chunks to distribute workload and
|
D | ramfs-rootfs-initramfs.txt | 54 synthetic block devices, now from files instead of from chunks of memory.
|
D | proc.txt |
    753  available. In this case, there are 0 chunks of 2^0*PAGE_SIZE available in
    754  ZONE_DMA, 4 chunks of 2^1*PAGE_SIZE in ZONE_DMA, 101 chunks of 2^4*PAGE_SIZE
|
/linux-4.1.27/Documentation/mtd/nand/ |
D | pxa3xx-nand.txt | 37 (with some additional controller-specific magic) and read two chunks of 2080B
|
/linux-4.1.27/include/net/sctp/ |
D | structs.h |
    360  struct sctp_chunks_param *chunks;  member
    524  struct list_head chunks;  member
|
/linux-4.1.27/Documentation/dmaengine/ |
D | provider.txt |
    57  The latter are usually programmed using a collection of chunks to
    135  -> Your device is able to report which chunks have been
    379  Transfer: A collection of chunks (be it contiguous or not)
|
/linux-4.1.27/Documentation/ |
D | memory-hotplug.txt |
    92  into chunks of the same size. These chunks are called "sections". The size of
    96  Memory sections are combined into chunks referred to as "memory blocks". The
|
D | ramoops.txt | 31 The memory area is divided into "record_size" chunks (also rounded down to
|
D | dell_rbu.txt | 44 In case of packet mechanism the single memory can be broken in smaller chunks
|
D | dma-buf-sharing.txt |
    277  an api similar to kmap. Accessing a dma_buf is done in aligned chunks of
    302  the partial chunks at the beginning and end but may return stale or bogus
    303  data outside of the range (in these partial chunks).
|
D | md.txt |
    178  This is the size in bytes for 'chunks' and is only relevant to
    180  of the array is conceptually divided into chunks and consecutive
    181  chunks are striped onto neighbouring devices.
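A small worked example of that striping arithmetic (a simplified RAID0-style uniform layout, not md's actual zone-handling code): the chunk index selects a device round-robin, and the number of whole stripes consumed gives the offset on that device.

    struct stripe_loc {
        unsigned int dev;
        unsigned long long dev_sector;
    };

    /* Map an array sector to (member device, sector on that device). */
    static struct stripe_loc map_sector(unsigned long long sector,
                                        unsigned int chunk_sectors,
                                        unsigned int nr_devs)
    {
        unsigned long long chunk = sector / chunk_sectors;
        struct stripe_loc loc;

        loc.dev = chunk % nr_devs;
        loc.dev_sector = (chunk / nr_devs) * chunk_sectors
                         + sector % chunk_sectors;
        return loc;
    }

For instance, with 128-sector chunks on two devices, array sector 300 falls in chunk 2, so it maps to device 0 at device sector 1 * 128 + 44 = 172.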
|
D | HOWTO |
    506  The Linux kernel community does not gladly accept large chunks of code
    551  chunks that they may get already accepted, even when your whole task is
|
D | IPMI.txt |
    92  different ways. Because of that, it's broken into many chunks of
    93  code. These chunks (by module name) are:
|
D | assoc_array.txt | 360 The index key is read in chunks of machine word. Each chunk is subdivided into
|
D | CodingStyle | 82 Statements longer than 80 columns will be broken into sensible chunks, unless
|
D | kernel-parameters.txt | 1062 nochunk: disable reading files in "chunks" in the EFI
|
/linux-4.1.27/drivers/video/fbdev/matrox/ |
D | matroxfb_base.h | 257 unsigned int chunks; member
|
D | matroxfb_base.c |
    325  pos += minfo->curr.ydstorg.chunks;  in matrox_pan_var()
    783  minfo->curr.ydstorg.chunks = ydstorg >> (isInterleave(minfo) ? 3 : 2);  in matroxfb_set_par()
    818  pos += minfo->curr.ydstorg.chunks;  in matroxfb_set_par()
|
/linux-4.1.27/fs/ocfs2/ |
D | quota_local.c |
    347  int i, chunks = le32_to_cpu(ldinfo->dqi_chunks);  in ocfs2_recovery_load_quota() local
    350  for (i = 0; i < chunks; i++) {  in ocfs2_recovery_load_quota()
|
/linux-4.1.27/include/uapi/drm/ |
D | radeon_drm.h | 983 uint64_t chunks; member
|
/linux-4.1.27/drivers/tty/vt/ |
D | defkeymap.c_shipped | 153 * the default and allocate dynamically in chunks of 512 bytes.
|
/linux-4.1.27/Documentation/early-userspace/ |
D | README | 107 Eventually, several more chunks of kernel functionality will hopefully
|
/linux-4.1.27/Documentation/scsi/ |
D | st.txt |
    184  Buffer allocation uses chunks of memory having sizes 2^n * (page
    208  Scatter/gather buffers (buffers that consist of chunks non-contiguous
    212  three kinds of chunks:
|
/linux-4.1.27/drivers/iommu/ |
D | Kconfig | 232 non-linear physical memory chunks as linear memory in their
|
/linux-4.1.27/arch/cris/arch-v10/ |
D | README.mm | 103 chunks of memory not possible using the normal kmalloc physical RAM
|
/linux-4.1.27/drivers/staging/lustre/lustre/osc/ |
D | osc_cache.c |
    1025  int chunks = (ext->oe_end >> ppc_bits) - trunc_chunk;  in osc_extent_truncate() local
    1037  ++chunks;  in osc_extent_truncate()
    1041  grants = chunks << cli->cl_chunkbits;  in osc_extent_truncate()
|
/linux-4.1.27/Documentation/dvb/ |
D | README.dvb-usb | 24 - TODO: a I2C-chunker. It creates device-specific chunks of register-accesses
|
/linux-4.1.27/Documentation/cpu-freq/ |
D | governors.txt | 205 increase in 5% chunks of your maximum cpu frequency. You can change this
|
/linux-4.1.27/drivers/net/wireless/ath/ath10k/ |
D | wmi-tlv.c |
    1114  struct wmi_host_mem_chunks *chunks;  in ath10k_wmi_tlv_op_gen_init() local
    1146  chunks = (void *)tlv->value;  in ath10k_wmi_tlv_op_gen_init()
    1206  ath10k_wmi_put_host_mem_chunks(ar, chunks);  in ath10k_wmi_tlv_op_gen_init()
|
D | wmi.c |
    3772  struct wmi_host_mem_chunks *chunks)  in ath10k_wmi_put_host_mem_chunks() argument
    3777  chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);  in ath10k_wmi_put_host_mem_chunks()
    3780  chunk = &chunks->items[i];  in ath10k_wmi_put_host_mem_chunks()
|
D | wmi.h | 4895 struct wmi_host_mem_chunks *chunks);
|
/linux-4.1.27/Documentation/serial/ |
D | tty.txt | 202 smaller chunks.
|
/linux-4.1.27/Documentation/block/ |
D | data-integrity.txt | 117 space is limited, the block interface allows tagging bigger chunks by
|
D | biodoc.txt |
    373  forced such requests to be broken up into small chunks before being passed
    390  greater than PAGE_SIZE chunks in one shot)
    466  (e.g a 1MB bio_vec needs to be handled in max 128kB chunks for IDE)
    822  The ll_rw_kio() routine breaks up the kiobuf into page sized chunks and
|
/linux-4.1.27/fs/befs/ |
D | ChangeLog | 233 So it does i/o in much larger chunks. It is the correct linux way. It
|
/linux-4.1.27/Documentation/filesystems/caching/ |
D | fscache.txt | 86 It instead serves the cache out in PAGE_SIZE chunks as and when requested by
|
/linux-4.1.27/Documentation/mtd/ |
D | nand_ecc.txt | 269 to write our code in such a way that we process data in 32 bit chunks.
|