Searched refs:chunks (Results 1 – 86 of 86) sorted by relevance

/linux-4.1.27/arch/mips/ar7/
prom.c:160 struct psp_env_chunk *chunks = (struct psp_env_chunk *)psp_env_data; in parse_psp_env() local
162 memcpy_fromio(chunks, psp_env_base, PSP_ENV_SIZE); in parse_psp_env()
167 if ((chunks[i].num == 0xff) || ((i + chunks[i].len) > n)) in parse_psp_env()
169 value = chunks[i].data; in parse_psp_env()
170 if (chunks[i].num) { in parse_psp_env()
171 name = lookup_psp_var_map(chunks[i].num); in parse_psp_env()
178 i += chunks[i].len; in parse_psp_env()
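The prom.c hits above are a compact TLV-style walk: each table entry carries a one-byte variable number and a length counted in table slots, the index advances by chunks[i].len, and a num of 0xff (or a length running past the end) stops the scan. A minimal userspace sketch of the same traversal, assuming the two-byte-header layout implied by the snippet (env_entry and dump_env are illustrative names, not the AR7 code):

    #include <stdio.h>
    #include <stdint.h>

    struct env_entry {                /* assumed layout of one 2-byte table slot */
            uint8_t num;              /* variable id; 0xff terminates the table */
            uint8_t len;              /* entry length, counted in slots */
            char    data[];           /* value bytes overlay the following slots */
    };

    static void dump_env(const uint8_t *tbl, size_t nslots)
    {
            size_t i = 0;

            while (i < nslots) {
                    const struct env_entry *e =
                            (const struct env_entry *)(tbl + 2 * i);

                    /* same stop conditions as parse_psp_env() */
                    if (e->num == 0xff || i + e->len > nslots)
                            break;
                    if (e->num)       /* assumption: num == 0 is a blank entry */
                            printf("var %d = %s\n", e->num, e->data);
                    if (e->len == 0)  /* guard the sketch against a stuck index */
                            break;
                    i += e->len;
            }
    }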
/linux-4.1.27/drivers/gpu/drm/radeon/
radeon_cs.c:277 chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks); in radeon_cs_parser_init()
284 p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL); in radeon_cs_parser_init()
285 if (p->chunks == NULL) { in radeon_cs_parser_init()
298 p->chunks[i].length_dw = user_chunk.length_dw; in radeon_cs_parser_init()
300 p->chunk_relocs = &p->chunks[i]; in radeon_cs_parser_init()
303 p->chunk_ib = &p->chunks[i]; in radeon_cs_parser_init()
305 if (p->chunks[i].length_dw == 0) in radeon_cs_parser_init()
309 p->chunk_const_ib = &p->chunks[i]; in radeon_cs_parser_init()
311 if (p->chunks[i].length_dw == 0) in radeon_cs_parser_init()
315 p->chunk_flags = &p->chunks[i]; in radeon_cs_parser_init()
[all …]
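radeon_cs_parser_init() shows the usual ioctl copy-in discipline for chunk arrays: the submitted cs->chunks value is a user pointer, so the kernel bounds the count, allocates its own array with kcalloc(), and copies each descriptor in before trusting any field. A simplified, hedged sketch of that pattern (drv_chunk, MAX_CHUNKS and copy_in_chunks are illustrative, and the real radeon code goes through an intermediate array of user pointers):

    #include <linux/slab.h>
    #include <linux/uaccess.h>
    #include <linux/err.h>

    #define MAX_CHUNKS 64                     /* illustrative sanity bound */

    struct drv_chunk {                        /* stand-in for a CS chunk descriptor */
            u32 chunk_id;
            u32 length_dw;
            u64 chunk_data;
    };

    static struct drv_chunk *copy_in_chunks(u64 uptr, u32 nchunks)
    {
            struct drv_chunk __user *user = (void __user *)(unsigned long)uptr;
            struct drv_chunk *chunks;

            if (nchunks == 0 || nchunks > MAX_CHUNKS)
                    return ERR_PTR(-EINVAL);  /* never size an allocation from raw user input */

            chunks = kcalloc(nchunks, sizeof(*chunks), GFP_KERNEL);
            if (!chunks)
                    return ERR_PTR(-ENOMEM);

            if (copy_from_user(chunks, user, nchunks * sizeof(*chunks))) {
                    kfree(chunks);
                    return ERR_PTR(-EFAULT);
            }
            return chunks;                    /* caller kfree()s when done */
    }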
r600_cs.c:2347 drm_free_large(parser->chunks[i].kdata); in r600_cs_parser_fini()
2348 kfree(parser->chunks); in r600_cs_parser_fini()
radeon.h:1075 struct radeon_cs_chunk *chunks; member
/linux-4.1.27/lib/
genalloc.c:159 INIT_LIST_HEAD(&pool->chunks); in gen_pool_create()
199 list_add_rcu(&chunk->next_chunk, &pool->chunks); in gen_pool_add_virt()
219 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_virt_to_phys()
245 list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { in gen_pool_destroy()
286 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_alloc()
365 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_free()
398 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) in gen_pool_for_each_chunk()
421 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) { in addr_in_gen_pool()
445 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) in gen_pool_avail()
464 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) in gen_pool_size()
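All of the lib/genalloc.c hits walk pool->chunks, the RCU-protected list that backs a gen_pool: gen_pool_add_virt() links a chunk in, and alloc, free, avail and size each traverse the same list. For orientation, a short call-side sketch of the public API (error paths trimmed; vaddr and region_size are assumed to describe memory the caller owns):

    #include <linux/genalloc.h>

    static int pool_demo(void *vaddr, size_t region_size)
    {
            struct gen_pool *pool;
            unsigned long addr;

            pool = gen_pool_create(8, -1);    /* 2^8-byte granularity, any NUMA node */
            if (!pool)
                    return -ENOMEM;

            /* each successful gen_pool_add() appends one chunk to pool->chunks */
            if (gen_pool_add(pool, (unsigned long)vaddr, region_size, -1)) {
                    gen_pool_destroy(pool);
                    return -ENOMEM;
            }

            addr = gen_pool_alloc(pool, 1024);        /* carved out of some chunk */
            if (addr)
                    gen_pool_free(pool, addr, 1024);

            gen_pool_destroy(pool);           /* pool must be fully freed again here */
            return 0;
    }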
scatterlist.c:369 unsigned int chunks; in sg_alloc_table_from_pages() local
376 chunks = 1; in sg_alloc_table_from_pages()
379 ++chunks; in sg_alloc_table_from_pages()
381 ret = sg_alloc_table(sgt, chunks, gfp_mask); in sg_alloc_table_from_pages()
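sg_alloc_table_from_pages() sizes its table by first counting "chunks", i.e. runs of physically contiguous pages, so that each run can collapse into a single scatterlist entry. The counting step amounts to this (a sketch of the loop, not the verbatim function; callers guarantee n_pages > 0):

    /* one chunk per run of consecutive page frame numbers */
    static unsigned int count_chunks(struct page **pages, unsigned int n_pages)
    {
            unsigned int i, chunks = 1;

            for (i = 1; i < n_pages; i++)
                    if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
                            ++chunks;
            return chunks;
    }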
debugobjects.c:668 unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; in __debug_check_no_obj_freed() local
680 chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1)); in __debug_check_no_obj_freed()
681 chunks >>= ODEBUG_CHUNK_SHIFT; in __debug_check_no_obj_freed()
683 for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) { in __debug_check_no_obj_freed()
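The debugobjects lines use the standard round-up idiom for turning an address range into a chunk count: add (chunk size - 1) before shifting, so a partial trailing chunk is still counted. Illustrative arithmetic (the real ODEBUG_CHUNK_SIZE is whatever 1 << ODEBUG_CHUNK_SHIFT evaluates to):

    /* equivalent to DIV_ROUND_UP(eaddr - paddr, ODEBUG_CHUNK_SIZE) */
    chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1)) >> ODEBUG_CHUNK_SHIFT;

    /* e.g. with 16-byte chunks (shift of 4): a 33-byte span gives
     * (33 + 15) >> 4 = 3 chunks, not the 2 that plain division yields */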
/linux-4.1.27/drivers/infiniband/hw/usnic/
usnic_vnic.c:30 struct usnic_vnic_res_chunk chunks[USNIC_VNIC_RES_TYPE_MAX]; member
103 for (i = 0; i < ARRAY_SIZE(vnic->chunks); i++) { in usnic_vnic_dump()
104 chunk = &vnic->chunks[i]; in usnic_vnic_dump()
208 return vnic->chunks[type].cnt; in usnic_vnic_res_cnt()
214 return vnic->chunks[type].free_cnt; in usnic_vnic_res_free_cnt()
244 src = &vnic->chunks[type]; in usnic_vnic_get_resources()
274 vnic->chunks[res->type].free_cnt++; in usnic_vnic_put_resources()
367 &vnic->chunks[res_type]); in usnic_vnic_discover_resources()
380 usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); in usnic_vnic_discover_resources()
416 usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); in usnic_vnic_release_resources()
/linux-4.1.27/net/sctp/
chunk.c:58 INIT_LIST_HEAD(&msg->chunks); in sctp_datamsg_init()
80 list_for_each_entry(chunk, &msg->chunks, frag_list) in sctp_datamsg_free()
100 list_for_each_safe(pos, temp, &msg->chunks) { in sctp_datamsg_destroy()
292 list_add_tail(&chunk->frag_list, &msg->chunks); in sctp_datamsg_from_user()
328 list_add_tail(&chunk->frag_list, &msg->chunks); in sctp_datamsg_from_user()
337 list_for_each_safe(pos, temp, &msg->chunks) { in sctp_datamsg_from_user()
auth.c:189 sctp_chunks_param_t *chunks, in sctp_auth_make_key_vector() argument
200 if (chunks) in sctp_auth_make_key_vector()
201 chunks_len = ntohs(chunks->param_hdr.length); in sctp_auth_make_key_vector()
212 if (chunks) { in sctp_auth_make_key_vector()
213 memcpy(new->data + offset, chunks, chunks_len); in sctp_auth_make_key_vector()
656 switch (param->chunks[i]) { in __sctp_auth_cid()
664 if (param->chunks[i] == chunk) in __sctp_auth_cid()
780 p->chunks[nchunks] = chunk_id; in sctp_auth_ep_add_chunkid()
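The chunks[0] members these SCTP hits index into (see the sctp.h lines further down) are zero-length trailing arrays: the list of chunk IDs sits directly behind the parameter header, and its length is derived from param_hdr.length rather than stored separately, exactly as sctp_auth_make_key_vector() does with ntohs(). A hedged sketch of that derivation (assumes the length was already validated to be at least the header size):

    #include <linux/sctp.h>

    /* number of chunk ids in a chunks parameter: the TLV length
     * field covers the header plus one __u8 per chunk id */
    static int auth_chunk_id_count(const sctp_chunks_param_t *p)
    {
            return ntohs(p->param_hdr.length) - sizeof(sctp_paramhdr_t);
    }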
endpointola.c:105 auth_chunks->chunks[0] = SCTP_CID_ASCONF; in sctp_endpoint_init()
106 auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK; in sctp_endpoint_init()
sm_make_chunk.c:1538 list_for_each_entry(lchunk, &msg->chunks, frag_list) { in sctp_chunk_assign_ssn()
1972 switch (param.ext->chunks[i]) { in sctp_verify_ext_param()
2005 switch (param.ext->chunks[i]) { in sctp_process_ext_param()
sm_sideeffect.c:1037 list_for_each_entry(chunk, &msg->chunks, frag_list) { in sctp_cmd_send_msg()
socket.c:1953 list_for_each_entry(chunk, &datamsg->chunks, frag_list) { in sctp_sendmsg()
5652 if (copy_to_user(to, ch->chunks, num_chunks)) in sctp_getsockopt_peer_auth_chunks()
5700 if (copy_to_user(to, ch->chunks, num_chunks)) in sctp_getsockopt_local_auth_chunks()
/linux-4.1.27/net/sunrpc/xprtrdma/
svc_rdma_marshal.c:317 void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *rmsgp, int chunks) in svc_rdma_xdr_encode_write_list() argument
328 ary->wc_nchunks = htonl(chunks); in svc_rdma_xdr_encode_write_list()
331 ary->wc_array[chunks].wc_target.rs_handle = xdr_zero; in svc_rdma_xdr_encode_write_list()
334 ary->wc_array[chunks].wc_target.rs_length = xdr_zero; in svc_rdma_xdr_encode_write_list()
338 int chunks) in svc_rdma_xdr_encode_reply_array() argument
341 ary->wc_nchunks = htonl(chunks); in svc_rdma_xdr_encode_reply_array()
/linux-4.1.27/kernel/
audit_tree.c:15 struct list_head chunks; member
82 INIT_LIST_HEAD(&tree->chunks); in alloc_tree()
343 list_add(&chunk->owners[0].list, &tree->chunks); in create_chunk()
436 list_add(&p->list, &tree->chunks); in tag_chunk()
496 while (!list_empty(&victim->chunks)) { in prune_one()
499 p = list_entry(victim->chunks.next, struct node, list); in prune_one()
518 for (p = tree->chunks.next; p != &tree->chunks; p = q) { in trim_marked()
523 list_add(p, &tree->chunks); in trim_marked()
527 while (!list_empty(&tree->chunks)) { in trim_marked()
530 node = list_entry(tree->chunks.next, struct node, list); in trim_marked()
[all …]
/linux-4.1.27/mm/
zbud.c:341 int chunks, i, freechunks; in zbud_alloc() local
350 chunks = size_to_chunks(size); in zbud_alloc()
355 for_each_unbuddied_list(i, chunks) { in zbud_alloc()
380 zhdr->first_chunks = chunks; in zbud_alloc()
382 zhdr->last_chunks = chunks; in zbud_alloc()
Kconfig:387 The NOMMU mmap() frequently needs to allocate large contiguous chunks
389 allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently
/linux-4.1.27/drivers/md/
bitmap.c:754 unsigned long chunks, int with_super, in bitmap_storage_alloc() argument
761 bytes = DIV_ROUND_UP(chunks, 8); in bitmap_storage_alloc()
1023 unsigned long i, chunks, index, oldindex, bit, node_offset = 0; in bitmap_init_from_disk() local
1033 chunks = bitmap->counts.chunks; in bitmap_init_from_disk()
1040 for (i = 0; i < chunks ; i++) { in bitmap_init_from_disk()
1072 for (i = 0; i < chunks; i++) { in bitmap_init_from_disk()
1137 bit_cnt, chunks); in bitmap_init_from_disk()
1253 for (j = 0; j < counts->chunks; j++) { in bitmap_daemon_work()
1887 for (j = 0; j < counts->chunks; j++) { in bitmap_copy_from_slot()
1960 unsigned long chunks; in bitmap_resize() local
[all …]
bitmap.h:188 unsigned long chunks; /* Total number of data member
/linux-4.1.27/drivers/net/wireless/ti/wlcore/
boot.c:251 u32 chunks, addr, len; in wlcore_boot_upload_firmware() local
256 chunks = be32_to_cpup((__be32 *) fw); in wlcore_boot_upload_firmware()
259 wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks); in wlcore_boot_upload_firmware()
261 while (chunks--) { in wlcore_boot_upload_firmware()
272 chunks, addr, len); in wlcore_boot_upload_firmware()
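wlcore_boot_upload_firmware() treats the firmware image as a big-endian chunk count followed by per-chunk records, and simply loops while chunks-- is nonzero. A simplified sketch of such a container walk, assuming an (addr, len, data) record layout; write_to_device() is hypothetical and the real wlcore format has additional details:

    const u8 *p = fw;                         /* start of the firmware image */
    u32 chunks = be32_to_cpup((const __be32 *)p);

    p += sizeof(__be32);
    while (chunks--) {
            u32 addr = be32_to_cpup((const __be32 *)p);
            u32 len  = be32_to_cpup((const __be32 *)(p + sizeof(__be32)));

            p += 2 * sizeof(__be32);
            write_to_device(addr, p, len);    /* hypothetical chunk upload */
            p += len;
    }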
/linux-4.1.27/drivers/dma/sh/
rcar-dmac.c:81 struct list_head chunks; member
109 struct rcar_dmac_xfer_chunk chunks[0]; member
117 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \
352 chunk = list_first_entry(&desc->chunks, in rcar_dmac_chan_start_xfer()
448 desc->running = list_first_entry(&desc->chunks, in rcar_dmac_tx_submit()
480 INIT_LIST_HEAD(&desc->chunks); in rcar_dmac_desc_alloc()
511 list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free); in rcar_dmac_desc_put()
604 struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i]; in rcar_dmac_xfer_chunk_alloc()
701 list_for_each_entry(chunk, &desc->chunks, node) { in rcar_dmac_fill_hwdesc()
910 list_add_tail(&chunk->node, &desc->chunks); in rcar_dmac_chan_prep_sg()
[all …]
shdma-base.c:100 if (chunk->chunks == 1) { in shdma_tx_submit()
358 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { in __ld_cleanup()
374 BUG_ON(desc->chunks != 1); in __ld_cleanup()
570 int chunks = 0; in shdma_prep_sg() local
575 chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len); in shdma_prep_sg()
615 new->chunks = 1; in shdma_prep_sg()
617 new->chunks = chunks--; in shdma_prep_sg()
/linux-4.1.27/Documentation/device-mapper/
striped.txt:5 device across one or more underlying devices. Data is written in "chunks",
6 with consecutive chunks rotating among the underlying devices. This can
snapshot.txt:14 In the first two cases, dm copies only the chunks of data that get
35 A snapshot of the <origin> block device is created. Changed chunks of
56 Creates a merging snapshot that takes control of the changed chunks
58 procedure, and merges these chunks back into the <origin>. Once merging
linear.txt:35 # Split a device into 4M chunks and then join them together in reverse order.
/linux-4.1.27/Documentation/x86/x86_64/
fake-numa-for-cpusets:6 you can create fake NUMA nodes that represent contiguous chunks of memory and
18 four equal chunks of 512M each that we can now use to assign to cpusets. As
/linux-4.1.27/include/linux/
sctp.h:311 __u8 chunks[0]; member
323 __u8 chunks[0]; member
genalloc.h:57 struct list_head chunks; /* list of chunks in this pool */ member
shdma-base.h:55 int chunks; member
/linux-4.1.27/drivers/mtd/nand/
davinci_nand.c:776 int chunks = info->mtd.writesize / 512; in nand_davinci_probe() local
778 if (!chunks || info->mtd.oobsize < 16) { in nand_davinci_probe()
788 if (chunks == 1) { in nand_davinci_probe()
794 if (chunks == 4) { in nand_davinci_probe()
/linux-4.1.27/Documentation/spi/
spi-sc18is602:31 similar large accesses have to be split into multiple chunks of no more than
/linux-4.1.27/scripts/
checkpatch.pl:1189 my (@chunks);
1195 push(@chunks, [ $condition, $statement ]);
1197 return ($level, $linenr, @chunks);
1208 push(@chunks, [ $condition, $statement ]);
1211 return ($level, $linenr, @chunks);
4479 my ($level, $endln, @chunks) =
4483 if ($#chunks > 0 && $level == 0) {
4489 for my $chunk (@chunks) {
4550 my ($level, $endln, @chunks) =
4554 my ($cond, $block) = @{$chunks[0]};
[all …]
/linux-4.1.27/arch/mn10300/lib/
memset.S:45 # we want to transfer as much as we can in chunks of 32 bytes
memcpy.S:41 # we want to transfer as much as we can in chunks of 32 bytes
do_csum.S:57 # we want to checksum as much as we can in chunks of 32 bytes
memmove.S:47 # we want to transfer as much as we can in chunks of 32 bytes
/linux-4.1.27/fs/xfs/
xfs_buf_item.c:760 int chunks; in xfs_buf_item_init() local
793 chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len), in xfs_buf_item_init()
795 map_size = DIV_ROUND_UP(chunks, NBWORD); in xfs_buf_item_init()
/linux-4.1.27/drivers/net/wireless/ath/carl9170/
tx.c:193 unsigned int chunks; in carl9170_alloc_dev_space() local
198 chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size); in carl9170_alloc_dev_space()
199 if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) { in carl9170_alloc_dev_space()
200 atomic_add(chunks, &ar->mem_free_blocks); in carl9170_alloc_dev_space()
209 atomic_add(chunks, &ar->mem_free_blocks); in carl9170_alloc_dev_space()
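carl9170_alloc_dev_space() is a compact lock-free reservation pattern: convert the frame length into device memory chunks with DIV_ROUND_UP, optimistically subtract them with atomic_sub_return(), and if the counter went negative, hand them straight back with atomic_add(). Sketched in isolation (free_blocks stands in for any shared free-chunk counter such as ar->mem_free_blocks):

    static int reserve_chunks(atomic_t *free_blocks, size_t len, size_t block_size)
    {
            int chunks = DIV_ROUND_UP(len, block_size);

            /* optimistic reservation: subtract first, check afterwards */
            if (atomic_sub_return(chunks, free_blocks) < 0) {
                    atomic_add(chunks, free_blocks);  /* roll back, no space */
                    return -ENOSPC;
            }
            return chunks;                    /* caller must atomic_add() these back */
    }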
/linux-4.1.27/Documentation/usb/
ehci.txt:192 In typical situations, a usb_bulk_msg() loop writing out 4 KB chunks is
195 than the I/O. If that same loop used 16 KB chunks, it'd be better; a
196 sequence of 128 KB chunks would waste a lot less.
/linux-4.1.27/Documentation/devicetree/bindings/iommu/
samsung,sysmmu.txt:4 physical memory chunks visible as a contiguous region to DMA-capable peripheral
/linux-4.1.27/Documentation/mmc/
mmc-dev-attrs.txt:43 be desirable to do it in smaller chunks for three reasons:
mmc-async-req.txt:61 request in two chunks, prepare the first chunk and start the request,
/linux-4.1.27/Documentation/networking/
i40e.txt:51 shown that by coalescing Rx traffic into larger chunks of data, CPU
ixgbe.txt:227 shown that by coalescing Rx traffic into larger chunks of data, CPU
ip-sysctl.txt:1660 provides the ability to send and receive authenticated chunks and is
1692 The maximum number of retransmissions of INIT and COOKIE-ECHO chunks
1740 The interval (in milliseconds) between HEARTBEAT chunks. These chunks
/linux-4.1.27/Documentation/ABI/testing/
sysfs-driver-wacom:71 1024 byte binary is split up into 16x 64 byte chunks. Each 64
/linux-4.1.27/Documentation/filesystems/
btrfs.txt:142 data chunks. Off by default.
200 The ssd_spread mount option attempts to allocate into big chunks
sysv-fs.txt:82 the next free block. Rather, the free blocks are organized in chunks
ceph.txt:28 across storage nodes in large chunks to distribute workload and
ramfs-rootfs-initramfs.txt:54 synthetic block devices, now from files instead of from chunks of memory.
proc.txt:753 available. In this case, there are 0 chunks of 2^0*PAGE_SIZE available in
754 ZONE_DMA, 4 chunks of 2^1*PAGE_SIZE in ZONE_DMA, 101 chunks of 2^4*PAGE_SIZE
/linux-4.1.27/Documentation/mtd/nand/
pxa3xx-nand.txt:37 (with some additional controller-specific magic) and read two chunks of 2080B
/linux-4.1.27/include/net/sctp/
structs.h:360 struct sctp_chunks_param *chunks; member
524 struct list_head chunks; member
/linux-4.1.27/Documentation/dmaengine/
provider.txt:57 The latter are usually programmed using a collection of chunks to
135 -> Your device is able to report which chunks have been
379 Transfer: A collection of chunks (be it contiguous or not)
/linux-4.1.27/Documentation/
memory-hotplug.txt:92 into chunks of the same size. These chunks are called "sections". The size of
96 Memory sections are combined into chunks referred to as "memory blocks". The
ramoops.txt:31 The memory area is divided into "record_size" chunks (also rounded down to
dell_rbu.txt:44 In case of packet mechanism the single memory can be broken in smaller chunks
dma-buf-sharing.txt:277 an api similar to kmap. Accessing a dma_buf is done in aligned chunks of
302 the partial chunks at the beginning and end but may return stale or bogus
303 data outside of the range (in these partial chunks).
md.txt:178 This is the size in bytes for 'chunks' and is only relevant to
180 of the array is conceptually divided into chunks and consecutive
181 chunks are striped onto neighbouring devices.
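md.txt's description (the array conceptually divided into chunks, with consecutive chunks striped onto neighbouring devices) reduces to two divisions and a modulo. A hedged sketch of the mapping for a plain RAID0-style stripe (equal-size members, no zoning):

    /* map a linear sector to (device index, sector on that device) */
    static void stripe_map(u64 sector, u32 chunk_sectors, u32 ndevs,
                           u32 *dev, u64 *dev_sector)
    {
            u64 chunk  = sector / chunk_sectors;      /* which chunk overall */
            u64 stripe = chunk / ndevs;               /* which full stripe */

            *dev        = chunk % ndevs;              /* devices take turns */
            *dev_sector = stripe * chunk_sectors + sector % chunk_sectors;
    }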
HOWTO:506 The Linux kernel community does not gladly accept large chunks of code
551 chunks that they may get already accepted, even when your whole task is
IPMI.txt:92 different ways. Because of that, it's broken into many chunks of
93 code. These chunks (by module name) are:
assoc_array.txt:360 The index key is read in chunks of machine word. Each chunk is subdivided into
CodingStyle:82 Statements longer than 80 columns will be broken into sensible chunks, unless
kernel-parameters.txt:1062 nochunk: disable reading files in "chunks" in the EFI
/linux-4.1.27/drivers/video/fbdev/matrox/
matroxfb_base.h:257 unsigned int chunks; member
matroxfb_base.c:325 pos += minfo->curr.ydstorg.chunks; in matrox_pan_var()
783 minfo->curr.ydstorg.chunks = ydstorg >> (isInterleave(minfo) ? 3 : 2); in matroxfb_set_par()
818 pos += minfo->curr.ydstorg.chunks; in matroxfb_set_par()
/linux-4.1.27/fs/ocfs2/
quota_local.c:347 int i, chunks = le32_to_cpu(ldinfo->dqi_chunks); in ocfs2_recovery_load_quota() local
350 for (i = 0; i < chunks; i++) { in ocfs2_recovery_load_quota()
/linux-4.1.27/include/uapi/drm/
radeon_drm.h:983 uint64_t chunks; member
/linux-4.1.27/drivers/tty/vt/
defkeymap.c_shipped:153 * the default and allocate dynamically in chunks of 512 bytes.
/linux-4.1.27/Documentation/early-userspace/
README:107 Eventually, several more chunks of kernel functionality will hopefully
/linux-4.1.27/Documentation/scsi/
st.txt:184 Buffer allocation uses chunks of memory having sizes 2^n * (page
208 Scatter/gather buffers (buffers that consist of chunks non-contiguous
212 three kinds of chunks:
/linux-4.1.27/drivers/iommu/
Kconfig:232 non-linear physical memory chunks as linear memory in their
/linux-4.1.27/arch/cris/arch-v10/
README.mm:103 chunks of memory not possible using the normal kmalloc physical RAM
/linux-4.1.27/drivers/staging/lustre/lustre/osc/
osc_cache.c:1025 int chunks = (ext->oe_end >> ppc_bits) - trunc_chunk; in osc_extent_truncate() local
1037 ++chunks; in osc_extent_truncate()
1041 grants = chunks << cli->cl_chunkbits; in osc_extent_truncate()
/linux-4.1.27/Documentation/dvb/
README.dvb-usb:24 - TODO: an I2C-chunker. It creates device-specific chunks of register-accesses
/linux-4.1.27/Documentation/cpu-freq/
governors.txt:205 increase in 5% chunks of your maximum cpu frequency. You can change this
/linux-4.1.27/drivers/net/wireless/ath/ath10k/
wmi-tlv.c:1114 struct wmi_host_mem_chunks *chunks; in ath10k_wmi_tlv_op_gen_init() local
1146 chunks = (void *)tlv->value; in ath10k_wmi_tlv_op_gen_init()
1206 ath10k_wmi_put_host_mem_chunks(ar, chunks); in ath10k_wmi_tlv_op_gen_init()
wmi.c:3772 struct wmi_host_mem_chunks *chunks) in ath10k_wmi_put_host_mem_chunks() argument
3777 chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks); in ath10k_wmi_put_host_mem_chunks()
3780 chunk = &chunks->items[i]; in ath10k_wmi_put_host_mem_chunks()
wmi.h:4895 struct wmi_host_mem_chunks *chunks);
/linux-4.1.27/Documentation/serial/
tty.txt:202 smaller chunks.
/linux-4.1.27/Documentation/block/
data-integrity.txt:117 space is limited, the block interface allows tagging bigger chunks by
biodoc.txt:373 forced such requests to be broken up into small chunks before being passed
390 greater than PAGE_SIZE chunks in one shot)
466 (e.g a 1MB bio_vec needs to be handled in max 128kB chunks for IDE)
822 The ll_rw_kio() routine breaks up the kiobuf into page sized chunks and
/linux-4.1.27/fs/befs/
DChangeLog233 So it does i/o in much larger chunks. It is the correct linux way. It
/linux-4.1.27/Documentation/filesystems/caching/
fscache.txt:86 It instead serves the cache out in PAGE_SIZE chunks as and when requested by
/linux-4.1.27/Documentation/mtd/
nand_ecc.txt:269 to write our code in such a way that we process data in 32 bit chunks.