
Searched refs:chunks (Results 1 – 94 of 94) sorted by relevance

/linux-4.4.14/arch/mips/ar7/
prom.c:160 struct psp_env_chunk *chunks = (struct psp_env_chunk *)psp_env_data; in parse_psp_env() local
162 memcpy_fromio(chunks, psp_env_base, PSP_ENV_SIZE); in parse_psp_env()
167 if ((chunks[i].num == 0xff) || ((i + chunks[i].len) > n)) in parse_psp_env()
169 value = chunks[i].data; in parse_psp_env()
170 if (chunks[i].num) { in parse_psp_env()
171 name = lookup_psp_var_map(chunks[i].num); in parse_psp_env()
178 i += chunks[i].len; in parse_psp_env()
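The prom.c hits above walk a self-sized chunk array: each entry names a PSP environment variable, 0xff terminates the list, and the index advances by the entry's own length. A minimal sketch of that walk; the slot layout and the handle_var() consumer are assumptions for illustration (only num, len and data are visible in the hits):

    #include <stdint.h>
    #include <stddef.h>

    /* slot layout is an assumption; only num, len and data appear above */
    struct psp_env_chunk {
        uint8_t num;      /* variable id; 0xff terminates the list */
        uint8_t len;      /* entry length, counted in array slots */
        char    data[14]; /* value bytes */
    };

    void handle_var(uint8_t num, const char *value);  /* hypothetical consumer */

    static void walk_chunks(const struct psp_env_chunk *chunks, size_t n)
    {
        size_t i = 0;

        while (i < n) {
            /* stop at the terminator, a zero-length entry, or truncation */
            if (chunks[i].num == 0xff || chunks[i].len == 0 ||
                i + chunks[i].len > n)
                break;
            if (chunks[i].num)
                handle_var(chunks[i].num, chunks[i].data);
            i += chunks[i].len;   /* entries are self-sized */
        }
    }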
/linux-4.4.14/drivers/gpu/drm/radeon/
radeon_cs.c:277 chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks); in radeon_cs_parser_init()
284 p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL); in radeon_cs_parser_init()
285 if (p->chunks == NULL) { in radeon_cs_parser_init()
298 p->chunks[i].length_dw = user_chunk.length_dw; in radeon_cs_parser_init()
300 p->chunk_relocs = &p->chunks[i]; in radeon_cs_parser_init()
303 p->chunk_ib = &p->chunks[i]; in radeon_cs_parser_init()
305 if (p->chunks[i].length_dw == 0) in radeon_cs_parser_init()
309 p->chunk_const_ib = &p->chunks[i]; in radeon_cs_parser_init()
311 if (p->chunks[i].length_dw == 0) in radeon_cs_parser_init()
315 p->chunk_flags = &p->chunks[i]; in radeon_cs_parser_init()
[all …]
r600_cs.c:2347 drm_free_large(parser->chunks[i].kdata); in r600_cs_parser_fini()
2348 kfree(parser->chunks); in r600_cs_parser_fini()
radeon.h:1077 struct radeon_cs_chunk *chunks; member
/linux-4.4.14/drivers/infiniband/hw/usnic/
usnic_vnic.c:45 struct usnic_vnic_res_chunk chunks[USNIC_VNIC_RES_TYPE_MAX]; member
118 for (i = 0; i < ARRAY_SIZE(vnic->chunks); i++) { in usnic_vnic_dump()
119 chunk = &vnic->chunks[i]; in usnic_vnic_dump()
223 return vnic->chunks[type].cnt; in usnic_vnic_res_cnt()
229 return vnic->chunks[type].free_cnt; in usnic_vnic_res_free_cnt()
259 src = &vnic->chunks[type]; in usnic_vnic_get_resources()
289 vnic->chunks[res->type].free_cnt++; in usnic_vnic_put_resources()
382 &vnic->chunks[res_type]); in usnic_vnic_discover_resources()
395 usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); in usnic_vnic_discover_resources()
431 usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); in usnic_vnic_release_resources()
/linux-4.4.14/lib/
genalloc.c:159 INIT_LIST_HEAD(&pool->chunks); in gen_pool_create()
200 list_add_rcu(&chunk->next_chunk, &pool->chunks); in gen_pool_add_virt()
220 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_virt_to_phys()
246 list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { in gen_pool_destroy()
287 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_alloc()
366 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_free()
399 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) in gen_pool_for_each_chunk()
422 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) { in addr_in_gen_pool()
446 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) in gen_pool_avail()
465 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) in gen_pool_size()
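genalloc keeps a pool as an RCU-protected list of chunks, which is what every hit above iterates. A hedged kernel-side sketch of the public API from <linux/genalloc.h> (gen_pool_create/gen_pool_add/gen_pool_alloc/gen_pool_free are the real entry points; the backing buffer and sizes here are made up for illustration):

    #include <linux/genalloc.h>

    static int pool_demo(void *backing, size_t backing_size)
    {
        /* hand out 32-byte granules (order 5); any NUMA node */
        struct gen_pool *pool = gen_pool_create(5, -1);
        unsigned long addr;

        if (!pool)
            return -ENOMEM;
        /* adds one chunk to pool->chunks, the list the hits above walk */
        if (gen_pool_add(pool, (unsigned long)backing, backing_size, -1)) {
            gen_pool_destroy(pool);
            return -ENOMEM;
        }
        addr = gen_pool_alloc(pool, 256);      /* carve 256 bytes */
        if (addr)
            gen_pool_free(pool, addr, 256);
        gen_pool_destroy(pool);                /* all allocations returned */
        return 0;
    }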
scatterlist.c:397 unsigned int chunks; in sg_alloc_table_from_pages() local
404 chunks = 1; in sg_alloc_table_from_pages()
407 ++chunks; in sg_alloc_table_from_pages()
409 ret = sg_alloc_table(sgt, chunks, gfp_mask); in sg_alloc_table_from_pages()
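sg_alloc_table_from_pages() first counts how many physically contiguous runs ("chunks") the page array contains, then sizes the sg table to that count. A self-contained sketch of the counting step, with a plain pfn array standing in for struct page pointers (an assumption for illustration):

    #include <stddef.h>

    static unsigned int count_chunks(const unsigned long *pfn, size_t n_pages)
    {
        unsigned int chunks = 1;
        size_t i;

        if (n_pages == 0)
            return 0;
        for (i = 1; i < n_pages; i++)
            if (pfn[i] != pfn[i - 1] + 1)
                ++chunks;   /* a gap starts a new chunk */
        return chunks;      /* pass this count to sg_alloc_table() */
    }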
debugobjects.c:668 unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; in __debug_check_no_obj_freed() local
680 chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1)); in __debug_check_no_obj_freed()
681 chunks >>= ODEBUG_CHUNK_SHIFT; in __debug_check_no_obj_freed()
683 for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) { in __debug_check_no_obj_freed()
Kconfig:517 Provides a helper to split scatterlists into chunks, each chunk being a
/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/
amdgpu_cs.c:188 chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks); in amdgpu_cs_parser_init()
196 p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), in amdgpu_cs_parser_init()
198 if (!p->chunks) { in amdgpu_cs_parser_init()
215 p->chunks[i].chunk_id = user_chunk.chunk_id; in amdgpu_cs_parser_init()
216 p->chunks[i].length_dw = user_chunk.length_dw; in amdgpu_cs_parser_init()
218 size = p->chunks[i].length_dw; in amdgpu_cs_parser_init()
220 p->chunks[i].user_ptr = cdata; in amdgpu_cs_parser_init()
222 p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t)); in amdgpu_cs_parser_init()
223 if (p->chunks[i].kdata == NULL) { in amdgpu_cs_parser_init()
229 if (copy_from_user(p->chunks[i].kdata, cdata, size)) { in amdgpu_cs_parser_init()
[all …]
amdgpu.h:1253 struct amdgpu_cs_chunk *chunks; member
/linux-4.4.14/net/sunrpc/xprtrdma/
svc_rdma_marshal.c:255 void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *rmsgp, int chunks) in svc_rdma_xdr_encode_write_list() argument
266 ary->wc_nchunks = cpu_to_be32(chunks); in svc_rdma_xdr_encode_write_list()
269 ary->wc_array[chunks].wc_target.rs_handle = xdr_zero; in svc_rdma_xdr_encode_write_list()
272 ary->wc_array[chunks].wc_target.rs_length = xdr_zero; in svc_rdma_xdr_encode_write_list()
276 int chunks) in svc_rdma_xdr_encode_reply_array() argument
279 ary->wc_nchunks = cpu_to_be32(chunks); in svc_rdma_xdr_encode_reply_array()
/linux-4.4.14/net/sctp/
chunk.c:58 INIT_LIST_HEAD(&msg->chunks); in sctp_datamsg_init()
80 list_for_each_entry(chunk, &msg->chunks, frag_list) in sctp_datamsg_free()
100 list_for_each_safe(pos, temp, &msg->chunks) { in sctp_datamsg_destroy()
292 list_add_tail(&chunk->frag_list, &msg->chunks); in sctp_datamsg_from_user()
328 list_add_tail(&chunk->frag_list, &msg->chunks); in sctp_datamsg_from_user()
337 list_for_each_safe(pos, temp, &msg->chunks) { in sctp_datamsg_from_user()
auth.c:189 sctp_chunks_param_t *chunks, in sctp_auth_make_key_vector() argument
200 if (chunks) in sctp_auth_make_key_vector()
201 chunks_len = ntohs(chunks->param_hdr.length); in sctp_auth_make_key_vector()
212 if (chunks) { in sctp_auth_make_key_vector()
213 memcpy(new->data + offset, chunks, chunks_len); in sctp_auth_make_key_vector()
656 switch (param->chunks[i]) { in __sctp_auth_cid()
664 if (param->chunks[i] == chunk) in __sctp_auth_cid()
780 p->chunks[nchunks] = chunk_id; in sctp_auth_ep_add_chunkid()
endpointola.c:105 auth_chunks->chunks[0] = SCTP_CID_ASCONF; in sctp_endpoint_init()
106 auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK; in sctp_endpoint_init()
sm_make_chunk.c:1538 list_for_each_entry(lchunk, &msg->chunks, frag_list) { in sctp_chunk_assign_ssn()
1972 switch (param.ext->chunks[i]) { in sctp_verify_ext_param()
2005 switch (param.ext->chunks[i]) { in sctp_process_ext_param()
sm_sideeffect.c:1037 list_for_each_entry(chunk, &msg->chunks, frag_list) { in sctp_cmd_send_msg()
socket.c:1956 list_for_each_entry(chunk, &datamsg->chunks, frag_list) { in sctp_sendmsg()
5639 if (copy_to_user(to, ch->chunks, num_chunks)) in sctp_getsockopt_peer_auth_chunks()
5687 if (copy_to_user(to, ch->chunks, num_chunks)) in sctp_getsockopt_local_auth_chunks()
/linux-4.4.14/kernel/
audit_tree.c:15 struct list_head chunks; member
82 INIT_LIST_HEAD(&tree->chunks); in alloc_tree()
343 list_add(&chunk->owners[0].list, &tree->chunks); in create_chunk()
436 list_add(&p->list, &tree->chunks); in tag_chunk()
498 while (!list_empty(&victim->chunks)) { in prune_one()
501 p = list_entry(victim->chunks.next, struct node, list); in prune_one()
520 for (p = tree->chunks.next; p != &tree->chunks; p = q) { in trim_marked()
525 list_add(p, &tree->chunks); in trim_marked()
529 while (!list_empty(&tree->chunks)) { in trim_marked()
532 node = list_entry(tree->chunks.next, struct node, list); in trim_marked()
[all …]
/linux-4.4.14/mm/
zbud.c:356 int chunks, i, freechunks; in zbud_alloc() local
365 chunks = size_to_chunks(size); in zbud_alloc()
370 for_each_unbuddied_list(i, chunks) { in zbud_alloc()
395 zhdr->first_chunks = chunks; in zbud_alloc()
397 zhdr->last_chunks = chunks; in zbud_alloc()
Kconfig:370 The NOMMU mmap() frequently needs to allocate large contiguous chunks
372 allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently
/linux-4.4.14/drivers/md/
bitmap.c:753 unsigned long chunks, int with_super, in bitmap_storage_alloc() argument
760 bytes = DIV_ROUND_UP(chunks, 8); in bitmap_storage_alloc()
1022 unsigned long i, chunks, index, oldindex, bit, node_offset = 0; in bitmap_init_from_disk() local
1032 chunks = bitmap->counts.chunks; in bitmap_init_from_disk()
1039 for (i = 0; i < chunks ; i++) { in bitmap_init_from_disk()
1071 for (i = 0; i < chunks; i++) { in bitmap_init_from_disk()
1136 bit_cnt, chunks); in bitmap_init_from_disk()
1252 for (j = 0; j < counts->chunks; j++) { in bitmap_daemon_work()
1882 for (j = 0; j < counts->chunks; j++) { in bitmap_copy_from_slot()
1955 unsigned long chunks; in bitmap_resize() local
[all …]
bitmap.h:190 unsigned long chunks; /* Total number of data member
/linux-4.4.14/drivers/net/wireless/ti/wlcore/
boot.c:251 u32 chunks, addr, len; in wlcore_boot_upload_firmware() local
256 chunks = be32_to_cpup((__be32 *) fw); in wlcore_boot_upload_firmware()
259 wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks); in wlcore_boot_upload_firmware()
261 while (chunks--) { in wlcore_boot_upload_firmware()
272 chunks, addr, len); in wlcore_boot_upload_firmware()
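The boot.c hits read a big-endian chunk count from the firmware header and then loop over per-chunk records. A self-contained sketch of that walk under an assumed record layout of (u32 addr, u32 len, payload) — the real wlcore format is not shown in the hits, so treat the layout, and the upload() helper, as hypothetical:

    #include <stdint.h>
    #include <stddef.h>

    void upload(uint32_t addr, const uint8_t *data, uint32_t len); /* hypothetical */

    static uint32_t be32(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8) | p[3];
    }

    /* assumed image layout: u32 chunk count, then (u32 addr, u32 len, payload) */
    static int walk_fw(const uint8_t *fw, size_t size)
    {
        uint32_t chunks;
        size_t off = 4;

        if (size < 4)
            return -1;
        chunks = be32(fw);
        while (chunks--) {
            uint32_t addr, len;

            if (off + 8 > size)
                return -1;            /* truncated record header */
            addr = be32(fw + off);
            len  = be32(fw + off + 4);
            off += 8;
            if (len > size - off)
                return -1;            /* truncated payload */
            upload(addr, fw + off, len);
            off += len;
        }
        return 0;
    }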
/linux-4.4.14/drivers/dma/sh/
rcar-dmac.c:81 struct list_head chunks; member
109 struct rcar_dmac_xfer_chunk chunks[0]; member
117 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \
352 chunk = list_first_entry(&desc->chunks, in rcar_dmac_chan_start_xfer()
448 desc->running = list_first_entry(&desc->chunks, in rcar_dmac_tx_submit()
481 INIT_LIST_HEAD(&desc->chunks); in rcar_dmac_desc_alloc()
512 list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free); in rcar_dmac_desc_put()
608 struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i]; in rcar_dmac_xfer_chunk_alloc()
706 list_for_each_entry(chunk, &desc->chunks, node) { in rcar_dmac_fill_hwdesc()
915 list_add_tail(&chunk->node, &desc->chunks); in rcar_dmac_chan_prep_sg()
[all …]
shdma-base.c:100 if (chunk->chunks == 1) { in shdma_tx_submit()
358 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { in __ld_cleanup()
374 BUG_ON(desc->chunks != 1); in __ld_cleanup()
570 int chunks = 0; in shdma_prep_sg() local
575 chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len); in shdma_prep_sg()
615 new->chunks = 1; in shdma_prep_sg()
617 new->chunks = chunks--; in shdma_prep_sg()
/linux-4.4.14/Documentation/device-mapper/
striped.txt:5 device across one or more underlying devices. Data is written in "chunks",
6 with consecutive chunks rotating among the underlying devices. This can
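For reference, a striped target line carries the stripe count, the chunk size (in 512-byte sectors) and one (device, offset) pair per stripe; a hedged example with placeholder device names:

    # 800 MiB logical device, 2 stripes, 128 KiB (256-sector) chunks
    # <start> <length> striped <#stripes> <chunk_size> <dev1> <off1> <dev2> <off2>
    echo "0 1638400 striped 2 256 /dev/sdb1 0 /dev/sdc1 0" | dmsetup create mystripe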
snapshot.txt:14 In the first two cases, dm copies only the chunks of data that get
35 A snapshot of the <origin> block device is created. Changed chunks of
60 Creates a merging snapshot that takes control of the changed chunks
62 procedure, and merges these chunks back into the <origin>. Once merging
linear.txt:35 # Split a device into 4M chunks and then join them together in reverse order.
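The quoted comment heads an example script in that file; its effect boils down to emitting one linear target line per 4M chunk with descending source offsets, roughly like this (placeholder device; 4M = 8192 sectors of 512 bytes):

    # <start> <length> linear <device> <offset>  -- an 8M device, chunks swapped
    0    8192 linear /dev/sdX 8192
    8192 8192 linear /dev/sdX 0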
/linux-4.4.14/drivers/mtd/nand/
davinci_nand.c:799 int chunks = info->mtd.writesize / 512; in nand_davinci_probe() local
801 if (!chunks || info->mtd.oobsize < 16) { in nand_davinci_probe()
811 if (chunks == 1) { in nand_davinci_probe()
817 if (chunks == 4) { in nand_davinci_probe()
822 if (chunks == 8) { in nand_davinci_probe()
/linux-4.4.14/include/linux/
sctp.h:311 __u8 chunks[0]; member
323 __u8 chunks[0]; member
genalloc.h:57 struct list_head chunks; /* list of chunks in this pool */ member
shdma-base.h:55 int chunks; member
/linux-4.4.14/Documentation/x86/x86_64/
fake-numa-for-cpusets:6 you can create fake NUMA nodes that represent contiguous chunks of memory and
18 four equal chunks of 512M each that we can now use to assign to cpusets. As
/linux-4.4.14/Documentation/spi/
spi-sc18is602:31 similar large accesses have to be split into multiple chunks of no more than
/linux-4.4.14/scripts/
checkpatch.pl:1233 my (@chunks);
1239 push(@chunks, [ $condition, $statement ]);
1241 return ($level, $linenr, @chunks);
1252 push(@chunks, [ $condition, $statement ]);
1255 return ($level, $linenr, @chunks);
4675 my ($level, $endln, @chunks) =
4679 if ($#chunks > 0 && $level == 0) {
4685 for my $chunk (@chunks) {
4746 my ($level, $endln, @chunks) =
4750 my ($cond, $block) = @{$chunks[0]};
[all …]
/linux-4.4.14/arch/mn10300/lib/
memset.S:45 # we want to transfer as much as we can in chunks of 32 bytes
memcpy.S:41 # we want to transfer as much as we can in chunks of 32 bytes
do_csum.S:57 # we want to checksum as much as we can in chunks of 32 bytes
memmove.S:47 # we want to transfer as much as we can in chunks of 32 bytes
/linux-4.4.14/fs/xfs/
xfs_buf_item.c:756 int chunks; in xfs_buf_item_init() local
793 chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len), in xfs_buf_item_init()
795 map_size = DIV_ROUND_UP(chunks, NBWORD); in xfs_buf_item_init()
/linux-4.4.14/drivers/net/wireless/ath/carl9170/
tx.c:193 unsigned int chunks; in carl9170_alloc_dev_space() local
198 chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size); in carl9170_alloc_dev_space()
199 if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) { in carl9170_alloc_dev_space()
200 atomic_add(chunks, &ar->mem_free_blocks); in carl9170_alloc_dev_space()
209 atomic_add(chunks, &ar->mem_free_blocks); in carl9170_alloc_dev_space()
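The tx.c hits show a lock-free reservation idiom: subtract the chunk count from a free counter, and add it back if the counter went negative. The same pattern in self-contained C11 (names are illustrative, not carl9170's):

    #include <stdatomic.h>
    #include <stdbool.h>

    static bool reserve_chunks(atomic_int *free_blocks, int n)
    {
        /* atomic_fetch_sub() returns the old value; old - n is the new one */
        if (atomic_fetch_sub(free_blocks, n) - n < 0) {
            atomic_fetch_add(free_blocks, n);   /* roll back the reservation */
            return false;
        }
        return true;
    }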
/linux-4.4.14/Documentation/usb/
ehci.txt:192 In typical situations, a usb_bulk_msg() loop writing out 4 KB chunks is
195 than the I/O. If that same loop used 16 KB chunks, it'd be better; a
196 sequence of 128 KB chunks would waste a lot less.
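The point of the excerpt is that per-call overhead dominates with small chunks. A hedged kernel-side sketch of such a loop (usb_bulk_msg() is the real API; the device, pipe, buffer and timeout here are hypothetical):

    static int write_all(struct usb_device *udev, unsigned int pipe,
                         u8 *buf, int remaining)
    {
        int actual, status;

        while (remaining > 0) {
            /* 128 KB per call amortises the per-call stack overhead far
             * better than 4 KB, per the discussion above */
            int len = min(remaining, 128 * 1024);

            status = usb_bulk_msg(udev, pipe, buf, len, &actual, 5000);
            if (status)
                return status;
            buf += actual;
            remaining -= actual;
        }
        return 0;
    }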
/linux-4.4.14/Documentation/mmc/
mmc-dev-attrs.txt:43 be desirable to do it in smaller chunks for three reasons:
mmc-async-req.txt:61 request in two chunks, prepare the first chunk and start the request,
/linux-4.4.14/Documentation/devicetree/bindings/iommu/
samsung,sysmmu.txt:4 physical memory chunks visible as a contiguous region to DMA-capable peripheral
/linux-4.4.14/Documentation/networking/
i40e.txt:51 shown that by coalescing Rx traffic into larger chunks of data, CPU
ixgbe.txt:227 shown that by coalescing Rx traffic into larger chunks of data, CPU
ip-sysctl.txt:1746 provides the ability to send and receive authenticated chunks and is
1778 The maximum number of retransmissions of INIT and COOKIE-ECHO chunks
1826 The interval (in milliseconds) between HEARTBEAT chunks. These chunks
/linux-4.4.14/include/uapi/drm/
amdgpu_drm.h:370 uint64_t chunks; member
radeon_drm.h:983 uint64_t chunks; member
/linux-4.4.14/Documentation/filesystems/
btrfs.txt:142 data chunks. Off by default.
200 The ssd_spread mount option attempts to allocate into big chunks
sysv-fs.txt:82 the next free block. Rather, the free blocks are organized in chunks
ceph.txt:28 across storage nodes in large chunks to distribute workload and
ramfs-rootfs-initramfs.txt:54 synthetic block devices, now from files instead of from chunks of memory.
proc.txt:771 available. In this case, there are 0 chunks of 2^0*PAGE_SIZE available in
772 ZONE_DMA, 4 chunks of 2^1*PAGE_SIZE in ZONE_DMA, 101 chunks of 2^4*PAGE_SIZE
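For context, the /proc/buddyinfo table that sentence describes has one column per order k, holding the number of free 2^k*PAGE_SIZE chunks; illustrative rows chosen to match the quoted counts:

    Node 0, zone      DMA      0      4      5      4      4      3 ...
    Node 0, zone   Normal      1      0      0      1    101      8 ...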
/linux-4.4.14/Documentation/ABI/testing/
sysfs-driver-wacom:71 1024 byte binary is split up into 16x 64 byte chunks. Each 64
/linux-4.4.14/Documentation/mtd/nand/
pxa3xx-nand.txt:37 (with some additional controller-specific magic) and read two chunks of 2080B
/linux-4.4.14/include/net/sctp/
structs.h:360 struct sctp_chunks_param *chunks; member
524 struct list_head chunks; member
/linux-4.4.14/Documentation/
memory-hotplug.txt:92 into chunks of the same size. These chunks are called "sections". The size of
96 Memory sections are combined into chunks referred to as "memory blocks". The
ramoops.txt:31 The memory area is divided into "record_size" chunks (also rounded down to
dell_rbu.txt:44 In case of packet mechanism the single memory can be broken in smaller chunks
dma-buf-sharing.txt:277 an api similar to kmap. Accessing a dma_buf is done in aligned chunks of
302 the partial chunks at the beginning and end but may return stale or bogus
303 data outside of the range (in these partial chunks).
md.txt:178 This is the size in bytes for 'chunks' and is only relevant to
180 of the array is conceptually divided into chunks and consecutive
181 chunks are striped onto neighbouring devices.
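Concretely, the striping those lines describe maps a linear array offset to a device and an on-device offset like this (a worked sketch of the arithmetic, not md's actual code):

    #include <stdint.h>

    /* with C-byte chunks over N devices, chunk k = off / C lives on
     * device k % N, at chunk slot k / N on that device */
    static void stripe_map(uint64_t off, uint64_t chunk, unsigned ndevs,
                           unsigned *dev, uint64_t *dev_off)
    {
        uint64_t k = off / chunk;              /* global chunk index */
        *dev = k % ndevs;                      /* consecutive chunks rotate */
        *dev_off = (k / ndevs) * chunk + off % chunk;
    }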
HOWTO:506 The Linux kernel community does not gladly accept large chunks of code
551 chunks that they may get already accepted, even when your whole task is
IPMI.txt:92 different ways. Because of that, it's broken into many chunks of
93 code. These chunks (by module name) are:
assoc_array.txt:360 The index key is read in chunks of machine word. Each chunk is subdivided into
adding-syscalls.txt:170 into separate chunks. These should include at least the following items as
CodingStyle:82 Statements longer than 80 columns will be broken into sensible chunks, unless
kernel-parameters.txt:1102 nochunk: disable reading files in "chunks" in the EFI
/linux-4.4.14/Documentation/dmaengine/
provider.txt:57 The latter are usually programmed using a collection of chunks to
135 -> Your device is able to report which chunks have been
397 Transfer: A collection of chunks (be it contiguous or not)
/linux-4.4.14/Documentation/fpga/
fpga-mgr.txt:164 case, this function is called multiple times for successive chunks.
/linux-4.4.14/drivers/video/fbdev/matrox/
matroxfb_base.h:237 unsigned int chunks; member
matroxfb_base.c:325 pos += minfo->curr.ydstorg.chunks; in matrox_pan_var()
776 minfo->curr.ydstorg.chunks = ydstorg >> (isInterleave(minfo) ? 3 : 2); in matroxfb_set_par()
811 pos += minfo->curr.ydstorg.chunks; in matroxfb_set_par()
/linux-4.4.14/fs/ocfs2/
quota_local.c:346 int i, chunks = le32_to_cpu(ldinfo->dqi_chunks); in ocfs2_recovery_load_quota() local
349 for (i = 0; i < chunks; i++) { in ocfs2_recovery_load_quota()
/linux-4.4.14/drivers/tty/vt/
defkeymap.c_shipped:153 * the default and allocate dynamically in chunks of 512 bytes.
/linux-4.4.14/Documentation/early-userspace/
README:107 Eventually, several more chunks of kernel functionality will hopefully
/linux-4.4.14/Documentation/scsi/
st.txt:243 Buffer allocation uses chunks of memory having sizes 2^n * (page
267 Scatter/gather buffers (buffers that consist of chunks non-contiguous
271 three kinds of chunks:
/linux-4.4.14/arch/cris/arch-v10/
README.mm:103 chunks of memory not possible using the normal kmalloc physical RAM
/linux-4.4.14/arch/s390/
Kconfig:437 equal chunks which then are distributed over the configured number
441 chunks (i.e. memory size / fake size) and the number of supported
/linux-4.4.14/drivers/staging/lustre/lustre/osc/
osc_cache.c:1027 int chunks = (ext->oe_end >> ppc_bits) - trunc_chunk; in osc_extent_truncate() local
1038 ++chunks; in osc_extent_truncate()
1042 grants = chunks << cli->cl_chunkbits; in osc_extent_truncate()
/linux-4.4.14/Documentation/dvb/
README.dvb-usb:24 - TODO: an I2C-chunker. It creates device-specific chunks of register-accesses
/linux-4.4.14/drivers/iommu/
Kconfig:252 non-linear physical memory chunks as linear memory in their
/linux-4.4.14/Documentation/cpu-freq/
governors.txt:205 increase in 5% chunks of your maximum cpu frequency. You can change this
/linux-4.4.14/Documentation/nvdimm/
btt.txt:31 The BTT, however, splits the available space into chunks of up to 512 GiB,
/linux-4.4.14/Documentation/block/
data-integrity.txt:117 space is limited, the block interface allows tagging bigger chunks by
biodoc.txt:373 forced such requests to be broken up into small chunks before being passed
390 greater than PAGE_SIZE chunks in one shot)
466 (e.g a 1MB bio_vec needs to be handled in max 128kB chunks for IDE)
822 The ll_rw_kio() routine breaks up the kiobuf into page sized chunks and
/linux-4.4.14/Documentation/serial/
tty.txt:217 smaller chunks.
/linux-4.4.14/fs/befs/
ChangeLog:233 So it does i/o in much larger chunks. It is the correct linux way. It
/linux-4.4.14/drivers/net/wireless/ath/ath10k/
wmi-tlv.c:1330 struct wmi_host_mem_chunks *chunks; in ath10k_wmi_tlv_op_gen_init() local
1362 chunks = (void *)tlv->value; in ath10k_wmi_tlv_op_gen_init()
1422 ath10k_wmi_put_host_mem_chunks(ar, chunks); in ath10k_wmi_tlv_op_gen_init()
wmi.c:5215 struct wmi_host_mem_chunks *chunks) in ath10k_wmi_put_host_mem_chunks() argument
5220 chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks); in ath10k_wmi_put_host_mem_chunks()
5223 chunk = &chunks->items[i]; in ath10k_wmi_put_host_mem_chunks()
wmi.h:6274 struct wmi_host_mem_chunks *chunks);
/linux-4.4.14/Documentation/filesystems/caching/
fscache.txt:86 It instead serves the cache out in PAGE_SIZE chunks as and when requested by
/linux-4.4.14/Documentation/mtd/
nand_ecc.txt:269 to write our code in such a way that we process data in 32 bit chunks.
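The optimisation that line refers to is XOR-folding the data in 32-bit chunks instead of byte by byte; a minimal self-contained sketch of the folding step (the full ECC also derives row/column parities from this word, which the sketch omits):

    #include <stdint.h>
    #include <stddef.h>

    /* fold a block into one cumulative parity word, 32 bits at a time */
    static uint32_t fold_parity32(const uint32_t *buf, size_t nwords)
    {
        uint32_t cur = 0;
        size_t i;

        for (i = 0; i < nwords; i++)
            cur ^= buf[i];
        return cur;   /* bit j is the parity of bit j across all words */
    }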