chunk 560 arch/arm/kernel/traps.c unsigned long chunk = min(PAGE_SIZE, end - start);
chunk 565 arch/arm/kernel/traps.c ret = flush_cache_user_range(start, start + chunk);
chunk 570 arch/arm/kernel/traps.c start += chunk;
chunk 146 arch/arm64/crypto/ghash-ce-glue.c int chunk = min(blocks, MAX_BLOCKS);
chunk 148 arch/arm64/crypto/ghash-ce-glue.c ghash_do_update(chunk, ctx->digest, src, key,
chunk 152 arch/arm64/crypto/ghash-ce-glue.c blocks -= chunk;
chunk 153 arch/arm64/crypto/ghash-ce-glue.c src += chunk * GHASH_BLOCK_SIZE;
chunk 93 arch/arm64/crypto/sha256-glue.c unsigned int chunk = len;
chunk 101 arch/arm64/crypto/sha256-glue.c chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
chunk 102 arch/arm64/crypto/sha256-glue.c chunk = SHA256_BLOCK_SIZE -
chunk 106 arch/arm64/crypto/sha256-glue.c sha256_base_do_update(desc, data, chunk,
chunk 109 arch/arm64/crypto/sha256-glue.c data += chunk;
chunk 110 arch/arm64/crypto/sha256-glue.c len -= chunk;
chunk 30 arch/arm64/kernel/sys_compat.c unsigned long chunk = min(PAGE_SIZE, end - start);
chunk 44 arch/arm64/kernel/sys_compat.c ret = __flush_cache_user_range(start, start + chunk);
chunk 49 arch/arm64/kernel/sys_compat.c start += chunk;
chunk 18 arch/mips/dec/prom/console.c unsigned int chunk = sizeof(buf) - 1;
chunk 21 arch/mips/dec/prom/console.c if (chunk > c)
chunk 22 arch/mips/dec/prom/console.c chunk = c;
chunk 23 arch/mips/dec/prom/console.c memcpy(buf, s, chunk);
chunk 24 arch/mips/dec/prom/console.c buf[chunk] = '\0';
chunk 26 arch/mips/dec/prom/console.c s += chunk;
chunk 27 arch/mips/dec/prom/console.c c -= chunk;
chunk 2419 arch/powerpc/kernel/prom_init.c unsigned long room, chunk;
chunk 2429 arch/powerpc/kernel/prom_init.c chunk = alloc_up(room, 0);
chunk 2430 arch/powerpc/kernel/prom_init.c if (chunk == 0)
chunk 2433 arch/powerpc/kernel/prom_init.c *mem_end = chunk + room;
chunk 63 arch/powerpc/mm/book3s64/iommu_api.c unsigned long entry, chunk;
chunk 100 arch/powerpc/mm/book3s64/iommu_api.c chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
chunk 102 arch/powerpc/mm/book3s64/iommu_api.c chunk = min(chunk, entries);
chunk 103 arch/powerpc/mm/book3s64/iommu_api.c for (entry = 0; entry < entries; entry += chunk) {
chunk 104 arch/powerpc/mm/book3s64/iommu_api.c unsigned long n = min(entries - entry, chunk);
chunk 118 arch/powerpc/mm/mem.c unsigned long chunk)
chunk 122 arch/powerpc/mm/mem.c for (i = start; i < stop; i += chunk) {
chunk 123 arch/powerpc/mm/mem.c flush_dcache_range(i, min(stop, i + chunk));
chunk 389 arch/powerpc/platforms/pseries/vio.c size_t avail = 0, level, chunk, need;
chunk 437 arch/powerpc/platforms/pseries/vio.c chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
chunk 438 arch/powerpc/platforms/pseries/vio.c chunk = min(chunk, (viodev->cmo.desired -
chunk 440 arch/powerpc/platforms/pseries/vio.c viodev->cmo.entitled += chunk;
chunk 515 arch/s390/crypto/prng.c int chunk, n, ret = 0;
chunk 543 arch/s390/crypto/prng.c chunk = min_t(int, nbytes, prng_chunk_size);
chunk 546 arch/s390/crypto/prng.c n = (chunk + 7) & -8;
chunk 573 arch/s390/crypto/prng.c if (copy_to_user(ubuf, prng_data->buf, chunk)) {
chunk 578 arch/s390/crypto/prng.c nbytes -= chunk;
chunk 579 arch/s390/crypto/prng.c ret += chunk;
chunk 580 arch/s390/crypto/prng.c ubuf += chunk;
chunk 27 arch/um/drivers/mconsole_kern.h #define CONFIG_CHUNK(str, size, current, chunk, end) \
chunk 29 arch/um/drivers/mconsole_kern.h current += strlen(chunk); \
chunk 33 arch/um/drivers/mconsole_kern.h strcpy(str, chunk); \
chunk 34 arch/um/drivers/mconsole_kern.h str += strlen(chunk); \
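The cacheflush, s390 prng, and DEC console entries above all instantiate the same bounded-chunk loop: cap the per-iteration work at a fixed quantum (PAGE_SIZE, a device FIFO width, a stack buffer), process one piece, advance. A minimal plain-C sketch of the pattern, with process_range() as a hypothetical stand-in for the real per-chunk operation such as flush_cache_user_range():

#define CHUNK_QUANTUM 4096UL    /* e.g. PAGE_SIZE */

static int process_in_chunks(unsigned long start, unsigned long end,
                             int (*process_range)(unsigned long s,
                                                  unsigned long e))
{
        while (start < end) {
                unsigned long chunk = end - start;

                if (chunk > CHUNK_QUANTUM)
                        chunk = CHUNK_QUANTUM;  /* cap per-iteration work */

                int ret = process_range(start, start + chunk);

                if (ret)
                        return ret;             /* propagate first failure */

                start += chunk;
        }
        return 0;
}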
chunk 333 arch/x86/xen/p2m.c unsigned int i, chunk;
chunk 356 arch/x86/xen/p2m.c for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += chunk) {
chunk 367 arch/x86/xen/p2m.c chunk = (pfn & (P2M_PER_PAGE * P2M_MID_PER_PAGE - 1)) ?
chunk 373 arch/x86/xen/p2m.c for (i = 1; i < chunk; i++)
chunk 376 arch/x86/xen/p2m.c if (i < chunk)
chunk 378 arch/x86/xen/p2m.c chunk = P2M_PER_PAGE;
chunk 380 arch/x86/xen/p2m.c if (type == P2M_TYPE_PFN || i < chunk) {
chunk 394 arch/x86/xen/p2m.c if (chunk == P2M_PER_PAGE) {
chunk 341 arch/x86/xen/setup.c unsigned int i, chunk;
chunk 350 arch/x86/xen/setup.c chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;
chunk 359 arch/x86/xen/setup.c xen_remap_buf.size = chunk;
chunk 360 arch/x86/xen/setup.c for (i = 0; i < chunk; i++)
chunk 367 arch/x86/xen/setup.c set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);
chunk 369 arch/x86/xen/setup.c left -= chunk;
chunk 1468 crypto/drbg.c unsigned int chunk = 0;
chunk 1470 crypto/drbg.c chunk = slice ? drbg_max_request_bytes(drbg) : (buflen - len);
chunk 1472 crypto/drbg.c err = drbg_generate(drbg, buf + len, chunk, addtl);
chunk 1476 crypto/drbg.c len += chunk;
chunk 165 drivers/atm/fore200e.c fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
chunk 172 drivers/atm/fore200e.c chunk->alloc_size = size + alignment;
chunk 173 drivers/atm/fore200e.c chunk->direction = direction;
chunk 175 drivers/atm/fore200e.c chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL);
chunk 176 drivers/atm/fore200e.c if (chunk->alloc_addr == NULL)
chunk 180 drivers/atm/fore200e.c offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
chunk 182 drivers/atm/fore200e.c chunk->align_addr = chunk->alloc_addr + offset;
chunk 184 drivers/atm/fore200e.c chunk->dma_addr = dma_map_single(fore200e->dev, chunk->align_addr,
chunk 186 drivers/atm/fore200e.c if (dma_mapping_error(fore200e->dev, chunk->dma_addr)) {
chunk 187 drivers/atm/fore200e.c kfree(chunk->alloc_addr);
chunk 197 drivers/atm/fore200e.c fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
chunk 199 drivers/atm/fore200e.c dma_unmap_single(fore200e->dev, chunk->dma_addr, chunk->dma_size,
chunk 200 drivers/atm/fore200e.c chunk->direction);
chunk 201 drivers/atm/fore200e.c kfree(chunk->alloc_addr);
chunk 210 drivers/atm/fore200e.c fore200e_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
chunk 214 drivers/atm/fore200e.c chunk->alloc_size = size * nbr;
chunk 215 drivers/atm/fore200e.c chunk->alloc_addr = dma_alloc_coherent(fore200e->dev, chunk->alloc_size,
chunk 216 drivers/atm/fore200e.c &chunk->dma_addr, GFP_KERNEL);
chunk 217 drivers/atm/fore200e.c if (!chunk->alloc_addr)
chunk 219 drivers/atm/fore200e.c chunk->align_addr = chunk->alloc_addr;
chunk 227 drivers/atm/fore200e.c fore200e_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
chunk 229 drivers/atm/fore200e.c dma_free_coherent(fore200e->dev, chunk->alloc_size, chunk->alloc_addr,
chunk 230 drivers/atm/fore200e.c chunk->dma_addr);
chunk 301 drivers/atm/fore200e.c struct chunk* data = &buffer[ nbr ].data;
chunk 320 drivers/atm/fore200e.c struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status;
chunk 321 drivers/atm/fore200e.c struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
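A condensed sketch of the fore200e_chunk_alloc() pattern above: over-allocate by the alignment, align the usable pointer inside the buffer, then stream-map the aligned region for DMA. struct chunk_buf mirrors the struct chunk fields visible in the listing; error unwinding is simplified relative to the driver:

#include <linux/slab.h>
#include <linux/dma-mapping.h>

struct chunk_buf {
        void *alloc_addr;       /* address returned by kzalloc() */
        void *align_addr;       /* aligned address handed to the device */
        dma_addr_t dma_addr;
        int alloc_size;
        enum dma_data_direction direction;
};

static int chunk_alloc_aligned(struct device *dev, struct chunk_buf *c,
                               int size, int alignment,
                               enum dma_data_direction dir)
{
        c->alloc_size = size + alignment;       /* room to slide forward */
        c->direction = dir;

        c->alloc_addr = kzalloc(c->alloc_size, GFP_KERNEL);
        if (!c->alloc_addr)
                return -ENOMEM;

        /* Round the usable pointer up to the requested alignment. */
        c->align_addr = PTR_ALIGN(c->alloc_addr, alignment);

        c->dma_addr = dma_map_single(dev, c->align_addr, size, dir);
        if (dma_mapping_error(dev, c->dma_addr)) {
                kfree(c->alloc_addr);
                return -ENOMEM;
        }
        return 0;
}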
chunk 582 drivers/atm/fore200e.h struct chunk data; /* data buffer */
chunk 604 drivers/atm/fore200e.h struct chunk status; /* array of completion status */
chunk 614 drivers/atm/fore200e.h struct chunk tpd; /* array of tpds */
chunk 615 drivers/atm/fore200e.h struct chunk status; /* arry of completion status */
chunk 625 drivers/atm/fore200e.h struct chunk rpd; /* array of rpds */
chunk 626 drivers/atm/fore200e.h struct chunk status; /* array of completion status */
chunk 635 drivers/atm/fore200e.h struct chunk rbd_block; /* array of rbds */
chunk 636 drivers/atm/fore200e.h struct chunk status; /* array of completion status */
chunk 710 drivers/char/mem.c size_t chunk = iov_iter_count(iter), n;
chunk 712 drivers/char/mem.c if (chunk > PAGE_SIZE)
chunk 713 drivers/char/mem.c chunk = PAGE_SIZE; /* Just for latency reasons */
chunk 714 drivers/char/mem.c n = iov_iter_zero(chunk, iter);
chunk 1906 drivers/char/random.c int chunk = min_t(int, left, sizeof(unsigned long));
chunk 1911 drivers/char/random.c memcpy(p, &v, chunk);
chunk 1912 drivers/char/random.c p += chunk;
chunk 1913 drivers/char/random.c left -= chunk;
chunk 826 drivers/crypto/axis/artpec6_crypto.c size_t chunk;
chunk 831 drivers/crypto/axis/artpec6_crypto.c chunk = min(count, artpec6_crypto_walk_chunklen(walk));
chunk 840 drivers/crypto/axis/artpec6_crypto.c chunk = min_t(dma_addr_t, chunk,
chunk 844 drivers/crypto/axis/artpec6_crypto.c pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
chunk 845 drivers/crypto/axis/artpec6_crypto.c ret = setup_bounce_buffer_in(common, walk, chunk);
chunk 846 drivers/crypto/axis/artpec6_crypto.c } else if (chunk < ARTPEC_CACHE_LINE_MAX) {
chunk 847 drivers/crypto/axis/artpec6_crypto.c pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
chunk 848 drivers/crypto/axis/artpec6_crypto.c ret = setup_bounce_buffer_in(common, walk, chunk);
chunk 852 drivers/crypto/axis/artpec6_crypto.c chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1);
chunk 854 drivers/crypto/axis/artpec6_crypto.c pr_debug("CHUNK %pad:%zu\n", &addr, chunk);
chunk 860 drivers/crypto/axis/artpec6_crypto.c chunk,
chunk 868 drivers/crypto/axis/artpec6_crypto.c chunk, false);
chunk 874 drivers/crypto/axis/artpec6_crypto.c count = count - chunk;
chunk 875 drivers/crypto/axis/artpec6_crypto.c artpec6_crypto_walk_advance(walk, chunk);
chunk 889 drivers/crypto/axis/artpec6_crypto.c size_t chunk;
chunk 894 drivers/crypto/axis/artpec6_crypto.c chunk = min(count, artpec6_crypto_walk_chunklen(walk));
chunk 897 drivers/crypto/axis/artpec6_crypto.c pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk);
chunk 902 drivers/crypto/axis/artpec6_crypto.c chunk = min_t(size_t, chunk, (4-(addr&3)));
chunk 904 drivers/crypto/axis/artpec6_crypto.c sg_pcopy_to_buffer(walk->sg, 1, buf, chunk,
chunk 908 drivers/crypto/axis/artpec6_crypto.c chunk,
chunk 917 drivers/crypto/axis/artpec6_crypto.c chunk,
chunk 925 drivers/crypto/axis/artpec6_crypto.c chunk, false);
chunk 931 drivers/crypto/axis/artpec6_crypto.c count = count - chunk;
chunk 932 drivers/crypto/axis/artpec6_crypto.c artpec6_crypto_walk_advance(walk, chunk);
chunk 174 drivers/crypto/cavium/cpt/cptvf_main.c struct command_chunk *chunk = NULL;
chunk 184 drivers/crypto/cavium/cpt/cptvf_main.c hlist_for_each_entry_safe(chunk, node, &cqinfo->queue[i].chead,
chunk 186 drivers/crypto/cavium/cpt/cptvf_main.c dma_free_coherent(&pdev->dev, chunk->size,
chunk 187 drivers/crypto/cavium/cpt/cptvf_main.c chunk->head,
chunk 188 drivers/crypto/cavium/cpt/cptvf_main.c chunk->dma_addr);
chunk 189 drivers/crypto/cavium/cpt/cptvf_main.c chunk->head = NULL;
chunk 190 drivers/crypto/cavium/cpt/cptvf_main.c chunk->dma_addr = 0;
chunk 191 drivers/crypto/cavium/cpt/cptvf_main.c hlist_del(&chunk->nextchunk);
chunk 192 drivers/crypto/cavium/cpt/cptvf_main.c kzfree(chunk);
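The cptvf_main.c entries above show the canonical teardown of a list of DMA-backed chunks: the _safe hlist iterator allows each node to be unlinked and freed while walking. A condensed sketch, with struct command_chunk trimmed to the fields the listing shows (the driver uses kzfree(); plain kfree() stands in here):

#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

struct command_chunk {
        u8 *head;                       /* coherent CPU address */
        dma_addr_t dma_addr;
        u32 size;
        struct hlist_node nextchunk;
};

static void free_command_chunks(struct device *dev, struct hlist_head *chead)
{
        struct command_chunk *chunk;
        struct hlist_node *node;

        hlist_for_each_entry_safe(chunk, node, chead, nextchunk) {
                dma_free_coherent(dev, chunk->size, chunk->head,
                                  chunk->dma_addr);
                chunk->head = NULL;
                chunk->dma_addr = 0;
                hlist_del(&chunk->nextchunk);   /* safe while iterating */
                kfree(chunk);
        }
}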
chunk 229 drivers/crypto/cavium/cpt/cptvf_reqmanager.c struct command_chunk *chunk;
chunk 250 drivers/crypto/cavium/cpt/cptvf_reqmanager.c chunk = hlist_entry(node, struct command_chunk,
chunk 252 drivers/crypto/cavium/cpt/cptvf_reqmanager.c if (chunk == queue->qhead) {
chunk 255 drivers/crypto/cavium/cpt/cptvf_reqmanager.c queue->qhead = chunk;
chunk 688 drivers/crypto/n2_core.c struct n2_crypto_chunk chunk;
chunk 884 drivers/crypto/n2_core.c struct n2_crypto_chunk *chunk;
chunk 897 drivers/crypto/n2_core.c chunk = &rctx->chunk;
chunk 898 drivers/crypto/n2_core.c INIT_LIST_HEAD(&chunk->entry);
chunk 900 drivers/crypto/n2_core.c chunk->iv_paddr = 0UL;
chunk 901 drivers/crypto/n2_core.c chunk->arr_len = 0;
chunk 902 drivers/crypto/n2_core.c chunk->dest_paddr = 0UL;
chunk 920 drivers/crypto/n2_core.c if (chunk->arr_len != 0) {
chunk 924 drivers/crypto/n2_core.c chunk->arr_len == N2_CHUNK_ARR_LEN ||
chunk 926 drivers/crypto/n2_core.c chunk->dest_final = dest_prev;
chunk 927 drivers/crypto/n2_core.c list_add_tail(&chunk->entry,
chunk 929 drivers/crypto/n2_core.c chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
chunk 930 drivers/crypto/n2_core.c if (!chunk) {
chunk 934 drivers/crypto/n2_core.c INIT_LIST_HEAD(&chunk->entry);
chunk 937 drivers/crypto/n2_core.c if (chunk->arr_len == 0) {
chunk 938 drivers/crypto/n2_core.c chunk->dest_paddr = dest_paddr;
chunk 941 drivers/crypto/n2_core.c chunk->arr[chunk->arr_len].src_paddr = src_paddr;
chunk 942 drivers/crypto/n2_core.c chunk->arr[chunk->arr_len].src_len = this_len;
chunk 943 drivers/crypto/n2_core.c chunk->arr_len++;
chunk 953 drivers/crypto/n2_core.c if (!err && chunk->arr_len != 0) {
chunk 954 drivers/crypto/n2_core.c chunk->dest_final = dest_prev;
chunk 955 drivers/crypto/n2_core.c list_add_tail(&chunk->entry, &rctx->chunk_list);
chunk 972 drivers/crypto/n2_core.c if (unlikely(c != &rctx->chunk))
chunk 1002 drivers/crypto/n2_core.c if (unlikely(c != &rctx->chunk))
chunk 1062 drivers/crypto/n2_core.c if (unlikely(c != &rctx->chunk))
chunk 1069 drivers/crypto/n2_core.c if (c == &rctx->chunk) {
chunk 1091 drivers/crypto/n2_core.c if (unlikely(c != &rctx->chunk))
chunk 526 drivers/crypto/qat/qat_common/qat_uclo.c char *chunk;
chunk 534 drivers/crypto/qat/qat_common/qat_uclo.c chunk = buf + file_chunk->offset;
chunk 536 drivers/crypto/qat/qat_common/qat_uclo.c chunk, file_chunk->size))
chunk 541 drivers/crypto/qat/qat_common/qat_uclo.c obj_hdr->file_buff = chunk;
chunk 731 drivers/dma/at_hdmac.c struct data_chunk *chunk = xt->sgl + i;
chunk 733 drivers/dma/at_hdmac.c if ((chunk->size != xt->sgl->size) ||
chunk 734 drivers/dma/at_hdmac.c (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, first)) ||
chunk 735 drivers/dma/at_hdmac.c (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, first))) {
chunk 742 drivers/dma/at_hdmac.c len += chunk->size;
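The at_hdmac.c entries above check that an interleaved DMA template is uniform: every struct data_chunk must have the same size and the same source/destination inter-chunk gaps as the first, with the total length accumulated along the way. A sketch of that validation using the real dmaengine helpers (the -EINVAL policy and the total_len out-parameter are illustrative):

#include <linux/dmaengine.h>

static int validate_uniform_chunks(struct dma_interleaved_template *xt,
                                   size_t *total_len)
{
        struct data_chunk *first = xt->sgl;
        size_t len = 0;
        size_t i;

        for (i = 0; i < xt->frame_size; i++) {
                struct data_chunk *chunk = xt->sgl + i;

                if (chunk->size != first->size ||
                    dmaengine_get_dst_icg(xt, chunk) !=
                                dmaengine_get_dst_icg(xt, first) ||
                    dmaengine_get_src_icg(xt, chunk) !=
                                dmaengine_get_src_icg(xt, first))
                        return -EINVAL;         /* non-uniform template */

                len += chunk->size;
        }

        *total_len = len;
        return 0;
}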
chunk 852 drivers/dma/at_xdmac.c struct data_chunk *chunk)
chunk 876 drivers/dma/at_xdmac.c dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
chunk 877 drivers/dma/at_xdmac.c if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
chunk 880 drivers/dma/at_xdmac.c __func__, chunk->size,
chunk 913 drivers/dma/at_xdmac.c ublen = chunk->size >> dwidth;
chunk 917 drivers/dma/at_xdmac.c desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk);
chunk 918 drivers/dma/at_xdmac.c desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk);
chunk 947 drivers/dma/at_xdmac.c struct data_chunk *chunk;
chunk 985 drivers/dma/at_xdmac.c chunk = xt->sgl + i;
chunk 987 drivers/dma/at_xdmac.c dst_icg = dmaengine_get_dst_icg(xt, chunk);
chunk 988 drivers/dma/at_xdmac.c src_icg = dmaengine_get_src_icg(xt, chunk);
chunk 990 drivers/dma/at_xdmac.c src_skip = chunk->size + src_icg;
chunk 991 drivers/dma/at_xdmac.c dst_skip = chunk->size + dst_icg;
chunk 995 drivers/dma/at_xdmac.c __func__, chunk->size, src_icg, dst_icg);
chunk 1000 drivers/dma/at_xdmac.c xt, chunk);
chunk 1020 drivers/dma/at_xdmac.c len += chunk->size;
chunk 42 drivers/dma/dw-edma/dw-edma-core.c static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
chunk 51 drivers/dma/dw-edma/dw-edma-core.c if (chunk->burst) {
chunk 53 drivers/dma/dw-edma/dw-edma-core.c chunk->bursts_alloc++;
chunk 54 drivers/dma/dw-edma/dw-edma-core.c list_add_tail(&burst->list, &chunk->burst->list);
chunk 57 drivers/dma/dw-edma/dw-edma-core.c chunk->bursts_alloc = 0;
chunk 58 drivers/dma/dw-edma/dw-edma-core.c chunk->burst = burst;
chunk 68 drivers/dma/dw-edma/dw-edma-core.c struct dw_edma_chunk *chunk;
chunk 70 drivers/dma/dw-edma/dw-edma-core.c chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
chunk 71 drivers/dma/dw-edma/dw-edma-core.c if (unlikely(!chunk))
chunk 74 drivers/dma/dw-edma/dw-edma-core.c INIT_LIST_HEAD(&chunk->list);
chunk 75 drivers/dma/dw-edma/dw-edma-core.c chunk->chan = chan;
chunk 82 drivers/dma/dw-edma/dw-edma-core.c chunk->cb = !(desc->chunks_alloc % 2);
chunk 83 drivers/dma/dw-edma/dw-edma-core.c chunk->ll_region.paddr = dw->ll_region.paddr + chan->ll_off;
chunk 84 drivers/dma/dw-edma/dw-edma-core.c chunk->ll_region.vaddr = dw->ll_region.vaddr + chan->ll_off;
chunk 86 drivers/dma/dw-edma/dw-edma-core.c if (desc->chunk) {
chunk 89 drivers/dma/dw-edma/dw-edma-core.c list_add_tail(&chunk->list, &desc->chunk->list);
chunk 90 drivers/dma/dw-edma/dw-edma-core.c if (!dw_edma_alloc_burst(chunk)) {
chunk 91 drivers/dma/dw-edma/dw-edma-core.c kfree(chunk);
chunk 96 drivers/dma/dw-edma/dw-edma-core.c chunk->burst = NULL;
chunk 98 drivers/dma/dw-edma/dw-edma-core.c desc->chunk = chunk;
chunk 101 drivers/dma/dw-edma/dw-edma-core.c return chunk;
chunk 121 drivers/dma/dw-edma/dw-edma-core.c static void dw_edma_free_burst(struct dw_edma_chunk *chunk)
chunk 126 drivers/dma/dw-edma/dw-edma-core.c list_for_each_entry_safe(child, _next, &chunk->burst->list, list) {
chunk 129 drivers/dma/dw-edma/dw-edma-core.c chunk->bursts_alloc--;
chunk 134 drivers/dma/dw-edma/dw-edma-core.c chunk->burst = NULL;
chunk 141 drivers/dma/dw-edma/dw-edma-core.c if (!desc->chunk)
chunk 145 drivers/dma/dw-edma/dw-edma-core.c list_for_each_entry_safe(child, _next, &desc->chunk->list, list) {
chunk 154 drivers/dma/dw-edma/dw-edma-core.c desc->chunk = NULL;
chunk 182 drivers/dma/dw-edma/dw-edma-core.c child = list_first_entry_or_null(&desc->chunk->list,
chunk 328 drivers/dma/dw-edma/dw-edma-core.c struct dw_edma_chunk *chunk;
chunk 353 drivers/dma/dw-edma/dw-edma-core.c chunk = dw_edma_alloc_chunk(desc);
chunk 354 drivers/dma/dw-edma/dw-edma-core.c if (unlikely(!chunk))
chunk 371 drivers/dma/dw-edma/dw-edma-core.c if (chunk->bursts_alloc == chan->ll_max) {
chunk 372 drivers/dma/dw-edma/dw-edma-core.c chunk = dw_edma_alloc_chunk(desc);
chunk 373 drivers/dma/dw-edma/dw-edma-core.c if (unlikely(!chunk))
chunk 377 drivers/dma/dw-edma/dw-edma-core.c burst = dw_edma_alloc_burst(chunk);
chunk 386 drivers/dma/dw-edma/dw-edma-core.c chunk->ll_region.sz += burst->sz;
chunk 71 drivers/dma/dw-edma/dw-edma-core.h struct dw_edma_chunk *chunk;
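The dw-edma-core.c entries above grow a descriptor as a linked list of chunks, each of which in turn owns a list of bursts; GFP_NOWAIT is used because allocation happens on the non-sleeping prep path, and the cb flag alternates per chunk. A condensed sketch with the structs trimmed to the fields the listing shows (chunks_alloc++ is inferred from the modulo use and is an assumption):

#include <linux/list.h>
#include <linux/slab.h>

struct edma_chunk {
        struct list_head list;
        bool cb;                        /* "change bit" for this chunk */
};

struct edma_desc {
        struct edma_chunk *chunk;       /* head chunk, NULL until first alloc */
        unsigned int chunks_alloc;
};

static struct edma_chunk *edma_alloc_chunk(struct edma_desc *desc)
{
        struct edma_chunk *chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);

        if (unlikely(!chunk))
                return NULL;

        INIT_LIST_HEAD(&chunk->list);
        chunk->cb = !(desc->chunks_alloc % 2);  /* alternate per chunk */

        if (desc->chunk)
                list_add_tail(&chunk->list, &desc->chunk->list);
        else
                desc->chunk = chunk;            /* first chunk becomes head */

        desc->chunks_alloc++;
        return chunk;
}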
chunk 192 drivers/dma/dw-edma/dw-edma-v0-core.c static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
chunk 200 drivers/dma/dw-edma/dw-edma-v0-core.c lli = chunk->ll_region.vaddr;
chunk 202 drivers/dma/dw-edma/dw-edma-v0-core.c if (chunk->cb)
chunk 205 drivers/dma/dw-edma/dw-edma-v0-core.c j = chunk->bursts_alloc;
chunk 206 drivers/dma/dw-edma/dw-edma-v0-core.c list_for_each_entry(child, &chunk->burst->list, list) {
chunk 226 drivers/dma/dw-edma/dw-edma-v0-core.c if (!chunk->cb)
chunk 232 drivers/dma/dw-edma/dw-edma-v0-core.c SET_LL(&llp->llp_low, lower_32_bits(chunk->ll_region.paddr));
chunk 233 drivers/dma/dw-edma/dw-edma-v0-core.c SET_LL(&llp->llp_high, upper_32_bits(chunk->ll_region.paddr));
chunk 236 drivers/dma/dw-edma/dw-edma-v0-core.c void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
chunk 238 drivers/dma/dw-edma/dw-edma-v0-core.c struct dw_edma_chan *chan = chunk->chan;
chunk 242 drivers/dma/dw-edma/dw-edma-v0-core.c dw_edma_v0_core_write_chunk(chunk);
chunk 261 drivers/dma/dw-edma/dw-edma-v0-core.c lower_32_bits(chunk->ll_region.paddr));
chunk 263 drivers/dma/dw-edma/dw-edma-v0-core.c upper_32_bits(chunk->ll_region.paddr));
chunk 22 drivers/dma/dw-edma/dw-edma-v0-core.h void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first);
chunk 330 drivers/dma/ioat/dma.c int chunk;
chunk 335 drivers/dma/ioat/dma.c chunk = idx / IOAT_DESCS_PER_2M;
chunk 338 drivers/dma/ioat/dma.c pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
chunk 339 drivers/dma/ioat/dma.c phys = ioat_chan->descs[chunk].hw + offs;
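The ioat entries above locate a descriptor in a ring that is backed by an array of large fixed-size allocations: a ring index splits into a chunk index and a byte offset inside that chunk. A plain-C sketch; DESCS_PER_CHUNK and struct desc_chunk are illustrative stand-ins for the driver's IOAT_DESCS_PER_2M and descs[] layout:

#include <stddef.h>

#define DESCS_PER_CHUNK 32768           /* e.g. 2 MB / 64-byte descriptors */

struct desc_chunk {
        void *virt;                     /* CPU address of the allocation */
        unsigned long long hw;          /* bus address of the allocation */
};

static void *desc_lookup(struct desc_chunk *descs, int idx, size_t desc_size,
                         unsigned long long *phys)
{
        int chunk = idx / DESCS_PER_CHUNK;
        size_t offs = (size_t)(idx % DESCS_PER_CHUNK) * desc_size;

        *phys = descs[chunk].hw + offs;         /* device-visible address */
        return (char *)descs[chunk].virt + offs;
}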
chunk 350 drivers/dma/sh/rcar-dmac.c struct rcar_dmac_xfer_chunk *chunk =
chunk 360 drivers/dma/sh/rcar-dmac.c chunk->src_addr >> 32);
chunk 362 drivers/dma/sh/rcar-dmac.c chunk->dst_addr >> 32);
chunk 381 drivers/dma/sh/rcar-dmac.c chunk->dst_addr & 0xffffffff);
chunk 411 drivers/dma/sh/rcar-dmac.c struct rcar_dmac_xfer_chunk *chunk = desc->running;
chunk 415 drivers/dma/sh/rcar-dmac.c chan->index, chunk, chunk->size, &chunk->src_addr,
chunk 416 drivers/dma/sh/rcar-dmac.c &chunk->dst_addr);
chunk 420 drivers/dma/sh/rcar-dmac.c chunk->src_addr >> 32);
chunk 422 drivers/dma/sh/rcar-dmac.c chunk->dst_addr >> 32);
chunk 425 drivers/dma/sh/rcar-dmac.c chunk->src_addr & 0xffffffff);
chunk 427 drivers/dma/sh/rcar-dmac.c chunk->dst_addr & 0xffffffff);
chunk 429 drivers/dma/sh/rcar-dmac.c chunk->size >> desc->xfer_shift);
chunk 635 drivers/dma/sh/rcar-dmac.c struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];
chunk 637 drivers/dma/sh/rcar-dmac.c list_add_tail(&chunk->node, &list);
chunk 660 drivers/dma/sh/rcar-dmac.c struct rcar_dmac_xfer_chunk *chunk;
chunk 680 drivers/dma/sh/rcar-dmac.c chunk = list_first_entry(&chan->desc.chunks_free,
chunk 682 drivers/dma/sh/rcar-dmac.c list_del(&chunk->node);
chunk 686 drivers/dma/sh/rcar-dmac.c return chunk;
chunk 724 drivers/dma/sh/rcar-dmac.c struct rcar_dmac_xfer_chunk *chunk;
chunk 733 drivers/dma/sh/rcar-dmac.c list_for_each_entry(chunk, &desc->chunks, node) {
chunk 734 drivers/dma/sh/rcar-dmac.c hwdesc->sar = chunk->src_addr;
chunk 735 drivers/dma/sh/rcar-dmac.c hwdesc->dar = chunk->dst_addr;
chunk 736 drivers/dma/sh/rcar-dmac.c hwdesc->tcr = chunk->size >> desc->xfer_shift;
chunk 899 drivers/dma/sh/rcar-dmac.c struct rcar_dmac_xfer_chunk *chunk;
chunk 964 drivers/dma/sh/rcar-dmac.c chunk = rcar_dmac_xfer_chunk_get(chan);
chunk 965 drivers/dma/sh/rcar-dmac.c if (!chunk) {
chunk 971 drivers/dma/sh/rcar-dmac.c chunk->src_addr = dev_addr;
chunk 972 drivers/dma/sh/rcar-dmac.c chunk->dst_addr = mem_addr;
chunk 974 drivers/dma/sh/rcar-dmac.c chunk->src_addr = mem_addr;
chunk 975 drivers/dma/sh/rcar-dmac.c chunk->dst_addr = dev_addr;
chunk 978 drivers/dma/sh/rcar-dmac.c chunk->size = size;
chunk 982 drivers/dma/sh/rcar-dmac.c chan->index, chunk, desc, i, sg, size, len,
chunk 983 drivers/dma/sh/rcar-dmac.c &chunk->src_addr, &chunk->dst_addr);
chunk 991 drivers/dma/sh/rcar-dmac.c list_add_tail(&chunk->node, &desc->chunks);
chunk 1286 drivers/dma/sh/rcar-dmac.c struct rcar_dmac_xfer_chunk *chunk;
chunk 1375 drivers/dma/sh/rcar-dmac.c list_for_each_entry_reverse(chunk, &desc->chunks, node) {
chunk 1376 drivers/dma/sh/rcar-dmac.c if (chunk == running || ++dptr == desc->nchunks)
chunk 1379 drivers/dma/sh/rcar-dmac.c residue += chunk->size;
chunk 72 drivers/dma/sh/shdma-base.c struct shdma_desc *chunk, *c, *desc =
chunk 86 drivers/dma/sh/shdma-base.c list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
chunk 91 drivers/dma/sh/shdma-base.c if (chunk != desc && (chunk->mark == DESC_IDLE ||
chunk 92 drivers/dma/sh/shdma-base.c chunk->async_tx.cookie > 0 ||
chunk 93 drivers/dma/sh/shdma-base.c chunk->async_tx.cookie == -EBUSY ||
chunk 94 drivers/dma/sh/shdma-base.c &chunk->node == &schan->ld_free))
chunk 96 drivers/dma/sh/shdma-base.c chunk->mark = DESC_SUBMITTED;
chunk 97 drivers/dma/sh/shdma-base.c if (chunk->chunks == 1) {
chunk 98 drivers/dma/sh/shdma-base.c chunk->async_tx.callback = callback;
chunk 99 drivers/dma/sh/shdma-base.c chunk->async_tx.callback_param = tx->callback_param;
chunk 102 drivers/dma/sh/shdma-base.c chunk->async_tx.callback = NULL;
chunk 104 drivers/dma/sh/shdma-base.c chunk->cookie = cookie;
chunk 105 drivers/dma/sh/shdma-base.c list_move_tail(&chunk->node, &schan->ld_queue);
chunk 108 drivers/dma/sh/shdma-base.c tx->cookie, &chunk->async_tx, schan->id);
chunk 477 drivers/fsi/fsi-sbefifo.c size_t len, chunk, vacant = 0, remaining = cmd_len;
chunk 495 drivers/fsi/fsi-sbefifo.c len = chunk = min(vacant, remaining);
chunk 498 drivers/fsi/fsi-sbefifo.c status, vacant, chunk);
chunk 508 drivers/fsi/fsi-sbefifo.c remaining -= chunk;
chunk 509 drivers/fsi/fsi-sbefifo.c vacant -= chunk;
chunk 803 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c struct amdgpu_cs_chunk *chunk;
chunk 808 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c chunk = &p->chunks[i];
chunk 810 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c chunk_ib = chunk->kdata;
chunk 812 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
chunk 950 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c struct amdgpu_cs_chunk *chunk;
chunk 955 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c chunk = &parser->chunks[i];
chunk 957 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
chunk 959 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
chunk 1015 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c struct amdgpu_cs_chunk *chunk)
chunk 1022 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
chunk 1023 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c num_deps = chunk->length_dw * 4 /
chunk 1051 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
chunk 1089 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c struct amdgpu_cs_chunk *chunk)
chunk 1095 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
chunk 1096 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c num_deps = chunk->length_dw * 4 /
chunk 1110 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c struct amdgpu_cs_chunk *chunk)
chunk 1116 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
chunk 1117 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c num_deps = chunk->length_dw * 4 /
chunk 1132 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c struct amdgpu_cs_chunk *chunk)
chunk 1138 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
chunk 1139 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c num_deps = chunk->length_dw * 4 /
chunk 1168 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c struct amdgpu_cs_chunk *chunk)
chunk 1174 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
chunk 1175 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c num_deps = chunk->length_dw * 4 /
chunk 1217 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c struct amdgpu_cs_chunk *chunk;
chunk 1219 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c chunk = &p->chunks[i];
chunk 1221 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c switch (chunk->chunk_id) {
chunk 1224 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c r = amdgpu_cs_process_fence_dep(p, chunk);
chunk 1229 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
chunk 1234 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
chunk 1239 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
chunk 1244 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
chunk 430 drivers/gpu/drm/drm_dp_mst_topology.c memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
chunk 432 drivers/gpu/drm/drm_dp_mst_topology.c memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
chunk 438 drivers/gpu/drm/drm_dp_mst_topology.c crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
chunk 440 drivers/gpu/drm/drm_dp_mst_topology.c memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
chunk 1999 drivers/gpu/drm/drm_dp_mst_topology.c u8 chunk[48];
chunk 2031 drivers/gpu/drm/drm_dp_mst_topology.c drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
chunk 2032 drivers/gpu/drm/drm_dp_mst_topology.c memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
chunk 2034 drivers/gpu/drm/drm_dp_mst_topology.c drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
chunk 2037 drivers/gpu/drm/drm_dp_mst_topology.c ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
chunk 775 drivers/gpu/drm/drm_mipi_dbi.c size_t chunk, max_chunk = dbi->tx_buf9_len;
chunk 816 drivers/gpu/drm/drm_mipi_dbi.c chunk = min(len, max_chunk);
chunk 817 drivers/gpu/drm/drm_mipi_dbi.c len -= chunk;
chunk 820 drivers/gpu/drm/drm_mipi_dbi.c if (chunk < 8) {
chunk 827 drivers/gpu/drm/drm_mipi_dbi.c for (i = 1; i < (chunk + 1); i++) {
chunk 839 drivers/gpu/drm/drm_mipi_dbi.c for (i = 1; i < (chunk + 1); i++) {
chunk 847 drivers/gpu/drm/drm_mipi_dbi.c chunk = 8;
chunk 850 drivers/gpu/drm/drm_mipi_dbi.c for (i = 0; i < chunk; i += 8) {
chunk 878 drivers/gpu/drm/drm_mipi_dbi.c tr.len = chunk + added;
chunk 920 drivers/gpu/drm/drm_mipi_dbi.c size_t chunk = min(len, max_chunk);
chunk 924 drivers/gpu/drm/drm_mipi_dbi.c for (i = 0; i < (chunk * 2); i += 2) {
chunk 933 drivers/gpu/drm/drm_mipi_dbi.c for (i = 0; i < chunk; i++) {
chunk 940 drivers/gpu/drm/drm_mipi_dbi.c tr.len = chunk;
chunk 941 drivers/gpu/drm/drm_mipi_dbi.c len -= chunk;
chunk 1156 drivers/gpu/drm/drm_mipi_dbi.c size_t chunk;
chunk 1162 drivers/gpu/drm/drm_mipi_dbi.c chunk = min(len, max_chunk);
chunk 1165 drivers/gpu/drm/drm_mipi_dbi.c tr.len = chunk;
chunk 1166 drivers/gpu/drm/drm_mipi_dbi.c buf += chunk;
chunk 1167 drivers/gpu/drm/drm_mipi_dbi.c len -= chunk;
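The drm_mipi_dbi.c entries above split a buffer that exceeds the SPI controller's maximum transfer size into a sequence of bounded transfers, advancing the buffer pointer and shrinking len each time. A plain-C sketch of the loop, with spi_sync_one() as a hypothetical stand-in for building and issuing one struct spi_transfer of `chunk` bytes:

#include <stddef.h>

static int send_chunked(const void *buf, size_t len, size_t max_chunk,
                        int (*spi_sync_one)(const void *buf, size_t len))
{
        while (len) {
                size_t chunk = len < max_chunk ? len : max_chunk;
                int ret = spi_sync_one(buf, chunk);

                if (ret)
                        return ret;             /* abort on transfer error */

                buf = (const char *)buf + chunk;
                len -= chunk;
        }
        return 0;
}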
chunk 179 drivers/gpu/drm/i915/gem/i915_gem_mman.c unsigned int chunk)
chunk 184 drivers/gpu/drm/i915/gem/i915_gem_mman.c chunk = roundup(chunk, tile_row_pages(obj));
chunk 187 drivers/gpu/drm/i915/gem/i915_gem_mman.c view.partial.offset = rounddown(page_offset, chunk);
chunk 189 drivers/gpu/drm/i915/gem/i915_gem_mman.c min_t(unsigned int, chunk,
chunk 193 drivers/gpu/drm/i915/gem/i915_gem_mman.c if (chunk >= obj->base.size >> PAGE_SHIFT)
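The i915 entries above compute a chunk-aligned partial mapping window around a faulting page: round the offset down to a chunk boundary, then map at most `chunk` pages without running past the object. A plain-C restatement of that arithmetic (names are illustrative):

static void partial_view(unsigned long page_offset, unsigned long chunk,
                         unsigned long obj_pages,
                         unsigned long *view_offset, unsigned long *view_pages)
{
        *view_offset = page_offset - (page_offset % chunk);     /* rounddown */
        *view_pages  = obj_pages - *view_offset < chunk ?
                       obj_pages - *view_offset : chunk;        /* clamp */
}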
chunk 89 drivers/gpu/drm/nouveau/nouveau_dmem.c struct nouveau_dmem_chunk *chunk = page->zone_device_data;
chunk 90 drivers/gpu/drm/nouveau/nouveau_dmem.c unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
chunk 92 drivers/gpu/drm/nouveau/nouveau_dmem.c return (idx << PAGE_SHIFT) + chunk->bo->bo.offset;
chunk 97 drivers/gpu/drm/nouveau/nouveau_dmem.c struct nouveau_dmem_chunk *chunk = page->zone_device_data;
chunk 98 drivers/gpu/drm/nouveau/nouveau_dmem.c unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
chunk 107 drivers/gpu/drm/nouveau/nouveau_dmem.c spin_lock(&chunk->lock);
chunk 108 drivers/gpu/drm/nouveau/nouveau_dmem.c clear_bit(idx, chunk->bitmap);
chunk 109 drivers/gpu/drm/nouveau/nouveau_dmem.c WARN_ON(!chunk->callocated);
chunk 110 drivers/gpu/drm/nouveau/nouveau_dmem.c chunk->callocated--;
chunk 115 drivers/gpu/drm/nouveau/nouveau_dmem.c spin_unlock(&chunk->lock);
chunk 212 drivers/gpu/drm/nouveau/nouveau_dmem.c struct nouveau_dmem_chunk *chunk;
chunk 219 drivers/gpu/drm/nouveau/nouveau_dmem.c chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
chunk 222 drivers/gpu/drm/nouveau/nouveau_dmem.c if (chunk == NULL) {
chunk 227 drivers/gpu/drm/nouveau/nouveau_dmem.c list_del(&chunk->list);
chunk 232 drivers/gpu/drm/nouveau/nouveau_dmem.c &chunk->bo);
chunk 236 drivers/gpu/drm/nouveau/nouveau_dmem.c ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
chunk 238 drivers/gpu/drm/nouveau/nouveau_dmem.c nouveau_bo_ref(NULL, &chunk->bo);
chunk 242 drivers/gpu/drm/nouveau/nouveau_dmem.c bitmap_zero(chunk->bitmap, DMEM_CHUNK_NPAGES);
chunk 243 drivers/gpu/drm/nouveau/nouveau_dmem.c spin_lock_init(&chunk->lock);
chunk 247 drivers/gpu/drm/nouveau/nouveau_dmem.c if (chunk->bo)
chunk 248 drivers/gpu/drm/nouveau/nouveau_dmem.c list_add(&chunk->list, &drm->dmem->chunk_empty);
chunk 250 drivers/gpu/drm/nouveau/nouveau_dmem.c list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
chunk 259 drivers/gpu/drm/nouveau/nouveau_dmem.c struct nouveau_dmem_chunk *chunk;
chunk 261 drivers/gpu/drm/nouveau/nouveau_dmem.c chunk = list_first_entry_or_null(&drm->dmem->chunk_free,
chunk 264 drivers/gpu/drm/nouveau/nouveau_dmem.c if (chunk)
chunk 265 drivers/gpu/drm/nouveau/nouveau_dmem.c return chunk;
chunk 267 drivers/gpu/drm/nouveau/nouveau_dmem.c chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
chunk 270 drivers/gpu/drm/nouveau/nouveau_dmem.c if (chunk->bo)
chunk 271 drivers/gpu/drm/nouveau/nouveau_dmem.c return chunk;
chunk 281 drivers/gpu/drm/nouveau/nouveau_dmem.c struct nouveau_dmem_chunk *chunk;
chunk 291 drivers/gpu/drm/nouveau/nouveau_dmem.c chunk = nouveau_dmem_chunk_first_free_locked(drm);
chunk 292 drivers/gpu/drm/nouveau/nouveau_dmem.c if (chunk == NULL) {
chunk 304 drivers/gpu/drm/nouveau/nouveau_dmem.c spin_lock(&chunk->lock);
chunk 305 drivers/gpu/drm/nouveau/nouveau_dmem.c i = find_first_zero_bit(chunk->bitmap, DMEM_CHUNK_NPAGES);
chunk 307 drivers/gpu/drm/nouveau/nouveau_dmem.c pages[c] = chunk->pfn_first + i;
chunk 308 drivers/gpu/drm/nouveau/nouveau_dmem.c set_bit(i, chunk->bitmap);
chunk 309 drivers/gpu/drm/nouveau/nouveau_dmem.c chunk->callocated++;
chunk 312 drivers/gpu/drm/nouveau/nouveau_dmem.c i = find_next_zero_bit(chunk->bitmap,
chunk 315 drivers/gpu/drm/nouveau/nouveau_dmem.c spin_unlock(&chunk->lock);
chunk 350 drivers/gpu/drm/nouveau/nouveau_dmem.c struct nouveau_dmem_chunk *chunk;
chunk 357 drivers/gpu/drm/nouveau/nouveau_dmem.c list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
chunk 358 drivers/gpu/drm/nouveau/nouveau_dmem.c ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
chunk 362 drivers/gpu/drm/nouveau/nouveau_dmem.c list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
chunk 363 drivers/gpu/drm/nouveau/nouveau_dmem.c ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
chunk 373 drivers/gpu/drm/nouveau/nouveau_dmem.c struct nouveau_dmem_chunk *chunk;
chunk 379 drivers/gpu/drm/nouveau/nouveau_dmem.c list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
chunk 380 drivers/gpu/drm/nouveau/nouveau_dmem.c nouveau_bo_unpin(chunk->bo);
chunk 382 drivers/gpu/drm/nouveau/nouveau_dmem.c list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
chunk 383 drivers/gpu/drm/nouveau/nouveau_dmem.c nouveau_bo_unpin(chunk->bo);
chunk 391 drivers/gpu/drm/nouveau/nouveau_dmem.c struct nouveau_dmem_chunk *chunk, *tmp;
chunk 401 drivers/gpu/drm/nouveau/nouveau_dmem.c list_for_each_entry_safe (chunk, tmp, &drm->dmem->chunk_empty, list) {
chunk 402 drivers/gpu/drm/nouveau/nouveau_dmem.c if (chunk->bo) {
chunk 403 drivers/gpu/drm/nouveau/nouveau_dmem.c nouveau_bo_unpin(chunk->bo);
chunk 404 drivers/gpu/drm/nouveau/nouveau_dmem.c nouveau_bo_ref(NULL, &chunk->bo);
chunk 406 drivers/gpu/drm/nouveau/nouveau_dmem.c list_del(&chunk->list);
chunk 407 drivers/gpu/drm/nouveau/nouveau_dmem.c kfree(chunk);
chunk 534 drivers/gpu/drm/nouveau/nouveau_dmem.c struct nouveau_dmem_chunk *chunk;
chunk 538 drivers/gpu/drm/nouveau/nouveau_dmem.c chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
chunk 539 drivers/gpu/drm/nouveau/nouveau_dmem.c if (chunk == NULL) {
chunk 544 drivers/gpu/drm/nouveau/nouveau_dmem.c chunk->drm = drm;
chunk 545 drivers/gpu/drm/nouveau/nouveau_dmem.c chunk->pfn_first = pfn_first + (i * DMEM_CHUNK_NPAGES);
chunk 546 drivers/gpu/drm/nouveau/nouveau_dmem.c list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
chunk 548 drivers/gpu/drm/nouveau/nouveau_dmem.c page = pfn_to_page(chunk->pfn_first);
chunk 550 drivers/gpu/drm/nouveau/nouveau_dmem.c page->zone_device_data = chunk;
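The nouveau_dmem.c entries above hand out device pages inside a chunk by scanning a bitmap under the chunk spinlock, with callocated counting live pages so an empty chunk can be reclaimed. A condensed sketch; the struct is trimmed to the fields used here, and NPAGES stands in for DMEM_CHUNK_NPAGES:

#include <linux/bitmap.h>
#include <linux/spinlock.h>

#define NPAGES 4096                     /* illustrative chunk size in pages */

struct dmem_chunk {
        spinlock_t lock;
        unsigned long bitmap[BITS_TO_LONGS(NPAGES)];
        unsigned long pfn_first;        /* pfn of the chunk's first page */
        unsigned long callocated;       /* pages currently allocated */
};

static long chunk_alloc_page(struct dmem_chunk *chunk)
{
        unsigned long i;

        spin_lock(&chunk->lock);
        i = find_first_zero_bit(chunk->bitmap, NPAGES);
        if (i >= NPAGES) {
                spin_unlock(&chunk->lock);
                return -1;              /* chunk is full */
        }
        set_bit(i, chunk->bitmap);
        chunk->callocated++;
        spin_unlock(&chunk->lock);

        return chunk->pfn_first + i;    /* pfn of the allocated page */
}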
chunk 420 drivers/gpu/drm/qxl/qxl_dev.h struct qxl_data_chunk chunk;
chunk 481 drivers/gpu/drm/qxl/qxl_dev.h struct qxl_data_chunk chunk;
chunk 760 drivers/gpu/drm/qxl/qxl_dev.h struct qxl_data_chunk chunk;
chunk 778 drivers/gpu/drm/qxl/qxl_dev.h struct qxl_data_chunk chunk;
chunk 638 drivers/gpu/drm/qxl/qxl_display.c cursor->chunk.next_chunk = 0;
chunk 639 drivers/gpu/drm/qxl/qxl_display.c cursor->chunk.prev_chunk = 0;
chunk 640 drivers/gpu/drm/qxl/qxl_display.c cursor->chunk.data_size = size;
chunk 641 drivers/gpu/drm/qxl/qxl_display.c memcpy(cursor->chunk.data, user_ptr, size);
chunk 53 drivers/gpu/drm/qxl/qxl_draw.c dev_clips->chunk.next_chunk = 0;
chunk 54 drivers/gpu/drm/qxl/qxl_draw.c dev_clips->chunk.prev_chunk = 0;
chunk 55 drivers/gpu/drm/qxl/qxl_draw.c dev_clips->chunk.data_size = sizeof(struct qxl_rect) * num_clips;
chunk 56 drivers/gpu/drm/qxl/qxl_draw.c return (struct qxl_rect *)dev_clips->chunk.data;
chunk 38 drivers/gpu/drm/qxl/qxl_image.c struct qxl_drm_chunk *chunk;
chunk 41 drivers/gpu/drm/qxl/qxl_image.c chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
chunk 42 drivers/gpu/drm/qxl/qxl_image.c if (!chunk)
chunk 45 drivers/gpu/drm/qxl/qxl_image.c ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
chunk 47 drivers/gpu/drm/qxl/qxl_image.c kfree(chunk);
chunk 51 drivers/gpu/drm/qxl/qxl_image.c list_add_tail(&chunk->head, &image->chunk_list);
chunk 88 drivers/gpu/drm/qxl/qxl_image.c struct qxl_drm_chunk *chunk, *tmp;
chunk 90 drivers/gpu/drm/qxl/qxl_image.c list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
chunk 91 drivers/gpu/drm/qxl/qxl_image.c qxl_bo_unref(&chunk->bo);
chunk 92 drivers/gpu/drm/qxl/qxl_image.c kfree(chunk);
chunk 110 drivers/gpu/drm/qxl/qxl_image.c struct qxl_data_chunk *chunk;
chunk 128 drivers/gpu/drm/qxl/qxl_image.c chunk = ptr;
chunk 129 drivers/gpu/drm/qxl/qxl_image.c chunk->data_size = height * chunk_stride;
chunk 130 drivers/gpu/drm/qxl/qxl_image.c chunk->prev_chunk = 0;
chunk 131 drivers/gpu/drm/qxl/qxl_image.c chunk->next_chunk = 0;
chunk 149 drivers/gpu/drm/qxl/qxl_image.c chunk = ptr;
chunk 150 drivers/gpu/drm/qxl/qxl_image.c k_data = chunk->data;
chunk 83 drivers/gpu/drm/radeon/radeon_cs.c struct radeon_cs_chunk *chunk;
chunk 92 drivers/gpu/drm/radeon/radeon_cs.c chunk = p->chunk_relocs;
chunk 95 drivers/gpu/drm/radeon/radeon_cs.c p->nrelocs = chunk->length_dw / 4;
chunk 109 drivers/gpu/drm/radeon/radeon_cs.c r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
chunk 78 drivers/gpu/drm/vboxvideo/vbva_base.c u32 chunk = len;
chunk 80 drivers/gpu/drm/vboxvideo/vbva_base.c if (chunk >= available) {
chunk 85 drivers/gpu/drm/vboxvideo/vbva_base.c if (chunk >= available) {
chunk 90 drivers/gpu/drm/vboxvideo/vbva_base.c chunk = available - vbva->partial_write_tresh;
chunk 93 drivers/gpu/drm/vboxvideo/vbva_base.c vbva_buffer_place_data_at(vbva_ctx, p, chunk,
chunk 96 drivers/gpu/drm/vboxvideo/vbva_base.c vbva->free_offset = (vbva->free_offset + chunk) %
chunk 98 drivers/gpu/drm/vboxvideo/vbva_base.c record->len_and_flags += chunk;
chunk 99 drivers/gpu/drm/vboxvideo/vbva_base.c available -= chunk;
chunk 100 drivers/gpu/drm/vboxvideo/vbva_base.c len -= chunk;
chunk 101 drivers/gpu/drm/vboxvideo/vbva_base.c p += chunk;
chunk 483 drivers/infiniband/hw/cxgb4/resource.c unsigned start, chunk, top;
chunk 490 drivers/infiniband/hw/cxgb4/resource.c chunk = rdev->lldi.vr->ocq.size;
chunk 491 drivers/infiniband/hw/cxgb4/resource.c top = start + chunk;
chunk 494 drivers/infiniband/hw/cxgb4/resource.c chunk = min(top - start + 1, chunk);
chunk 495 drivers/infiniband/hw/cxgb4/resource.c if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
chunk 497 drivers/infiniband/hw/cxgb4/resource.c start, chunk);
chunk 498 drivers/infiniband/hw/cxgb4/resource.c if (chunk <= 1024 << MIN_OCQP_SHIFT) {
chunk 503 drivers/infiniband/hw/cxgb4/resource.c chunk >>= 1;
chunk 506 drivers/infiniband/hw/cxgb4/resource.c start, chunk);
chunk 507 drivers/infiniband/hw/cxgb4/resource.c start += chunk;
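The cxgb4 entries above register an address range with a gen_pool in chunks, halving the chunk size whenever gen_pool_add() fails until a floor is reached, so a fragmented range can still be added piecewise. A sketch of that retry loop using the real genalloc API (the min_chunk floor parameter is illustrative):

#include <linux/genalloc.h>

static int pool_add_range(struct gen_pool *pool, unsigned long start,
                          unsigned long top, unsigned long min_chunk)
{
        unsigned long chunk = top - start;

        while (start < top) {
                if (chunk > top - start)
                        chunk = top - start;    /* clamp the final piece */

                if (gen_pool_add(pool, start, chunk, -1)) {
                        if (chunk <= min_chunk)
                                return -ENOMEM; /* refuse to go smaller */
                        chunk >>= 1;            /* halve and retry */
                        continue;
                }
                start += chunk;
        }
        return 0;
}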
chunk 282 drivers/infiniband/hw/hns/hns_roce_hem.c struct hns_roce_hem_chunk *chunk = NULL;
chunk 301 drivers/infiniband/hw/hns/hns_roce_hem.c if (!chunk) {
chunk 302 drivers/infiniband/hw/hns/hns_roce_hem.c chunk = kmalloc(sizeof(*chunk),
chunk 304 drivers/infiniband/hw/hns/hns_roce_hem.c if (!chunk)
chunk 307 drivers/infiniband/hw/hns/hns_roce_hem.c sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
chunk 308 drivers/infiniband/hw/hns/hns_roce_hem.c chunk->npages = 0;
chunk 309 drivers/infiniband/hw/hns/hns_roce_hem.c chunk->nsg = 0;
chunk 310 drivers/infiniband/hw/hns/hns_roce_hem.c memset(chunk->buf, 0, sizeof(chunk->buf));
chunk 311 drivers/infiniband/hw/hns/hns_roce_hem.c list_add_tail(&chunk->list, &hem->chunk_list);
chunk 321 drivers/infiniband/hw/hns/hns_roce_hem.c mem = &chunk->mem[chunk->npages];
chunk 327 drivers/infiniband/hw/hns/hns_roce_hem.c chunk->buf[chunk->npages] = buf;
chunk 330 drivers/infiniband/hw/hns/hns_roce_hem.c ++chunk->npages;
chunk 331 drivers/infiniband/hw/hns/hns_roce_hem.c ++chunk->nsg;
chunk 344 drivers/infiniband/hw/hns/hns_roce_hem.c struct hns_roce_hem_chunk *chunk, *tmp;
chunk 350 drivers/infiniband/hw/hns/hns_roce_hem.c list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
chunk 351 drivers/infiniband/hw/hns/hns_roce_hem.c for (i = 0; i < chunk->npages; ++i)
chunk 353 drivers/infiniband/hw/hns/hns_roce_hem.c sg_dma_len(&chunk->mem[i]),
chunk 354 drivers/infiniband/hw/hns/hns_roce_hem.c chunk->buf[i],
chunk 355 drivers/infiniband/hw/hns/hns_roce_hem.c sg_dma_address(&chunk->mem[i]));
chunk 356 drivers/infiniband/hw/hns/hns_roce_hem.c kfree(chunk);
chunk 805 drivers/infiniband/hw/hns/hns_roce_hem.c struct hns_roce_hem_chunk *chunk;
chunk 851 drivers/infiniband/hw/hns/hns_roce_hem.c list_for_each_entry(chunk, &hem->chunk_list, list) {
chunk 852 drivers/infiniband/hw/hns/hns_roce_hem.c for (i = 0; i < chunk->npages; ++i) {
chunk 853 drivers/infiniband/hw/hns/hns_roce_hem.c length = sg_dma_len(&chunk->mem[i]);
chunk 857 drivers/infiniband/hw/hns/hns_roce_hem.c &chunk->mem[i]) + dma_offset;
chunk 862 drivers/infiniband/hw/hns/hns_roce_hem.c addr = chunk->buf[i] + offset;
chunk 96 drivers/infiniband/hw/hns/hns_roce_hem.h struct hns_roce_hem_chunk *chunk;
chunk 154 drivers/infiniband/hw/hns/hns_roce_hem.h iter->chunk = list_empty(&hem->chunk_list) ? NULL :
chunk 162 drivers/infiniband/hw/hns/hns_roce_hem.h return !iter->chunk;
chunk 167 drivers/infiniband/hw/hns/hns_roce_hem.h if (++iter->page_idx >= iter->chunk->nsg) {
chunk 168 drivers/infiniband/hw/hns/hns_roce_hem.h if (iter->chunk->list.next == &iter->hem->chunk_list) {
chunk 169 drivers/infiniband/hw/hns/hns_roce_hem.h iter->chunk = NULL;
chunk 173 drivers/infiniband/hw/hns/hns_roce_hem.h iter->chunk = list_entry(iter->chunk->list.next,
chunk 181 drivers/infiniband/hw/hns/hns_roce_hem.h return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
chunk 847 drivers/infiniband/hw/hns/hns_roce_mr.c int chunk;
chunk 875 drivers/infiniband/hw/hns/hns_roce_mr.c chunk = min_t(int, bt_page_size / sizeof(u64), npages);
chunk 877 drivers/infiniband/hw/hns/hns_roce_mr.c ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
chunk 882 drivers/infiniband/hw/hns/hns_roce_mr.c npages -= chunk;
chunk 883 drivers/infiniband/hw/hns/hns_roce_mr.c start_index += chunk;
chunk 884 drivers/infiniband/hw/hns/hns_roce_mr.c page_list += chunk;
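The hns_roce_hem.h iterator entries above flatten a two-level structure, a list of chunks each holding several scatterlist entries, into one stream of DMA addresses. A condensed sketch using nested loops instead of the driver's stateful iterator; the struct is trimmed to what the walk needs and the array bound is illustrative:

#include <linux/list.h>
#include <linux/scatterlist.h>

struct hem_chunk {
        struct list_head list;
        int nsg;                        /* mapped entries in mem[] */
        struct scatterlist mem[8];      /* illustrative bound */
};

static void for_each_hem_page(struct list_head *chunk_list,
                              void (*fn)(dma_addr_t addr, unsigned int len))
{
        struct hem_chunk *chunk;
        int i;

        list_for_each_entry(chunk, chunk_list, list)
                for (i = 0; i < chunk->nsg; i++)
                        fn(sg_dma_address(&chunk->mem[i]),
                           sg_dma_len(&chunk->mem[i]));
}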
chunk 53 drivers/infiniband/hw/i40iw/i40iw_pble.c static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk);
chunk 63 drivers/infiniband/hw/i40iw/i40iw_pble.c struct i40iw_chunk *chunk;
chunk 68 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk = list_entry(clist, struct i40iw_chunk, list);
chunk 69 drivers/infiniband/hw/i40iw/i40iw_pble.c if (chunk->type == I40IW_VMALLOC)
chunk 70 drivers/infiniband/hw/i40iw/i40iw_pble.c i40iw_free_vmalloc_mem(dev->hw, chunk);
chunk 71 drivers/infiniband/hw/i40iw/i40iw_pble.c kfree(chunk);
chunk 138 drivers/infiniband/hw/i40iw/i40iw_pble.c struct i40iw_chunk *chunk = info->chunk;
chunk 151 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->type = I40IW_DMA_COHERENT;
chunk 155 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->size = info->pages << I40IW_HMC_PAGED_BP_SHIFT;
chunk 156 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->vaddr = ((u8 *)sd_entry->u.bp.addr.va + offset);
chunk 157 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->fpm_addr = pble_rsrc->next_fpm_addr;
chunk 159 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
chunk 168 drivers/infiniband/hw/i40iw/i40iw_pble.c static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk)
chunk 173 drivers/infiniband/hw/i40iw/i40iw_pble.c if (!chunk->pg_cnt)
chunk 175 drivers/infiniband/hw/i40iw/i40iw_pble.c for (i = 0; i < chunk->pg_cnt; i++)
chunk 176 drivers/infiniband/hw/i40iw/i40iw_pble.c dma_unmap_page(&pcidev->dev, chunk->dmaaddrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
chunk 179 drivers/infiniband/hw/i40iw/i40iw_pble.c kfree(chunk->dmaaddrs);
chunk 180 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->dmaaddrs = NULL;
chunk 181 drivers/infiniband/hw/i40iw/i40iw_pble.c vfree(chunk->vaddr);
chunk 182 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->vaddr = NULL;
chunk 183 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->type = 0;
chunk 193 drivers/infiniband/hw/i40iw/i40iw_pble.c struct i40iw_chunk *chunk,
chunk 202 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
chunk 203 drivers/infiniband/hw/i40iw/i40iw_pble.c if (!chunk->dmaaddrs)
chunk 206 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->vaddr = vmalloc(size);
chunk 207 drivers/infiniband/hw/i40iw/i40iw_pble.c if (!chunk->vaddr) {
chunk 208 drivers/infiniband/hw/i40iw/i40iw_pble.c kfree(chunk->dmaaddrs);
chunk 209 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->dmaaddrs = NULL;
chunk 212 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->size = size;
chunk 213 drivers/infiniband/hw/i40iw/i40iw_pble.c addr = (u8 *)chunk->vaddr;
chunk 218 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->dmaaddrs[i] = dma_map_page(&pcidev->dev, page, 0,
chunk 220 drivers/infiniband/hw/i40iw/i40iw_pble.c if (dma_mapping_error(&pcidev->dev, chunk->dmaaddrs[i]))
chunk 225 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->pg_cnt = i;
chunk 226 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->type = I40IW_VMALLOC;
chunk 230 drivers/infiniband/hw/i40iw/i40iw_pble.c i40iw_free_vmalloc_mem(hw, chunk);
chunk 259 drivers/infiniband/hw/i40iw/i40iw_pble.c struct i40iw_chunk *chunk = info->chunk;
chunk 266 drivers/infiniband/hw/i40iw/i40iw_pble.c status = i40iw_get_vmalloc_mem(dev->hw, chunk, info->pages);
chunk 284 drivers/infiniband/hw/i40iw/i40iw_pble.c addr = chunk->vaddr;
chunk 286 drivers/infiniband/hw/i40iw/i40iw_pble.c mem.pa = chunk->dmaaddrs[i];
chunk 312 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->fpm_addr = pble_rsrc->next_fpm_addr;
chunk 315 drivers/infiniband/hw/i40iw/i40iw_pble.c i40iw_free_vmalloc_mem(dev->hw, chunk);
chunk 329 drivers/infiniband/hw/i40iw/i40iw_pble.c struct i40iw_chunk *chunk;
chunk 343 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
chunk 344 drivers/infiniband/hw/i40iw/i40iw_pble.c if (!chunk)
chunk 347 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->fpm_addr = pble_rsrc->next_fpm_addr;
chunk 353 drivers/infiniband/hw/i40iw/i40iw_pble.c info.chunk = chunk;
chunk 385 drivers/infiniband/hw/i40iw/i40iw_pble.c if (gen_pool_add_virt(pble_rsrc->pinfo.pool, (unsigned long)chunk->vaddr,
chunk 386 drivers/infiniband/hw/i40iw/i40iw_pble.c (phys_addr_t)chunk->fpm_addr, chunk->size, -1)) {
chunk 391 drivers/infiniband/hw/i40iw/i40iw_pble.c pble_rsrc->next_fpm_addr += chunk->size;
chunk 393 drivers/infiniband/hw/i40iw/i40iw_pble.c pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
chunk 394 drivers/infiniband/hw/i40iw/i40iw_pble.c pble_rsrc->unallocated_pble -= (chunk->size >> 3);
chunk 395 drivers/infiniband/hw/i40iw/i40iw_pble.c list_add(&chunk->list, &pble_rsrc->pinfo.clist);
chunk 413 drivers/infiniband/hw/i40iw/i40iw_pble.c kfree(chunk);
chunk 83 drivers/infiniband/hw/i40iw/i40iw_pble.h struct i40iw_chunk *chunk;
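The i40iw_pble.c entries above back a chunk with vmalloc() memory and then stream-map each page into the device one at a time, unwinding on failure. A sketch of that mapping loop; the use of vmalloc_to_page() to translate each vmalloc page is an assumption inferred from the surrounding code, since that line does not appear in the listing:

#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>

static int map_vmalloc_chunk(struct device *dev, void *vaddr, u32 pg_cnt,
                             dma_addr_t *dmaaddrs)
{
        u8 *addr = vaddr;
        u32 i;

        for (i = 0; i < pg_cnt; i++) {
                struct page *page = vmalloc_to_page(addr);

                if (!page)
                        goto unwind;
                dmaaddrs[i] = dma_map_page(dev, page, 0, PAGE_SIZE,
                                           DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, dmaaddrs[i]))
                        goto unwind;
                addr += PAGE_SIZE;
        }
        return 0;

unwind:
        while (i--)                     /* unmap the pages mapped so far */
                dma_unmap_page(dev, dmaaddrs[i], PAGE_SIZE,
                               DMA_BIDIRECTIONAL);
        return -ENOMEM;
}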
chunk 64 drivers/infiniband/hw/mthca/mthca_memfree.c static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
chunk 68 drivers/infiniband/hw/mthca/mthca_memfree.c if (chunk->nsg > 0)
chunk 69 drivers/infiniband/hw/mthca/mthca_memfree.c pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
chunk 72 drivers/infiniband/hw/mthca/mthca_memfree.c for (i = 0; i < chunk->npages; ++i)
chunk 73 drivers/infiniband/hw/mthca/mthca_memfree.c __free_pages(sg_page(&chunk->mem[i]),
chunk 74 drivers/infiniband/hw/mthca/mthca_memfree.c get_order(chunk->mem[i].length));
chunk 77 drivers/infiniband/hw/mthca/mthca_memfree.c static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
chunk 81 drivers/infiniband/hw/mthca/mthca_memfree.c for (i = 0; i < chunk->npages; ++i) {
chunk 82 drivers/infiniband/hw/mthca/mthca_memfree.c dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
chunk 83 drivers/infiniband/hw/mthca/mthca_memfree.c lowmem_page_address(sg_page(&chunk->mem[i])),
chunk 84 drivers/infiniband/hw/mthca/mthca_memfree.c sg_dma_address(&chunk->mem[i]));
chunk 90 drivers/infiniband/hw/mthca/mthca_memfree.c struct mthca_icm_chunk *chunk, *tmp;
chunk 95 drivers/infiniband/hw/mthca/mthca_memfree.c list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
chunk 97 drivers/infiniband/hw/mthca/mthca_memfree.c mthca_free_icm_coherent(dev, chunk);
chunk 99 drivers/infiniband/hw/mthca/mthca_memfree.c mthca_free_icm_pages(dev, chunk);
chunk 101 drivers/infiniband/hw/mthca/mthca_memfree.c kfree(chunk);
chunk 141 drivers/infiniband/hw/mthca/mthca_memfree.c struct mthca_icm_chunk *chunk = NULL;
chunk 158 drivers/infiniband/hw/mthca/mthca_memfree.c if (!chunk) {
chunk 159 drivers/infiniband/hw/mthca/mthca_memfree.c chunk = kmalloc(sizeof *chunk,
chunk 161 drivers/infiniband/hw/mthca/mthca_memfree.c if (!chunk)
chunk 164 drivers/infiniband/hw/mthca/mthca_memfree.c sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
chunk 165 drivers/infiniband/hw/mthca/mthca_memfree.c chunk->npages = 0;
chunk 166 drivers/infiniband/hw/mthca/mthca_memfree.c chunk->nsg = 0;
chunk 167 drivers/infiniband/hw/mthca/mthca_memfree.c list_add_tail(&chunk->list, &icm->chunk_list);
chunk 175 drivers/infiniband/hw/mthca/mthca_memfree.c &chunk->mem[chunk->npages],
chunk 178 drivers/infiniband/hw/mthca/mthca_memfree.c ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
chunk 182 drivers/infiniband/hw/mthca/mthca_memfree.c ++chunk->npages;
chunk 185 drivers/infiniband/hw/mthca/mthca_memfree.c ++chunk->nsg;
chunk 186 drivers/infiniband/hw/mthca/mthca_memfree.c else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
chunk 187 drivers/infiniband/hw/mthca/mthca_memfree.c chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
chunk 188 drivers/infiniband/hw/mthca/mthca_memfree.c chunk->npages,
chunk 191 drivers/infiniband/hw/mthca/mthca_memfree.c if (chunk->nsg <= 0)
chunk 195 drivers/infiniband/hw/mthca/mthca_memfree.c if (chunk->npages == MTHCA_ICM_CHUNK_LEN)
chunk 196 drivers/infiniband/hw/mthca/mthca_memfree.c chunk = NULL;
chunk 206 drivers/infiniband/hw/mthca/mthca_memfree.c if (!coherent && chunk) {
chunk 207 drivers/infiniband/hw/mthca/mthca_memfree.c chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
chunk 208 drivers/infiniband/hw/mthca/mthca_memfree.c chunk->npages,
chunk 211 drivers/infiniband/hw/mthca/mthca_memfree.c if (chunk->nsg <= 0)
chunk 281 drivers/infiniband/hw/mthca/mthca_memfree.c struct mthca_icm_chunk *chunk;
chunk 297 drivers/infiniband/hw/mthca/mthca_memfree.c list_for_each_entry(chunk, &icm->chunk_list, list) {
chunk 298 drivers/infiniband/hw/mthca/mthca_memfree.c for (i = 0; i < chunk->npages; ++i) {
chunk 300 drivers/infiniband/hw/mthca/mthca_memfree.c if (sg_dma_len(&chunk->mem[i]) > dma_offset)
chunk 301 drivers/infiniband/hw/mthca/mthca_memfree.c *dma_handle = sg_dma_address(&chunk->mem[i]) +
chunk 303 drivers/infiniband/hw/mthca/mthca_memfree.c dma_offset -= sg_dma_len(&chunk->mem[i]);
chunk 308 drivers/infiniband/hw/mthca/mthca_memfree.c if (chunk->mem[i].length > offset) {
chunk 309 drivers/infiniband/hw/mthca/mthca_memfree.c page = sg_page(&chunk->mem[i]);
chunk 312 drivers/infiniband/hw/mthca/mthca_memfree.c offset -= chunk->mem[i].length;
chunk 76 drivers/infiniband/hw/mthca/mthca_memfree.h struct mthca_icm_chunk *chunk;
chunk 103 drivers/infiniband/hw/mthca/mthca_memfree.h iter->chunk = list_empty(&icm->chunk_list) ?
chunk 111 drivers/infiniband/hw/mthca/mthca_memfree.h return !iter->chunk;
chunk 116 drivers/infiniband/hw/mthca/mthca_memfree.h if (++iter->page_idx >= iter->chunk->nsg) {
chunk 117 drivers/infiniband/hw/mthca/mthca_memfree.h if (iter->chunk->list.next == &iter->icm->chunk_list) {
chunk 118 drivers/infiniband/hw/mthca/mthca_memfree.h iter->chunk = NULL;
chunk 122 drivers/infiniband/hw/mthca/mthca_memfree.h iter->chunk = list_entry(iter->chunk->list.next,
chunk 130 drivers/infiniband/hw/mthca/mthca_memfree.h return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
chunk 135 drivers/infiniband/hw/mthca/mthca_memfree.h return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
chunk 362 drivers/infiniband/hw/mthca/mthca_mr.c int chunk;
chunk 369 drivers/infiniband/hw/mthca/mthca_mr.c chunk = min(size, list_len);
chunk 372 drivers/infiniband/hw/mthca/mthca_mr.c buffer_list, chunk);
chunk 375 drivers/infiniband/hw/mthca/mthca_mr.c buffer_list, chunk);
chunk 377 drivers/infiniband/hw/mthca/mthca_mr.c list_len -= chunk;
chunk 378 drivers/infiniband/hw/mthca/mthca_mr.c start_index += chunk;
chunk 379 drivers/infiniband/hw/mthca/mthca_mr.c buffer_list += chunk;
chunk 281 drivers/infiniband/hw/qib/qib_driver.c const u32 chunk = etail >> rcd->rcvegrbufs_perchunk_shift;
chunk 284 drivers/infiniband/hw/qib/qib_driver.c return rcd->rcvegrbuf[chunk] + (idx << rcd->dd->rcvegrbufsize_shift);
chunk 1623 drivers/infiniband/hw/qib/qib_init.c unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
chunk 1640 drivers/infiniband/hw/qib/qib_init.c chunk = rcd->rcvegrbuf_chunks;
chunk 1645 drivers/infiniband/hw/qib/qib_init.c kcalloc_node(chunk, sizeof(rcd->rcvegrbuf[0]),
chunk 1652 drivers/infiniband/hw/qib/qib_init.c kmalloc_array_node(chunk,
chunk 1675 drivers/infiniband/hw/qib/qib_init.c for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) {
chunk 1676 drivers/infiniband/hw/qib/qib_init.c dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
chunk 1680 drivers/infiniband/hw/qib/qib_init.c memset(rcd->rcvegrbuf[chunk], 0, size);
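The mthca_memfree.c entries above locate the page backing a byte offset into an ICM table by walking the chunk list and skipping whole scatterlist entries until the offset falls inside one. A condensed sketch of that walk, with the struct trimmed to what the lookup needs and the array bound illustrative:

#include <linux/list.h>
#include <linux/scatterlist.h>

struct icm_chunk {
        struct list_head list;
        int npages;
        struct scatterlist mem[8];      /* illustrative bound */
};

static struct page *icm_find_page(struct list_head *chunk_list, u64 offset)
{
        struct icm_chunk *chunk;
        int i;

        list_for_each_entry(chunk, chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i) {
                        if (chunk->mem[i].length > offset)
                                return sg_page(&chunk->mem[i]);
                        offset -= chunk->mem[i].length;
                }
        }
        return NULL;    /* offset lies past the end of the table */
}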
chunk 90 drivers/infiniband/hw/usnic/usnic_ib_verbs.c struct usnic_vnic_res_chunk *chunk;
chunk 115 drivers/infiniband/hw/usnic/usnic_ib_verbs.c chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
chunk 116 drivers/infiniband/hw/usnic/usnic_ib_verbs.c if (IS_ERR(chunk)) {
chunk 120 drivers/infiniband/hw/usnic/usnic_ib_verbs.c PTR_ERR(chunk));
chunk 121 drivers/infiniband/hw/usnic/usnic_ib_verbs.c return PTR_ERR(chunk);
chunk 124 drivers/infiniband/hw/usnic/usnic_ib_verbs.c WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
chunk 125 drivers/infiniband/hw/usnic/usnic_ib_verbs.c resp.rq_cnt = chunk->cnt;
chunk 126 drivers/infiniband/hw/usnic/usnic_ib_verbs.c for (i = 0; i < chunk->cnt; i++)
chunk 127 drivers/infiniband/hw/usnic/usnic_ib_verbs.c resp.rq_idx[i] = chunk->res[i]->vnic_idx;
chunk 129 drivers/infiniband/hw/usnic/usnic_ib_verbs.c chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
chunk 130 drivers/infiniband/hw/usnic/usnic_ib_verbs.c if (IS_ERR(chunk)) {
chunk 134 drivers/infiniband/hw/usnic/usnic_ib_verbs.c PTR_ERR(chunk));
chunk 135 drivers/infiniband/hw/usnic/usnic_ib_verbs.c return PTR_ERR(chunk);
chunk 138 drivers/infiniband/hw/usnic/usnic_ib_verbs.c WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
chunk 139 drivers/infiniband/hw/usnic/usnic_ib_verbs.c resp.wq_cnt = chunk->cnt;
chunk 140 drivers/infiniband/hw/usnic/usnic_ib_verbs.c for (i = 0; i < chunk->cnt; i++)
chunk 141 drivers/infiniband/hw/usnic/usnic_ib_verbs.c resp.wq_idx[i] = chunk->res[i]->vnic_idx;
chunk 143 drivers/infiniband/hw/usnic/usnic_ib_verbs.c chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
chunk 144 drivers/infiniband/hw/usnic/usnic_ib_verbs.c if (IS_ERR(chunk)) {
chunk 148 drivers/infiniband/hw/usnic/usnic_ib_verbs.c PTR_ERR(chunk));
chunk 149 drivers/infiniband/hw/usnic/usnic_ib_verbs.c return PTR_ERR(chunk);
chunk 152 drivers/infiniband/hw/usnic/usnic_ib_verbs.c WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
chunk 153 drivers/infiniband/hw/usnic/usnic_ib_verbs.c resp.cq_cnt = chunk->cnt;
chunk 154 drivers/infiniband/hw/usnic/usnic_ib_verbs.c for (i = 0; i < chunk->cnt; i++)
chunk 155 drivers/infiniband/hw/usnic/usnic_ib_verbs.c resp.cq_idx[i] = chunk->res[i]->vnic_idx;
chunk 68 drivers/infiniband/hw/usnic/usnic_uiom.c struct usnic_uiom_chunk *chunk, *tmp;
chunk 74 drivers/infiniband/hw/usnic/usnic_uiom.c list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
chunk 75 drivers/infiniband/hw/usnic/usnic_uiom.c for_each_sg(chunk->page_list, sg, chunk->nents, i) {
chunk 81 drivers/infiniband/hw/usnic/usnic_uiom.c kfree(chunk);
chunk 91 drivers/infiniband/hw/usnic/usnic_uiom.c struct usnic_uiom_chunk *chunk;
chunk 157 drivers/infiniband/hw/usnic/usnic_uiom.c chunk = kmalloc(struct_size(chunk, page_list,
chunk 160 drivers/infiniband/hw/usnic/usnic_uiom.c if (!chunk) {
chunk 165 drivers/infiniband/hw/usnic/usnic_uiom.c chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
chunk 166 drivers/infiniband/hw/usnic/usnic_uiom.c sg_init_table(chunk->page_list, chunk->nents);
chunk 167 drivers/infiniband/hw/usnic/usnic_uiom.c for_each_sg(chunk->page_list, sg, chunk->nents, i) {
chunk 174 drivers/infiniband/hw/usnic/usnic_uiom.c cur_base += chunk->nents * PAGE_SIZE;
chunk 175 drivers/infiniband/hw/usnic/usnic_uiom.c ret -= chunk->nents;
chunk 176 drivers/infiniband/hw/usnic/usnic_uiom.c off += chunk->nents;
chunk 177 drivers/infiniband/hw/usnic/usnic_uiom.c list_add_tail(&chunk->list, chunk_list);
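The usnic_uiom.c allocation entries above size a chunk that ends in a flexible scatterlist array with struct_size(), which guards the header-plus-array computation against integer overflow. A minimal sketch of the same allocation shape:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>

struct page_chunk {
        struct list_head list;
        int nents;
        struct scatterlist page_list[]; /* flexible array member */
};

static struct page_chunk *alloc_page_chunk(int nents)
{
        struct page_chunk *chunk;

        /* struct_size() == sizeof(*chunk) + nents * sizeof(page_list[0]),
         * with overflow checking. */
        chunk = kmalloc(struct_size(chunk, page_list, nents), GFP_KERNEL);
        if (!chunk)
                return NULL;

        chunk->nents = nents;
        sg_init_table(chunk->page_list, nents);
        return chunk;
}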
chunk 249 drivers/infiniband/hw/usnic/usnic_uiom.c struct usnic_uiom_chunk *chunk;
chunk 260 drivers/infiniband/hw/usnic/usnic_uiom.c chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
chunk 264 drivers/infiniband/hw/usnic/usnic_uiom.c for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
chunk 265 drivers/infiniband/hw/usnic/usnic_uiom.c pa = sg_phys(&chunk->page_list[i]);
chunk 315 drivers/infiniband/hw/usnic/usnic_uiom.c if (i == chunk->nents) {
chunk 320 drivers/infiniband/hw/usnic/usnic_uiom.c chunk = list_first_entry(&chunk->list,
chunk 96 drivers/infiniband/hw/usnic/usnic_vnic.c struct usnic_vnic_res_chunk *chunk;
chunk 119 drivers/infiniband/hw/usnic/usnic_vnic.c chunk = &vnic->chunks[i];
chunk 120 drivers/infiniband/hw/usnic/usnic_vnic.c for (j = 0; j < chunk->cnt; j++) {
chunk 121 drivers/infiniband/hw/usnic/usnic_vnic.c res = chunk->res[j];
chunk 274 drivers/infiniband/hw/usnic/usnic_vnic.c void usnic_vnic_put_resources(struct usnic_vnic_res_chunk *chunk)
chunk 279 drivers/infiniband/hw/usnic/usnic_vnic.c struct usnic_vnic *vnic = chunk->vnic;
chunk 281 drivers/infiniband/hw/usnic/usnic_vnic.c if (chunk->cnt > 0) {
chunk 283 drivers/infiniband/hw/usnic/usnic_vnic.c while ((i = --chunk->cnt) >= 0) {
chunk 284 drivers/infiniband/hw/usnic/usnic_vnic.c res = chunk->res[i];
chunk 285 drivers/infiniband/hw/usnic/usnic_vnic.c chunk->res[i] = NULL;
chunk 292 drivers/infiniband/hw/usnic/usnic_vnic.c kfree(chunk->res);
chunk 293 drivers/infiniband/hw/usnic/usnic_vnic.c kfree(chunk);
chunk 303 drivers/infiniband/hw/usnic/usnic_vnic.c struct usnic_vnic_res_chunk *chunk)
chunk 314 drivers/infiniband/hw/usnic/usnic_vnic.c chunk->cnt = chunk->free_cnt = cnt;
chunk 315 drivers/infiniband/hw/usnic/usnic_vnic.c chunk->res = kcalloc(cnt, sizeof(*(chunk->res)), GFP_KERNEL);
chunk 316 drivers/infiniband/hw/usnic/usnic_vnic.c if (!chunk->res)
chunk 330 drivers/infiniband/hw/usnic/usnic_vnic.c chunk->res[i] = res;
chunk 333 drivers/infiniband/hw/usnic/usnic_vnic.c chunk->vnic = vnic;
chunk 337 drivers/infiniband/hw/usnic/usnic_vnic.c kfree(chunk->res[i]);
chunk 338 drivers/infiniband/hw/usnic/usnic_vnic.c kfree(chunk->res);
chunk 342 drivers/infiniband/hw/usnic/usnic_vnic.c static void usnic_vnic_free_res_chunk(struct usnic_vnic_res_chunk *chunk)
chunk 345 drivers/infiniband/hw/usnic/usnic_vnic.c for (i = 0; i < chunk->cnt; i++)
chunk 346 drivers/infiniband/hw/usnic/usnic_vnic.c kfree(chunk->res[i]);
chunk 347 drivers/infiniband/hw/usnic/usnic_vnic.c kfree(chunk->res);
chunk 110 drivers/infiniband/hw/usnic/usnic_vnic.h void usnic_vnic_put_resources(struct usnic_vnic_res_chunk *chunk);
chunk 63 drivers/infiniband/sw/siw/siw_mem.c static void siw_free_plist(struct siw_page_chunk *chunk, int num_pages,
chunk 66 drivers/infiniband/sw/siw/siw_mem.c put_user_pages_dirty_lock(chunk->plist, num_pages, dirty);
chunk 510 drivers/input/misc/ims-pcu.c u8 command, int chunk, int len)
chunk 522 drivers/input/misc/ims-pcu.c command, chunk, error);
chunk 533 drivers/input/misc/ims-pcu.c int chunk = 0;
chunk 561 drivers/input/misc/ims-pcu.c ++chunk, count);
chunk 579 drivers/input/misc/ims-pcu.c error = ims_pcu_send_cmd_chunk(pcu, command, ++chunk, count);
chunk 592 drivers/input/misc/ims-pcu.c return ims_pcu_send_cmd_chunk(pcu, command, ++chunk, count);
chunk 686 drivers/input/touchscreen/wdt87xx_i2c.c static int wdt87xx_write_firmware(struct i2c_client *client, const void *chunk)
chunk 688 drivers/input/touchscreen/wdt87xx_i2c.c u32 start_addr = get_unaligned_le32(chunk + FW_CHUNK_TGT_START_OFFSET);
chunk 689 drivers/input/touchscreen/wdt87xx_i2c.c u32 size = get_unaligned_le32(chunk + FW_CHUNK_PAYLOAD_LEN_OFFSET);
chunk 690 drivers/input/touchscreen/wdt87xx_i2c.c const void *data = chunk + FW_CHUNK_PAYLOAD_OFFSET;
chunk 63 drivers/infiniband/sw/siw/siw_mem.c static void siw_free_plist(struct siw_page_chunk *chunk, int num_pages,
chunk 66 drivers/infiniband/sw/siw/siw_mem.c put_user_pages_dirty_lock(chunk->plist, num_pages, dirty);
chunk 510 drivers/input/misc/ims-pcu.c u8 command, int chunk, int len)
chunk 522 drivers/input/misc/ims-pcu.c command, chunk, error);
chunk 533 drivers/input/misc/ims-pcu.c int chunk = 0;
chunk 561 drivers/input/misc/ims-pcu.c ++chunk, count);
chunk 579 drivers/input/misc/ims-pcu.c error = ims_pcu_send_cmd_chunk(pcu, command, ++chunk, count);
chunk 592 drivers/input/misc/ims-pcu.c return ims_pcu_send_cmd_chunk(pcu, command, ++chunk, count);
chunk 686 drivers/input/touchscreen/wdt87xx_i2c.c static int wdt87xx_write_firmware(struct i2c_client *client, const void *chunk)
chunk 688 drivers/input/touchscreen/wdt87xx_i2c.c u32 start_addr = get_unaligned_le32(chunk + FW_CHUNK_TGT_START_OFFSET);
chunk 689 drivers/input/touchscreen/wdt87xx_i2c.c u32 size = get_unaligned_le32(chunk + FW_CHUNK_PAYLOAD_LEN_OFFSET);
chunk 690 drivers/input/touchscreen/wdt87xx_i2c.c const void *data = chunk + FW_CHUNK_PAYLOAD_OFFSET;
chunk 788 drivers/input/touchscreen/wdt87xx_i2c.c const void *chunk;
chunk 791 drivers/input/touchscreen/wdt87xx_i2c.c chunk = wdt87xx_get_fw_chunk(fw, ck_id);
chunk 792 drivers/input/touchscreen/wdt87xx_i2c.c if (!chunk) {
chunk 798 drivers/input/touchscreen/wdt87xx_i2c.c error = wdt87xx_validate_fw_chunk(chunk, ck_id);
chunk 805 drivers/input/touchscreen/wdt87xx_i2c.c error = wdt87xx_write_firmware(client, chunk);
chunk 342 drivers/irqchip/irq-ixp4xx.c const struct ixp4xx_irq_chunk *chunk = &ixp4xx_irq_chunks[i];
chunk 345 drivers/irqchip/irq-ixp4xx.c chunk->irq, chunk->irq + chunk->nr_irqs - 1,
chunk 346 drivers/irqchip/irq-ixp4xx.c chunk->hwirq, chunk->hwirq + chunk->nr_irqs - 1);
chunk 348 drivers/irqchip/irq-ixp4xx.c fwspec.param[0] = chunk->hwirq;
chunk 352 drivers/irqchip/irq-ixp4xx.c chunk->irq,
chunk 353 drivers/irqchip/irq-ixp4xx.c chunk->nr_irqs,
chunk 83 drivers/lightnvm/pblk-core.c struct nvm_chk_meta *chunk;
chunk 89 drivers/lightnvm/pblk-core.c chunk = &line->chks[pos];
chunk 97 drivers/lightnvm/pblk-core.c chunk->state = NVM_CHK_ST_OFFLINE;
chunk 103 drivers/lightnvm/pblk-core.c chunk->state = NVM_CHK_ST_FREE;
chunk 107 drivers/lightnvm/pblk-core.c chunk->state);
chunk 532 drivers/lightnvm/pblk-core.c struct nvm_chk_meta *chunk = pblk_dev_ppa_to_chunk(pblk, *ppa);
chunk 538 drivers/lightnvm/pblk-core.c else if (caddr == (chunk->cnlb - 1))
chunk 714 drivers/lightnvm/pblk-init.c struct nvm_chk_meta *chunk;
chunk 721 drivers/lightnvm/pblk-init.c chunk = &line->chks[pos];
chunk 726 drivers/lightnvm/pblk-init.c chunk->state = chunk_meta->state;
chunk 727 drivers/lightnvm/pblk-init.c chunk->type = chunk_meta->type;
chunk 728 drivers/lightnvm/pblk-init.c chunk->wi = chunk_meta->wi;
chunk 729 drivers/lightnvm/pblk-init.c chunk->slba = chunk_meta->slba;
chunk 730 drivers/lightnvm/pblk-init.c chunk->cnlb = chunk_meta->cnlb;
chunk 731 drivers/lightnvm/pblk-init.c chunk->wp = chunk_meta->wp;
chunk 734 drivers/lightnvm/pblk-init.c chunk->state);
chunk 736 drivers/lightnvm/pblk-init.c if (chunk->type & NVM_CHK_TP_SZ_SPEC) {
chunk 741 drivers/lightnvm/pblk-init.c if (!(chunk->state & NVM_CHK_ST_OFFLINE))
chunk 125 drivers/lightnvm/pblk-recovery.c struct nvm_chk_meta *chunk = &line->chks[i];
chunk 127 drivers/lightnvm/pblk-recovery.c if (chunk->state & NVM_CHK_ST_OFFLINE)
chunk 130 drivers/lightnvm/pblk-recovery.c written_secs += chunk->wp;
chunk 323 drivers/lightnvm/pblk-recovery.c struct nvm_chk_meta *chunk;
chunk 335 drivers/lightnvm/pblk-recovery.c chunk = pblk_get_stripe_chunk(pblk, line, i);
chunk 336 drivers/lightnvm/pblk-recovery.c max_wp = chunk->wp;
chunk 344 drivers/lightnvm/pblk-recovery.c chunk = pblk_get_stripe_chunk(pblk, line, i);
chunk 345 drivers/lightnvm/pblk-recovery.c if (chunk->wp > max_wp || chunk->wp < min_wp)
chunk 617 drivers/lightnvm/pblk-recovery.c struct nvm_chk_meta *chunk;
chunk 629 drivers/lightnvm/pblk-recovery.c chunk = &line->chks[pblk_ppa_to_pos(geo, bppa)];
chunk 631 drivers/lightnvm/pblk-recovery.c if (chunk->state & NVM_CHK_ST_CLOSED ||
chunk 632 drivers/lightnvm/pblk-recovery.c (chunk->state & NVM_CHK_ST_OPEN
chunk 633 drivers/lightnvm/pblk-recovery.c && chunk->wp >= lm->smeta_sec))
chunk 907 drivers/macintosh/smu.c unsigned int chunk;
chunk 915 drivers/macintosh/smu.c chunk = 0xe;
chunk 918 drivers/macintosh/smu.c unsigned int clen = min(len, chunk);
chunk 923 drivers/macintosh/smu.c cmd.reply_len = chunk;
chunk 142 drivers/md/dm-exception-store.h static inline chunk_t dm_chunk_number(chunk_t chunk)
chunk 144 drivers/md/dm-exception-store.h return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
chunk 229 drivers/md/dm-snap-persistent.c static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op,
chunk 234 drivers/md/dm-snap-persistent.c .sector = ps->store->chunk_size * chunk,
chunk 288 drivers/md/dm-snap-persistent.c chunk_t chunk;
chunk 290 drivers/md/dm-snap-persistent.c chunk = area_location(ps, ps->current_area);
chunk 292 drivers/md/dm-snap-persistent.c r = chunk_io(ps, ps->area, chunk, op, op_flags, 0);
chunk 521 drivers/md/dm-snap-persistent.c chunk_t chunk;
chunk 536 drivers/md/dm-snap-persistent.c chunk = area_location(ps, ps->current_area);
chunk 538 drivers/md/dm-snap-persistent.c area = dm_bufio_read(client, chunk, &bp);
chunk 552 drivers/md/dm-snap-persistent.c dm_bufio_forget(client, chunk);
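The dm-exception-store.h and dm-snap-persistent.c entries above reduce to two pieces of arithmetic: a chunk_t packs the chunk number in its low DM_CHUNK_NUMBER_BITS bits, and a chunk's on-disk sector is chunk_size * chunk. A minimal sketch of that masking plus the bucket hash used by the dm-snap entries that follow (the bit width and demo_ names here are illustrative):

    #include <stdint.h>

    #define DEMO_CHUNK_NUMBER_BITS 56   /* illustrative width for the low-bits mask */

    typedef uint64_t chunk_t;

    /* Low bits carry the chunk number; high bits are used for other state. */
    static chunk_t demo_chunk_number(chunk_t chunk)
    {
        return chunk & ((1ULL << DEMO_CHUNK_NUMBER_BITS) - 1ULL);
    }

    /* sector_to_chunk() pattern: a chunk is a power-of-two run of sectors. */
    static uint64_t demo_sector_to_chunk(uint64_t sector, unsigned chunk_shift)
    {
        return sector >> chunk_shift;
    }

    /* exception_hash() pattern: shift then mask into a bucket array. */
    static uint32_t demo_exception_hash(chunk_t chunk, unsigned hash_shift,
                                        uint32_t hash_mask)
    {
        return (uint32_t)(chunk >> hash_shift) & hash_mask;
    }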
chunk 184 drivers/md/dm-snap.c chunk_t chunk)
chunk 186 drivers/md/dm-snap.c return chunk << store->chunk_shift;
chunk 241 drivers/md/dm-snap.c chunk_t chunk;
chunk 256 drivers/md/dm-snap.c static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
chunk 260 drivers/md/dm-snap.c c->chunk = chunk;
chunk 264 drivers/md/dm-snap.c &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
chunk 278 drivers/md/dm-snap.c static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
chunk 286 drivers/md/dm-snap.c &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
chunk 287 drivers/md/dm-snap.c if (c->chunk == chunk) {
chunk 302 drivers/md/dm-snap.c static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
chunk 304 drivers/md/dm-snap.c while (__chunk_is_tracked(s, chunk))
chunk 624 drivers/md/dm-snap.c static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk);
chunk 632 drivers/md/dm-snap.c static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk,
chunk 638 drivers/md/dm-snap.c lock->complete_slot = &complete->table[exception_hash(complete, chunk)];
chunk 639 drivers/md/dm-snap.c lock->pending_slot = &pending->table[exception_hash(pending, chunk)];
chunk 690 drivers/md/dm-snap.c static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
chunk 692 drivers/md/dm-snap.c return (chunk >> et->hash_shift) & et->hash_mask;
chunk 705 drivers/md/dm-snap.c chunk_t chunk)
chunk 711 drivers/md/dm-snap.c slot = &et->table[exception_hash(et, chunk)];
chunk 713 drivers/md/dm-snap.c if (chunk >= e->old_chunk &&
chunk 714 drivers/md/dm-snap.c chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
chunk 1836 drivers/md/dm-snap.c __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
chunk 1838 drivers/md/dm-snap.c struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
chunk 1854 drivers/md/dm-snap.c struct dm_snap_pending_exception *pe, chunk_t chunk)
chunk 1856 drivers/md/dm-snap.c pe->e.old_chunk = chunk;
chunk 1887 drivers/md/dm-snap.c struct dm_snap_pending_exception *pe, chunk_t chunk)
chunk 1891 drivers/md/dm-snap.c pe2 = __lookup_pending_exception(s, chunk);
chunk 1897 drivers/md/dm-snap.c return __insert_pending_exception(s, pe, chunk);
chunk 1901 drivers/md/dm-snap.c struct bio *bio, chunk_t chunk)
chunk 1906 drivers/md/dm-snap.c (chunk - e->old_chunk)) +
chunk 1921 drivers/md/dm-snap.c struct bio *bio, chunk_t chunk)
chunk 1946 drivers/md/dm-snap.c chunk_t chunk;
chunk 1957 drivers/md/dm-snap.c chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
chunk 1958 drivers/md/dm-snap.c dm_exception_table_lock_init(s, chunk, &lock);
chunk 1988 drivers/md/dm-snap.c track_chunk(s, bio, chunk);
chunk 1995 drivers/md/dm-snap.c e = dm_lookup_exception(&s->complete, chunk);
chunk 1997 drivers/md/dm-snap.c remap_exception(s, e, bio, chunk);
chunk 2002 drivers/md/dm-snap.c zero_exception(s, e, bio, chunk);
chunk 2025 drivers/md/dm-snap.c pe = __lookup_pending_exception(s, chunk);
chunk 2031 drivers/md/dm-snap.c e = dm_lookup_exception(&s->complete, chunk);
chunk 2034 drivers/md/dm-snap.c remap_exception(s, e, bio, chunk);
chunk 2038 drivers/md/dm-snap.c pe = __find_pending_exception(s, pe, chunk);
chunk 2059 drivers/md/dm-snap.c remap_exception(s, &pe->e, bio, chunk);
chunk 2087 drivers/md/dm-snap.c track_chunk(s, bio, chunk);
chunk 2114 drivers/md/dm-snap.c chunk_t chunk;
chunk 2132 drivers/md/dm-snap.c chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
chunk 2141 drivers/md/dm-snap.c e = dm_lookup_exception(&s->complete, chunk);
chunk 2145 drivers/md/dm-snap.c chunk >= s->first_merging_chunk &&
chunk 2146 drivers/md/dm-snap.c chunk < (s->first_merging_chunk +
chunk 2154 drivers/md/dm-snap.c remap_exception(s, e, bio, chunk);
chunk 2157 drivers/md/dm-snap.c track_chunk(s, bio, chunk);
chunk 2429 drivers/md/dm-snap.c chunk_t chunk;
chunk 2448 drivers/md/dm-snap.c chunk = sector_to_chunk(snap->store, sector);
chunk 2449 drivers/md/dm-snap.c dm_exception_table_lock_init(snap, chunk, &lock);
chunk 2458 drivers/md/dm-snap.c pe = __lookup_pending_exception(snap, chunk);
chunk 2465 drivers/md/dm-snap.c e = dm_lookup_exception(&snap->complete, chunk);
chunk 2473 drivers/md/dm-snap.c pe2 = __lookup_pending_exception(snap, chunk);
chunk 2476 drivers/md/dm-snap.c e = dm_lookup_exception(&snap->complete, chunk);
chunk 2482 drivers/md/dm-snap.c pe = __insert_pending_exception(snap, pe, chunk);
chunk 219 drivers/md/dm-stripe.c sector_t chunk = dm_target_offset(sc->ti, sector);
chunk 223 drivers/md/dm-stripe.c chunk_offset = sector_div(chunk, sc->chunk_size);
chunk 225 drivers/md/dm-stripe.c chunk_offset = chunk & (sc->chunk_size - 1);
chunk 226 drivers/md/dm-stripe.c chunk >>= sc->chunk_size_shift;
chunk 230 drivers/md/dm-stripe.c *stripe = sector_div(chunk, sc->stripes);
chunk 232 drivers/md/dm-stripe.c *stripe = chunk & (sc->stripes - 1);
chunk 233 drivers/md/dm-stripe.c chunk >>= sc->stripes_shift;
chunk 237 drivers/md/dm-stripe.c chunk *= sc->chunk_size;
chunk 239 drivers/md/dm-stripe.c chunk <<= sc->chunk_size_shift;
chunk 241 drivers/md/dm-stripe.c *result = chunk + chunk_offset;
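The dm-stripe entries above split a target-relative sector into a stripe number and a device-relative sector, either with sector_div() or, when chunk size and stripe count are powers of two, with a shift/mask fast path. A small runnable sketch of the power-of-two path (the demo_ names and example shift values are illustrative, not part of the driver):

    #include <stdint.h>
    #include <stdio.h>

    /* Power-of-two fast path from the dm-stripe entries above. */
    static void demo_stripe_map_sector(uint64_t sector,
                                       unsigned chunk_size_shift,
                                       unsigned stripes_shift,
                                       uint32_t *stripe, uint64_t *result)
    {
        uint64_t chunk = sector;
        uint64_t chunk_offset = chunk & ((1ULL << chunk_size_shift) - 1);

        chunk >>= chunk_size_shift;                  /* chunk index on the target */
        *stripe = chunk & ((1U << stripes_shift) - 1);
        chunk >>= stripes_shift;                     /* chunk index on that stripe */
        *result = (chunk << chunk_size_shift) + chunk_offset;
    }

    int main(void)
    {
        uint32_t stripe;
        uint64_t dev_sector;

        /* 8-sector chunks (shift 3), 4 stripes (shift 2): sector 1000 ->
         * chunk 125, stripe 1, device sector 31*8 + 0 = 248. */
        demo_stripe_map_sector(1000, 3, 2, &stripe, &dev_sector);
        printf("stripe %u, sector %llu\n", stripe, (unsigned long long)dev_sector);
        return 0;
    }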
chunk 1106 drivers/md/dm-zoned-metadata.c zone->chunk = DMZ_MAP_UNMAPPED;
chunk 1355 drivers/md/dm-zoned-metadata.c unsigned int i = 0, e = 0, chunk = 0;
chunk 1366 drivers/md/dm-zoned-metadata.c while (chunk < zmd->nr_chunks) {
chunk 1385 drivers/md/dm-zoned-metadata.c chunk, dzone_id);
chunk 1391 drivers/md/dm-zoned-metadata.c dzone->chunk = chunk;
chunk 1406 drivers/md/dm-zoned-metadata.c chunk, bzone_id);
chunk 1413 drivers/md/dm-zoned-metadata.c chunk, bzone_id);
chunk 1419 drivers/md/dm-zoned-metadata.c bzone->chunk = chunk;
chunk 1425 drivers/md/dm-zoned-metadata.c chunk++;
chunk 1453 drivers/md/dm-zoned-metadata.c dzone->chunk = DMZ_MAP_UNMAPPED;
chunk 1473 drivers/md/dm-zoned-metadata.c static void dmz_set_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk,
chunk 1476 drivers/md/dm-zoned-metadata.c struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
chunk 1478 drivers/md/dm-zoned-metadata.c int map_idx = chunk & DMZ_MAP_ENTRIES_MASK;
chunk 1646 drivers/md/dm-zoned-metadata.c struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, int op)
chunk 1648 drivers/md/dm-zoned-metadata.c struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
chunk 1650 drivers/md/dm-zoned-metadata.c int dmap_idx = chunk & DMZ_MAP_ENTRIES_MASK;
chunk 1678 drivers/md/dm-zoned-metadata.c dmz_map_zone(zmd, dzone, chunk);
chunk 1683 drivers/md/dm-zoned-metadata.c if (dzone->chunk != chunk) {
chunk 1780 drivers/md/dm-zoned-metadata.c dmz_set_chunk_mapping(zmd, dzone->chunk, dmz_id(zmd, dzone),
chunk 1784 drivers/md/dm-zoned-metadata.c bzone->chunk = dzone->chunk;
chunk 1872 drivers/md/dm-zoned-metadata.c unsigned int chunk)
chunk 1875 drivers/md/dm-zoned-metadata.c dmz_set_chunk_mapping(zmd, chunk, dmz_id(zmd, dzone),
chunk 1877 drivers/md/dm-zoned-metadata.c dzone->chunk = chunk;
chunk 1890 drivers/md/dm-zoned-metadata.c unsigned int chunk = zone->chunk;
chunk 1893 drivers/md/dm-zoned-metadata.c if (chunk == DMZ_MAP_UNMAPPED) {
chunk 1919 drivers/md/dm-zoned-metadata.c dmz_set_chunk_mapping(zmd, chunk, dzone_id, DMZ_MAP_UNMAPPED);
chunk 1921 drivers/md/dm-zoned-metadata.c zone->chunk = DMZ_MAP_UNMAPPED;
chunk 199 drivers/md/dm-zoned-reclaim.c dzone->chunk, dmz_id(zmd, bzone), dmz_weight(bzone),
chunk 231 drivers/md/dm-zoned-reclaim.c unsigned int chunk = dzone->chunk;
chunk 238 drivers/md/dm-zoned-reclaim.c chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
chunk 261 drivers/md/dm-zoned-reclaim.c dmz_map_zone(zmd, bzone, chunk);
chunk 276 drivers/md/dm-zoned-reclaim.c unsigned int chunk = dzone->chunk;
chunk 290 drivers/md/dm-zoned-reclaim.c chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
chunk 314 drivers/md/dm-zoned-reclaim.c dmz_map_zone(zmd, szone, chunk);
chunk 33 drivers/md/dm-zoned-target.c unsigned int chunk;
chunk 463 drivers/md/dm-zoned-target.c radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
chunk 528 drivers/md/dm-zoned-target.c unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
chunk 535 drivers/md/dm-zoned-target.c cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
chunk 549 drivers/md/dm-zoned-target.c cw->chunk = chunk;
chunk 552 drivers/md/dm-zoned-target.c ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
chunk 97 drivers/md/dm-zoned.h unsigned int chunk;
chunk 191 drivers/md/dm-zoned.h unsigned int chunk);
chunk 226 drivers/md/dm-zoned.h unsigned int chunk, int op);
chunk 750 drivers/md/md-bitmap.c unsigned long chunk)
chunk 753 drivers/md/md-bitmap.c chunk += sizeof(bitmap_super_t) << 3;
chunk 754 drivers/md/md-bitmap.c return chunk >> PAGE_BIT_SHIFT;
chunk 759 drivers/md/md-bitmap.c unsigned long chunk)
chunk 762 drivers/md/md-bitmap.c chunk += sizeof(bitmap_super_t) << 3;
chunk 763 drivers/md/md-bitmap.c return chunk & (PAGE_BITS - 1);
chunk 771 drivers/md/md-bitmap.c unsigned long chunk)
chunk 773 drivers/md/md-bitmap.c if (file_page_index(store, chunk) >= store->file_pages)
chunk 775 drivers/md/md-bitmap.c return store->filemap[file_page_index(store, chunk)];
chunk 932 drivers/md/md-bitmap.c unsigned long chunk = block >> bitmap->counts.chunkshift;
chunk 939 drivers/md/md-bitmap.c page = filemap_get_page(&bitmap->storage, chunk);
chunk 942 drivers/md/md-bitmap.c bit = file_page_offset(&bitmap->storage, chunk);
chunk 961 drivers/md/md-bitmap.c unsigned long chunk = block >> bitmap->counts.chunkshift;
chunk 968 drivers/md/md-bitmap.c page = filemap_get_page(&bitmap->storage, chunk);
chunk 971 drivers/md/md-bitmap.c bit = file_page_offset(&bitmap->storage, chunk);
chunk 989 drivers/md/md-bitmap.c unsigned long chunk = block >> bitmap->counts.chunkshift;
chunk 992 drivers/md/md-bitmap.c page = filemap_get_page(&bitmap->storage, chunk);
chunk 995 drivers/md/md-bitmap.c bit = file_page_offset(&bitmap->storage, chunk);
chunk 1205 drivers/md/md-bitmap.c sector_t chunk = offset >> bitmap->chunkshift;
chunk 1206 drivers/md/md-bitmap.c unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
chunk 1213 drivers/md/md-bitmap.c sector_t chunk = offset >> bitmap->chunkshift;
chunk 1214 drivers/md/md-bitmap.c unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
chunk 1364 drivers/md/md-bitmap.c sector_t chunk = offset >> bitmap->chunkshift;
chunk 1365 drivers/md/md-bitmap.c unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
chunk 1366 drivers/md/md-bitmap.c unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
chunk 1693 drivers/md/md-bitmap.c unsigned long chunk;
chunk 1695 drivers/md/md-bitmap.c for (chunk = s; chunk <= e; chunk++) {
chunk 1696 drivers/md/md-bitmap.c sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
chunk 4494 drivers/md/md.c unsigned long chunk, end_chunk;
chunk 4504 drivers/md/md.c chunk = end_chunk = simple_strtoul(buf, &end, 0);
chunk 4512 drivers/md/md.c md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
chunk 4953 drivers/md/md.c int chunk;
chunk 4968 drivers/md/md.c chunk = mddev->chunk_sectors;
chunk 4969 drivers/md/md.c if (chunk) {
chunk 4973 drivers/md/md.c if (sector_div(temp, chunk))
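The md-bitmap entries above locate a chunk's bit in the bitmap file by first skipping the on-disk superblock (chunk += sizeof(bitmap_super_t) << 3 bit positions) and then splitting the bit index into a filemap page and an in-page offset. A sketch of that arithmetic, assuming a 256-byte superblock and 4 KiB pages (both constants are illustrative stand-ins):

    #include <stdint.h>

    #define DEMO_SB_BITS        (256UL * 8)   /* sizeof(bitmap_super_t) << 3, assumed 256-byte sb */
    #define DEMO_PAGE_BITS      (4096UL * 8)  /* bits per assumed 4 KiB page */
    #define DEMO_PAGE_BIT_SHIFT 15            /* log2(DEMO_PAGE_BITS) */

    /* Which page of the bitmap file holds this chunk's bit. */
    static unsigned long demo_file_page_index(unsigned long chunk)
    {
        chunk += DEMO_SB_BITS;                /* the file starts with the superblock */
        return chunk >> DEMO_PAGE_BIT_SHIFT;
    }

    /* Bit offset of the chunk within that page. */
    static unsigned long demo_file_page_offset(unsigned long chunk)
    {
        chunk += DEMO_SB_BITS;
        return chunk & (DEMO_PAGE_BITS - 1);
    }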
chunk 330 drivers/md/raid0.c sector_t chunk;
chunk 341 drivers/md/raid0.c chunk = *sector_offset;
chunk 343 drivers/md/raid0.c sector_div(chunk, zone->nb_dev << chunksect_bits);
chunk 346 drivers/md/raid0.c chunk = *sector_offset;
chunk 347 drivers/md/raid0.c sector_div(chunk, chunk_sects * zone->nb_dev);
chunk 354 drivers/md/raid0.c *sector_offset = (chunk * chunk_sects) + sect_in_chunk;
chunk 565 drivers/md/raid10.c sector_t chunk;
chunk 578 drivers/md/raid10.c chunk = r10bio->sector >> geo->chunk_shift;
chunk 581 drivers/md/raid10.c chunk *= geo->near_copies;
chunk 582 drivers/md/raid10.c stripe = chunk;
chunk 641 drivers/md/raid10.c sector_t offset, chunk, vchunk;
chunk 664 drivers/md/raid10.c chunk = sector >> geo->chunk_shift;
chunk 665 drivers/md/raid10.c fc = sector_div(chunk, geo->far_copies);
chunk 677 drivers/md/raid10.c chunk = sector >> geo->chunk_shift;
chunk 679 drivers/md/raid10.c vchunk = chunk * geo->raid_disks + dev;
chunk 3578 drivers/md/raid10.c int layout, chunk, disks;
chunk 3582 drivers/md/raid10.c chunk = mddev->chunk_sectors;
chunk 3587 drivers/md/raid10.c chunk = mddev->new_chunk_sectors;
chunk 3594 drivers/md/raid10.c chunk = mddev->new_chunk_sectors;
chunk 3600 drivers/md/raid10.c if (chunk < (PAGE_SIZE >> 9) ||
chunk 3601 drivers/md/raid10.c !is_power_of_2(chunk))
chunk 3626 drivers/md/raid10.c geo->chunk_mask = chunk - 1;
chunk 3627 drivers/md/raid10.c geo->chunk_shift = ffz(~chunk);
chunk 433 drivers/media/dvb-frontends/drxk_hard.c int chunk = blk_size > state->m_chunk_size ?
chunk 435 drivers/media/dvb-frontends/drxk_hard.c u8 *adr_buf = &state->chunk[0];
chunk 445 drivers/media/dvb-frontends/drxk_hard.c if (chunk == state->m_chunk_size)
chunk 446 drivers/media/dvb-frontends/drxk_hard.c chunk -= 2;
chunk 453 drivers/media/dvb-frontends/drxk_hard.c memcpy(&state->chunk[adr_length], p_block, chunk);
chunk 458 drivers/media/dvb-frontends/drxk_hard.c for (i = 0; i < chunk; i++)
chunk 463 drivers/media/dvb-frontends/drxk_hard.c &state->chunk[0], chunk + adr_length);
chunk 469 drivers/media/dvb-frontends/drxk_hard.c p_block += chunk;
chunk 470 drivers/media/dvb-frontends/drxk_hard.c address += (chunk >> 1);
chunk 471 drivers/media/dvb-frontends/drxk_hard.c blk_size -= chunk;
chunk 225 drivers/media/dvb-frontends/drxk_hard.h u8 chunk[256];
chunk 181 drivers/media/rc/serial_ir.c unsigned char chunk, shifted;
chunk 186 drivers/media/rc/serial_ir.c chunk = 3;
chunk 188 drivers/media/rc/serial_ir.c chunk = 1;
chunk 190 drivers/media/rc/serial_ir.c shifted = chunk << (i * 3);
chunk 370 drivers/media/usb/go7007/go7007-fw.c int size = 0, i, off = 0, chunk;
chunk 380 drivers/media/usb/go7007/go7007-fw.c chunk = mjpeg_frame_header(go, buf + size, 1);
chunk 381 drivers/media/usb/go7007/go7007-fw.c memmove(buf + size, buf + size + 80, chunk - 80);
chunk 382 drivers/media/usb/go7007/go7007-fw.c size += chunk - 80;
chunk 384 drivers/media/usb/go7007/go7007-fw.c for (i = 0; i < size; i += chunk * 2) {
chunk 392 drivers/media/usb/go7007/go7007-fw.c chunk = 28;
chunk 393 drivers/media/usb/go7007/go7007-fw.c if (mem + chunk > 0x4000)
chunk 394 drivers/media/usb/go7007/go7007-fw.c chunk = 0x4000 - mem;
chunk 395 drivers/media/usb/go7007/go7007-fw.c if (i + 2 * chunk > size)
chunk 396 drivers/media/usb/go7007/go7007-fw.c chunk = (size - i) / 2;
chunk 398 drivers/media/usb/go7007/go7007-fw.c if (chunk < 28) {
chunk 399 drivers/media/usb/go7007/go7007-fw.c code[off] = __cpu_to_le16(0x4000 | chunk);
chunk 408 drivers/media/usb/go7007/go7007-fw.c memcpy(&code[off + 2], buf + i, chunk * 2);
chunk 637 drivers/media/usb/go7007/go7007-fw.c int i, off = 0, chunk;
chunk 673 drivers/media/usb/go7007/go7007-fw.c for (i = 0; i < 5120; i += chunk * 2) {
chunk 681 drivers/media/usb/go7007/go7007-fw.c chunk = 28;
chunk 682 drivers/media/usb/go7007/go7007-fw.c if (mem + chunk > 0x4000)
chunk 683 drivers/media/usb/go7007/go7007-fw.c chunk = 0x4000 - mem;
chunk 684 drivers/media/usb/go7007/go7007-fw.c if (i + 2 * chunk > 5120)
chunk 685 drivers/media/usb/go7007/go7007-fw.c chunk = (5120 - i) / 2;
chunk 687 drivers/media/usb/go7007/go7007-fw.c if (chunk < 28) {
chunk 688 drivers/media/usb/go7007/go7007-fw.c code[off] = __cpu_to_le16(0x4000 | chunk);
chunk 690 drivers/media/usb/go7007/go7007-fw.c if (mem + chunk == 0x4000) {
chunk 700 drivers/media/usb/go7007/go7007-fw.c memcpy(&code[off + 2], buf + i, chunk * 2);
chunk 823 drivers/media/usb/go7007/go7007-fw.c int i, off = 0, chunk;
chunk 841 drivers/media/usb/go7007/go7007-fw.c for (i = 0; i < 5120; i += chunk * 2) {
chunk 849 drivers/media/usb/go7007/go7007-fw.c chunk = 28;
chunk 850 drivers/media/usb/go7007/go7007-fw.c if (mem + chunk > 0x4000)
chunk 851 drivers/media/usb/go7007/go7007-fw.c chunk = 0x4000 - mem;
chunk 852 drivers/media/usb/go7007/go7007-fw.c if (i + 2 * chunk > 5120)
chunk 853 drivers/media/usb/go7007/go7007-fw.c chunk = (5120 - i) / 2;
chunk 855 drivers/media/usb/go7007/go7007-fw.c if (chunk < 28) {
chunk 856 drivers/media/usb/go7007/go7007-fw.c code[off] = __cpu_to_le16(0x4000 | chunk);
chunk 858 drivers/media/usb/go7007/go7007-fw.c if (mem + chunk == 0x4000) {
chunk 868 drivers/media/usb/go7007/go7007-fw.c memcpy(&code[off + 2], buf + i, chunk * 2);
chunk 884 drivers/media/usb/go7007/go7007-fw.c for (i = 0; i < 5120; i += chunk * 2) {
chunk 892 drivers/media/usb/go7007/go7007-fw.c chunk = 28;
chunk 893 drivers/media/usb/go7007/go7007-fw.c if (mem + chunk > 0x4000)
chunk 894 drivers/media/usb/go7007/go7007-fw.c chunk = 0x4000 - mem;
chunk 895 drivers/media/usb/go7007/go7007-fw.c if (i + 2 * chunk > 5120)
chunk 896 drivers/media/usb/go7007/go7007-fw.c chunk = (5120 - i) / 2;
chunk 898 drivers/media/usb/go7007/go7007-fw.c if (chunk < 28) {
chunk 899 drivers/media/usb/go7007/go7007-fw.c code[off] = __cpu_to_le16(0x4000 | chunk);
chunk 901 drivers/media/usb/go7007/go7007-fw.c if (mem + chunk == 0x4000) {
chunk 911 drivers/media/usb/go7007/go7007-fw.c memcpy(&code[off + 2], buf + i, chunk * 2);
chunk 391 drivers/media/usb/usbtv/usbtv-video.c static void usbtv_image_chunk(struct usbtv *usbtv, __be32 *chunk)
chunk 399 drivers/media/usb/usbtv/usbtv-video.c if (!USBTV_MAGIC_OK(chunk))
chunk 401 drivers/media/usb/usbtv/usbtv-video.c frame_id = USBTV_FRAME_ID(chunk);
chunk 402 drivers/media/usb/usbtv/usbtv-video.c odd = USBTV_ODD(chunk);
chunk 403 drivers/media/usb/usbtv/usbtv-video.c chunk_no = USBTV_CHUNK_NO(chunk);
chunk 428 drivers/media/usb/usbtv/usbtv-video.c usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd);
chunk 65 drivers/media/usb/usbtv/usbtv.h #define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \
chunk 67 drivers/media/usb/usbtv/usbtv.h #define USBTV_FRAME_ID(chunk) ((be32_to_cpu(chunk[0]) & 0x00ff0000) >> 16)
chunk 68 drivers/media/usb/usbtv/usbtv.h #define USBTV_ODD(chunk) ((be32_to_cpu(chunk[0]) & 0x0000f000) >> 15)
chunk 69 drivers/media/usb/usbtv/usbtv.h #define USBTV_CHUNK_NO(chunk) (be32_to_cpu(chunk[0]) & 0x00000fff)
chunk 708 drivers/memstick/core/mspro_block.c bool chunk;
chunk 710 drivers/memstick/core/mspro_block.c chunk = blk_update_request(msb->block_req,
chunk 713 drivers/memstick/core/mspro_block.c if (chunk)
chunk 741 drivers/memstick/core/mspro_block.c bool chunk;
chunk 774 drivers/memstick/core/mspro_block.c chunk = blk_update_request(msb->block_req,
chunk 776 drivers/memstick/core/mspro_block.c if (chunk) {
chunk 392 drivers/misc/habanalabs/command_submission.c struct hl_cs_chunk *chunk,
chunk 403 drivers/misc/habanalabs/command_submission.c hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
chunk 405 drivers/misc/habanalabs/command_submission.c if ((chunk->queue_index >= HL_MAX_QUEUES) ||
chunk 408 drivers/misc/habanalabs/command_submission.c chunk->queue_index);
chunk 415 drivers/misc/habanalabs/command_submission.c chunk->queue_index);
chunk 419 drivers/misc/habanalabs/command_submission.c return (struct hl_cb *) (uintptr_t) chunk->cb_handle;
chunk 423 drivers/misc/habanalabs/command_submission.c cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);
chunk 431 drivers/misc/habanalabs/command_submission.c if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
chunk 432 drivers/misc/habanalabs/command_submission.c dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
chunk 516 drivers/misc/habanalabs/command_submission.c struct hl_cs_chunk *chunk = &cs_chunk_array[i];
chunk 519 drivers/misc/habanalabs/command_submission.c cb = validate_queue_index(hdev, &hpriv->cb_mgr, chunk,
chunk 542 drivers/misc/habanalabs/command_submission.c job->user_cb_size = chunk->cb_size;
chunk 546 drivers/misc/habanalabs/command_submission.c job->job_cb_size = chunk->cb_size;
chunk 547 drivers/misc/habanalabs/command_submission.c job->hw_queue_id = chunk->queue_index;
chunk 476 drivers/mmc/host/sdhci.c size_t blksize, len, chunk;
chunk 483 drivers/mmc/host/sdhci.c chunk = 0;
chunk 498 drivers/mmc/host/sdhci.c if (chunk == 0) {
chunk 500 drivers/mmc/host/sdhci.c chunk = 4;
chunk 507 drivers/mmc/host/sdhci.c chunk--;
chunk 520 drivers/mmc/host/sdhci.c size_t blksize, len, chunk;
chunk 527 drivers/mmc/host/sdhci.c chunk = 0;
chunk 543 drivers/mmc/host/sdhci.c scratch |= (u32)*buf << (chunk * 8);
chunk 546 drivers/mmc/host/sdhci.c chunk++;
chunk 549 drivers/mmc/host/sdhci.c if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
chunk 551 drivers/mmc/host/sdhci.c chunk = 0;
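The sdhci entries above assemble 32-bit register accesses from a byte stream: chunk counts how many bytes of a scratch word are filled, and the word is flushed when four bytes are ready or the data runs out. A runnable sketch of the write-side packing (demo_ names are illustrative; the driver writes the word to a register instead of an array):

    #include <stdint.h>
    #include <stddef.h>

    /* Pack a byte stream into little-endian 32-bit words, flushing every
     * 4 bytes and once more for a trailing partial word. Returns the word
     * count actually stored. */
    static size_t demo_pack_words(const uint8_t *buf, size_t len,
                                  uint32_t *out, size_t out_cap)
    {
        uint32_t scratch = 0;
        size_t chunk = 0, nwords = 0;

        while (len--) {
            scratch |= (uint32_t)*buf++ << (chunk * 8);
            chunk++;
            if (chunk == 4 || len == 0) {
                if (nwords < out_cap)
                    out[nwords++] = scratch;  /* driver: register write */
                chunk = 0;
                scratch = 0;
            }
        }
        return nwords;
    }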
chunk 254 drivers/mtd/nand/raw/marvell_nand.c int chunk;
chunk 270 drivers/mtd/nand/raw/marvell_nand.c .chunk = dc, \
chunk 1203 drivers/mtd/nand/raw/marvell_nand.c int chunk;
chunk 1212 drivers/mtd/nand/raw/marvell_nand.c for (chunk = 0; chunk < lt->nchunks; chunk++) {
chunk 1214 drivers/mtd/nand/raw/marvell_nand.c if (chunk >= lt->full_chunk_cnt) {
chunk 1221 drivers/mtd/nand/raw/marvell_nand.c nand_change_read_column_op(chip, chunk * chunk_size,
chunk 1222 drivers/mtd/nand/raw/marvell_nand.c buf + (lt->data_bytes * chunk),
chunk 1226 drivers/mtd/nand/raw/marvell_nand.c nand_read_data_op(chip, oob + (lt->spare_bytes * chunk),
chunk 1231 drivers/mtd/nand/raw/marvell_nand.c (ALIGN(lt->ecc_bytes, 32) * chunk),
chunk 1238 drivers/mtd/nand/raw/marvell_nand.c static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
chunk 1260 drivers/mtd/nand/raw/marvell_nand.c if (chunk == 0)
chunk 1269 drivers/mtd/nand/raw/marvell_nand.c if (chunk == 0)
chunk 1271 drivers/mtd/nand/raw/marvell_nand.c else if (chunk < lt->nchunks - 1)
chunk 1315 drivers/mtd/nand/raw/marvell_nand.c int chunk, ret;
chunk 1329 drivers/mtd/nand/raw/marvell_nand.c for (chunk = 0; chunk < lt->nchunks; chunk++) {
chunk 1331 drivers/mtd/nand/raw/marvell_nand.c if (chunk >= lt->full_chunk_cnt) {
chunk 1337 drivers/mtd/nand/raw/marvell_nand.c marvell_nfc_hw_ecc_bch_read_chunk(chip, chunk, data, data_len,
chunk 1341 drivers/mtd/nand/raw/marvell_nand.c failure_mask |= BIT(chunk);
chunk 1372 drivers/mtd/nand/raw/marvell_nand.c for (chunk = 0; chunk < lt->nchunks; chunk++) {
chunk 1378 drivers/mtd/nand/raw/marvell_nand.c if (!(failure_mask & BIT(chunk)))
chunk 1381 drivers/mtd/nand/raw/marvell_nand.c data_off_in_page = chunk * (lt->data_bytes + lt->spare_bytes +
chunk 1384 drivers/mtd/nand/raw/marvell_nand.c (chunk < lt->full_chunk_cnt ? lt->data_bytes :
chunk 1387 drivers/mtd/nand/raw/marvell_nand.c (chunk < lt->full_chunk_cnt ? lt->spare_bytes :
chunk 1390 drivers/mtd/nand/raw/marvell_nand.c data_off = chunk * lt->data_bytes;
chunk 1391 drivers/mtd/nand/raw/marvell_nand.c spare_off = chunk * lt->spare_bytes;
chunk 1394 drivers/mtd/nand/raw/marvell_nand.c (chunk * (lt->ecc_bytes + 2));
chunk 1396 drivers/mtd/nand/raw/marvell_nand.c data_len = chunk < lt->full_chunk_cnt ? lt->data_bytes :
chunk 1398 drivers/mtd/nand/raw/marvell_nand.c spare_len = chunk < lt->full_chunk_cnt ? lt->spare_bytes :
chunk 1400 drivers/mtd/nand/raw/marvell_nand.c ecc_len = chunk < lt->full_chunk_cnt ? lt->ecc_bytes :
chunk 1459 drivers/mtd/nand/raw/marvell_nand.c int chunk;
chunk 1465 drivers/mtd/nand/raw/marvell_nand.c for (chunk = 0; chunk < lt->nchunks; chunk++) {
chunk 1466 drivers/mtd/nand/raw/marvell_nand.c if (chunk >= lt->full_chunk_cnt) {
chunk 1473 drivers/mtd/nand/raw/marvell_nand.c nand_change_write_column_op(chip, chunk * full_chunk_size,
chunk 1477 drivers/mtd/nand/raw/marvell_nand.c nand_write_data_op(chip, buf + (chunk * lt->data_bytes),
chunk 1501 drivers/mtd/nand/raw/marvell_nand.c marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
chunk 1524 drivers/mtd/nand/raw/marvell_nand.c if (chunk == 0) {
chunk 1535 drivers/mtd/nand/raw/marvell_nand.c } else if (chunk < lt->nchunks - 1) {
chunk 1542 drivers/mtd/nand/raw/marvell_nand.c if (chunk == lt->nchunks - 1)
chunk 1572 drivers/mtd/nand/raw/marvell_nand.c int chunk, ret;
chunk 1582 drivers/mtd/nand/raw/marvell_nand.c for (chunk = 0; chunk < lt->nchunks; chunk++) {
chunk 1583 drivers/mtd/nand/raw/marvell_nand.c if (chunk >= lt->full_chunk_cnt) {
chunk 1588 drivers/mtd/nand/raw/marvell_nand.c marvell_nfc_hw_ecc_bch_write_chunk(chip, chunk, data, data_len,
chunk 2189 drivers/mtd/nand/raw/marvell_nand.c ecc->size == l->chunk && ecc->strength == l->strength) {
chunk 3343 drivers/mtd/nand/raw/nand_base.c int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
chunk 3356 drivers/mtd/nand/raw/nand_base.c pos = eccsize + i * (eccsize + chunk);
chunk 3369 drivers/mtd/nand/raw/nand_base.c toread = min_t(int, length, chunk);
chunk 3410 drivers/mtd/nand/raw/nand_base.c int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
chunk 3421 drivers/mtd/nand/raw/nand_base.c pos = steps * (eccsize + chunk);
chunk 3447 drivers/mtd/nand/raw/nand_base.c pos = eccsize + i * (eccsize + chunk);
chunk 3456 drivers/mtd/nand/raw/nand_base.c len = min_t(int, length, chunk);
chunk 151 drivers/net/dsa/sja1105/sja1105_spi.c } chunk;
chunk 156 drivers/net/dsa/sja1105/sja1105_spi.c chunk.buf_ptr = packed_buf;
chunk 157 drivers/net/dsa/sja1105/sja1105_spi.c chunk.spi_address = base_addr;
chunk 158 drivers/net/dsa/sja1105/sja1105_spi.c chunk.len = min_t(int, buf_len, SJA1105_SIZE_SPI_MSG_MAXLEN);
chunk 160 drivers/net/dsa/sja1105/sja1105_spi.c while (chunk.len) {
chunk 161 drivers/net/dsa/sja1105/sja1105_spi.c rc = sja1105_spi_send_packed_buf(priv, rw, chunk.spi_address,
chunk 162 drivers/net/dsa/sja1105/sja1105_spi.c chunk.buf_ptr, chunk.len);
chunk 166 drivers/net/dsa/sja1105/sja1105_spi.c chunk.buf_ptr += chunk.len;
chunk 167 drivers/net/dsa/sja1105/sja1105_spi.c chunk.spi_address += chunk.len / 4;
chunk 169 drivers/net/dsa/sja1105/sja1105_spi.c chunk.buf_ptr);
chunk 170 drivers/net/dsa/sja1105/sja1105_spi.c chunk.len = min(distance_to_end, SJA1105_SIZE_SPI_MSG_MAXLEN);
chunk 2485 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c unsigned int chunk =
chunk 2489 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
chunk 2493 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c if (copy_to_user(useraddr, buf, chunk))
chunk 2495 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c useraddr += chunk;
chunk 2496 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c t.addr += chunk;
chunk 2497 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c t.len -= chunk;
chunk 326 drivers/net/ethernet/chelsio/cxgb3/sge.c unsigned int chunk)
chunk 330 drivers/net/ethernet/chelsio/cxgb3/sge.c reclaim = min(chunk, reclaim);
chunk 1488 drivers/net/ethernet/ibm/emac/core.c int chunk = min(len, MAL_MAX_TX_SIZE);
chunk 1489 drivers/net/ethernet/ibm/emac/core.c len -= chunk;
chunk 1500 drivers/net/ethernet/ibm/emac/core.c dev->tx_desc[slot].data_len = (u16) chunk;
chunk 1507 drivers/net/ethernet/ibm/emac/core.c pd += chunk;
chunk 1518 drivers/net/ethernet/ibm/emac/core.c int len = skb->len, chunk;
chunk 1542 drivers/net/ethernet/ibm/emac/core.c chunk = min(len, MAL_MAX_TX_SIZE);
chunk 1545 drivers/net/ethernet/ibm/emac/core.c dev->tx_desc[slot].data_len = (u16) chunk;
chunk 1546 drivers/net/ethernet/ibm/emac/core.c len -= chunk;
chunk 1548 drivers/net/ethernet/ibm/emac/core.c slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
chunk 2531 drivers/net/ethernet/marvell/skge.c u32 chunk, ram_addr;
chunk 2594 drivers/net/ethernet/marvell/skge.c chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2);
chunk 2595 drivers/net/ethernet/marvell/skge.c ram_addr = hw->ram_offset + 2 * chunk * port;
chunk 2597 drivers/net/ethernet/marvell/skge.c skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
chunk 2601 drivers/net/ethernet/marvell/skge.c skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk);
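Several entry groups above (cxgb3_main.c, ibm/emac/core.c, sja1105_spi.c, sfc/mcdi.c further down) reuse one loop shape: clamp the remaining length to a per-transfer maximum, perform the transfer, then advance the pointers and residual length by the chunk. A generic sketch of that idiom (the constant and demo_ names are illustrative):

    #include <stddef.h>
    #include <string.h>

    #define DEMO_MAX_XFER 64   /* stand-in for MAL_MAX_TX_SIZE and friends */

    /* Move len bytes in bounded chunks, as the drivers above do for hardware
     * paths that cannot accept more than a fixed message size per call. */
    static void demo_copy_chunked(unsigned char *dst, const unsigned char *src,
                                  size_t len)
    {
        while (len) {
            size_t chunk = len < DEMO_MAX_XFER ? len : DEMO_MAX_XFER;

            memcpy(dst, src, chunk);   /* real drivers do MMIO/DMA/SPI here */
            dst += chunk;
            src += chunk;
            len -= chunk;
        }
    }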
chunk 55 drivers/net/ethernet/mellanox/mlx4/icm.c static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
chunk 59 drivers/net/ethernet/mellanox/mlx4/icm.c if (chunk->nsg > 0)
chunk 60 drivers/net/ethernet/mellanox/mlx4/icm.c dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages,
chunk 63 drivers/net/ethernet/mellanox/mlx4/icm.c for (i = 0; i < chunk->npages; ++i)
chunk 64 drivers/net/ethernet/mellanox/mlx4/icm.c __free_pages(sg_page(&chunk->sg[i]),
chunk 65 drivers/net/ethernet/mellanox/mlx4/icm.c get_order(chunk->sg[i].length));
chunk 68 drivers/net/ethernet/mellanox/mlx4/icm.c static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
chunk 72 drivers/net/ethernet/mellanox/mlx4/icm.c for (i = 0; i < chunk->npages; ++i)
chunk 74 drivers/net/ethernet/mellanox/mlx4/icm.c chunk->buf[i].size,
chunk 75 drivers/net/ethernet/mellanox/mlx4/icm.c chunk->buf[i].addr,
chunk 76 drivers/net/ethernet/mellanox/mlx4/icm.c chunk->buf[i].dma_addr);
chunk 81 drivers/net/ethernet/mellanox/mlx4/icm.c struct mlx4_icm_chunk *chunk, *tmp;
chunk 86 drivers/net/ethernet/mellanox/mlx4/icm.c list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
chunk 88 drivers/net/ethernet/mellanox/mlx4/icm.c mlx4_free_icm_coherent(dev, chunk);
chunk 90 drivers/net/ethernet/mellanox/mlx4/icm.c mlx4_free_icm_pages(dev, chunk);
chunk 92 drivers/net/ethernet/mellanox/mlx4/icm.c kfree(chunk);
chunk 136 drivers/net/ethernet/mellanox/mlx4/icm.c struct mlx4_icm_chunk *chunk = NULL;
chunk 160 drivers/net/ethernet/mellanox/mlx4/icm.c if (!chunk) {
chunk 161 drivers/net/ethernet/mellanox/mlx4/icm.c chunk = kzalloc_node(sizeof(*chunk),
chunk 165 drivers/net/ethernet/mellanox/mlx4/icm.c if (!chunk) {
chunk 166 drivers/net/ethernet/mellanox/mlx4/icm.c chunk = kzalloc(sizeof(*chunk),
chunk 169 drivers/net/ethernet/mellanox/mlx4/icm.c if (!chunk)
chunk 172 drivers/net/ethernet/mellanox/mlx4/icm.c chunk->coherent = coherent;
chunk 175 drivers/net/ethernet/mellanox/mlx4/icm.c sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN);
chunk 176 drivers/net/ethernet/mellanox/mlx4/icm.c list_add_tail(&chunk->list, &icm->chunk_list);
chunk 188 drivers/net/ethernet/mellanox/mlx4/icm.c &chunk->buf[chunk->npages],
chunk 191 drivers/net/ethernet/mellanox/mlx4/icm.c ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],
chunk 202 drivers/net/ethernet/mellanox/mlx4/icm.c ++chunk->npages;
chunk 205 drivers/net/ethernet/mellanox/mlx4/icm.c ++chunk->nsg;
chunk 206 drivers/net/ethernet/mellanox/mlx4/icm.c else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
chunk 207 drivers/net/ethernet/mellanox/mlx4/icm.c chunk->nsg = dma_map_sg(&dev->persist->pdev->dev,
chunk 208 drivers/net/ethernet/mellanox/mlx4/icm.c chunk->sg, chunk->npages,
chunk 211 drivers/net/ethernet/mellanox/mlx4/icm.c if (chunk->nsg <= 0)
chunk 215 drivers/net/ethernet/mellanox/mlx4/icm.c if (chunk->npages == MLX4_ICM_CHUNK_LEN)
chunk 216 drivers/net/ethernet/mellanox/mlx4/icm.c chunk = NULL;
chunk 221 drivers/net/ethernet/mellanox/mlx4/icm.c if (!coherent && chunk) {
chunk 222 drivers/net/ethernet/mellanox/mlx4/icm.c chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, chunk->sg,
chunk 223 drivers/net/ethernet/mellanox/mlx4/icm.c chunk->npages, DMA_BIDIRECTIONAL);
chunk 225 drivers/net/ethernet/mellanox/mlx4/icm.c if (chunk->nsg <= 0)
chunk 319 drivers/net/ethernet/mellanox/mlx4/icm.c struct mlx4_icm_chunk *chunk;
chunk 335 drivers/net/ethernet/mellanox/mlx4/icm.c list_for_each_entry(chunk, &icm->chunk_list, list) {
chunk 336 drivers/net/ethernet/mellanox/mlx4/icm.c for (i = 0; i < chunk->npages; ++i) {
chunk 341 drivers/net/ethernet/mellanox/mlx4/icm.c len = chunk->buf[i].size;
chunk 342 drivers/net/ethernet/mellanox/mlx4/icm.c dma_addr = chunk->buf[i].dma_addr;
chunk 343 drivers/net/ethernet/mellanox/mlx4/icm.c addr = chunk->buf[i].addr;
chunk 347 drivers/net/ethernet/mellanox/mlx4/icm.c len = sg_dma_len(&chunk->sg[i]);
chunk 348 drivers/net/ethernet/mellanox/mlx4/icm.c dma_addr = sg_dma_address(&chunk->sg[i]);
chunk 355 drivers/net/ethernet/mellanox/mlx4/icm.c page = sg_page(&chunk->sg[i]);
chunk 74 drivers/net/ethernet/mellanox/mlx4/icm.h struct mlx4_icm_chunk *chunk;
chunk 100 drivers/net/ethernet/mellanox/mlx4/icm.h iter->chunk = list_empty(&icm->chunk_list) ?
chunk 108 drivers/net/ethernet/mellanox/mlx4/icm.h return !iter->chunk;
chunk 113 drivers/net/ethernet/mellanox/mlx4/icm.h if (++iter->page_idx >= iter->chunk->nsg) {
chunk 114 drivers/net/ethernet/mellanox/mlx4/icm.h if (iter->chunk->list.next == &iter->icm->chunk_list) {
chunk 115 drivers/net/ethernet/mellanox/mlx4/icm.h iter->chunk = NULL;
chunk 119 drivers/net/ethernet/mellanox/mlx4/icm.h iter->chunk = list_entry(iter->chunk->list.next,
chunk 127 drivers/net/ethernet/mellanox/mlx4/icm.h if (iter->chunk->coherent)
chunk 128 drivers/net/ethernet/mellanox/mlx4/icm.h return iter->chunk->buf[iter->page_idx].dma_addr;
chunk 130 drivers/net/ethernet/mellanox/mlx4/icm.h return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
chunk 135 drivers/net/ethernet/mellanox/mlx4/icm.h if (iter->chunk->coherent)
chunk 136 drivers/net/ethernet/mellanox/mlx4/icm.h return iter->chunk->buf[iter->page_idx].size;
chunk 138 drivers/net/ethernet/mellanox/mlx4/icm.h return sg_dma_len(&iter->chunk->sg[iter->page_idx]);
chunk 722 drivers/net/ethernet/mellanox/mlx4/mr.c int chunk;
chunk 731 drivers/net/ethernet/mellanox/mlx4/mr.c chunk = min_t(int, max_mtts_first_page, npages);
chunk 734 drivers/net/ethernet/mellanox/mlx4/mr.c err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
chunk 737 drivers/net/ethernet/mellanox/mlx4/mr.c npages -= chunk;
chunk 738 drivers/net/ethernet/mellanox/mlx4/mr.c start_index += chunk;
chunk 739 drivers/net/ethernet/mellanox/mlx4/mr.c page_list += chunk;
chunk 741 drivers/net/ethernet/mellanox/mlx4/mr.c chunk = min_t(int, mtts_per_page, npages);
chunk 751 drivers/net/ethernet/mellanox/mlx4/mr.c int chunk;
chunk 765 drivers/net/ethernet/mellanox/mlx4/mr.c chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
chunk 769 drivers/net/ethernet/mellanox/mlx4/mr.c for (i = 0; i < chunk; ++i)
chunk 772 drivers/net/ethernet/mellanox/mlx4/mr.c err = mlx4_WRITE_MTT(dev, mailbox, chunk);
chunk 778 drivers/net/ethernet/mellanox/mlx4/mr.c npages -= chunk;
chunk 779 drivers/net/ethernet/mellanox/mlx4/mr.c start_index += chunk;
chunk 780 drivers/net/ethernet/mellanox/mlx4/mr.c page_list += chunk;
chunk 685 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr :
chunk 686 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c action->dest_tbl.tbl->tx.s_anchor->chunk->icm_addr;
chunk 1102 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c action->rewrite.chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
chunk 1104 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c if (!action->rewrite.chunk)
chunk 1107 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c action->rewrite.index = (action->rewrite.chunk->icm_addr -
chunk 1113 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c mlx5dr_icm_free_chunk(action->rewrite.chunk);
chunk 1418 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c struct mlx5dr_icm_chunk *chunk;
chunk 1435 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool, DR_CHUNK_SIZE_16);
chunk 1436 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c if (!chunk)
chunk 1455 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c action->rewrite.chunk = chunk;
chunk 1459 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c action->rewrite.index = (chunk->icm_addr -
chunk 1472 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c mlx5dr_icm_free_chunk(chunk);
chunk 1568 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c mlx5dr_icm_free_chunk(action->rewrite.chunk);
chunk 1578 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c mlx5dr_icm_free_chunk(action->rewrite.chunk);
chunk 169 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk)
chunk 171 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c struct mlx5dr_icm_bucket *bucket = chunk->bucket;
chunk 173 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c chunk->ste_arr = kvzalloc(bucket->num_of_entries *
chunk 174 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c sizeof(chunk->ste_arr[0]), GFP_KERNEL);
chunk 175 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c if (!chunk->ste_arr)
chunk 178 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c chunk->hw_ste_arr = kvzalloc(bucket->num_of_entries *
chunk 180 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c if (!chunk->hw_ste_arr)
chunk 183 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c chunk->miss_list = kvmalloc(bucket->num_of_entries *
chunk 184 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c sizeof(chunk->miss_list[0]), GFP_KERNEL);
chunk 185 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c if (!chunk->miss_list)
chunk 191 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c kvfree(chunk->hw_ste_arr);
chunk 193 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c kvfree(chunk->ste_arr);
chunk 202 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c struct mlx5dr_icm_chunk *chunk;
chunk 240 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c chunk = kvzalloc(sizeof(*chunk), GFP_KERNEL);
chunk 241 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c if (!chunk) {
chunk 246 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c chunk->bucket = bucket;
chunk 247 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c chunk->rkey = icm_mr->mkey.key;
chunk 249 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c chunk->mr_addr = icm_mr->used_length;
chunk 250 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c chunk->icm_addr = (uintptr_t)icm_mr->icm_start_addr + icm_mr->used_length;
chunk 252 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c chunk->num_of_entries = bucket->num_of_entries;
chunk 253 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c chunk->byte_size = chunk->num_of_entries * bucket->entry_size;
chunk 256 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c err = dr_icm_chunk_ste_init(chunk);
chunk 261 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c INIT_LIST_HEAD(&chunk->chunk_list);
chunk 262 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c list_add(&chunk->chunk_list, &bucket->free_list);
chunk 270 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c kvfree(chunk);
chunk 276 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
chunk 278 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c kvfree(chunk->miss_list);
chunk 279 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c kvfree(chunk->hw_ste_arr);
chunk 280 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c kvfree(chunk->ste_arr);
chunk 283 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk)
chunk 285 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c struct mlx5dr_icm_bucket *bucket = chunk->bucket;
chunk 287 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c list_del(&chunk->chunk_list);
chunk 291 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c dr_icm_chunk_ste_cleanup(chunk);
chunk 293 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c kvfree(chunk);
chunk 316 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c struct mlx5dr_icm_chunk *chunk, *next;
chunk 322 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c list_for_each_entry_safe(chunk, next, &bucket->free_list, chunk_list)
chunk 323 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c dr_icm_chunk_destroy(chunk);
chunk 328 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c list_for_each_entry_safe(chunk, next, &bucket->used_list, chunk_list)
chunk 329 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c dr_icm_chunk_destroy(chunk);
chunk 452 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c struct mlx5dr_icm_chunk *chunk = NULL; /* Fix compilation warning */
chunk 472 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c chunk = NULL;
chunk 482 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c chunk = list_last_entry(&bucket->free_list,
chunk 485 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c if (chunk) {
chunk 486 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c list_del_init(&chunk->chunk_list);
chunk 487 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c list_add_tail(&chunk->chunk_list, &bucket->used_list);
chunk 494 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c return chunk;
chunk 497 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
chunk 499 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c struct mlx5dr_icm_bucket *bucket = chunk->bucket;
chunk 502 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c memset(chunk->ste_arr, 0,
chunk 503 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c bucket->num_of_entries * sizeof(chunk->ste_arr[0]));
chunk 504 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c memset(chunk->hw_ste_arr, 0,
chunk 509 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c list_del_init(&chunk->chunk_list);
chunk 510 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c list_add_tail(&chunk->chunk_list, &bucket->hot_list);
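The mlx4 icm.h entries earlier in this block implement a cursor over a list of chunks, each holding several DMA-mapped ranges: page_idx advances within the current chunk and rolls over to the next chunk in the list, with a NULL chunk marking the end of the walk. A simplified model using plain arrays in place of scatterlists (demo_ names are illustrative):

    #include <stddef.h>

    struct demo_icm_chunk {
        struct demo_icm_chunk *next;   /* NULL-terminated stand-in for list_head */
        int nsg;                       /* mapped entries in this chunk */
        unsigned long addr[8];         /* stand-in for sg_dma_address() values */
    };

    struct demo_icm_iter {
        struct demo_icm_chunk *chunk;
        int page_idx;
    };

    /* Mirrors the icm.h next() entries: step within the chunk, then hop to
     * the following chunk; iter->chunk == NULL means the walk is done. */
    static void demo_icm_next(struct demo_icm_iter *iter)
    {
        if (++iter->page_idx >= iter->chunk->nsg) {
            iter->chunk = iter->chunk->next;
            iter->page_idx = 0;
        }
    }

    static unsigned long demo_icm_addr(const struct demo_icm_iter *iter)
    {
        return iter->chunk->addr[iter->page_idx];
    }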
chunk 408 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c info.miss_icm_addr = curr_nic_matcher->e_anchor->chunk->icm_addr;
chunk 60 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
chunk 240 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
chunk 393 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
chunk 440 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c new_htbl->chunk->icm_addr,
chunk 441 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c new_htbl->chunk->num_of_entries);
chunk 734 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
chunk 792 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c miss_list = &cur_htbl->chunk->miss_list[index];
chunk 393 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c if (htbl->chunk->byte_size > dmn->send_ring->max_post_send_size) {
chunk 394 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c *iterations = htbl->chunk->byte_size /
chunk 401 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c *num_stes = htbl->chunk->num_of_entries;
chunk 435 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c send_info.rkey = ste->htbl->chunk->rkey;
chunk 444 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c u32 byte_size = htbl->chunk->byte_size;
chunk 488 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c send_info.rkey = htbl->chunk->rkey;
chunk 506 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c u32 byte_size = htbl->chunk->byte_size;
chunk 542 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c send_info.rkey = htbl->chunk->rkey;
chunk 564 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c send_info.remote_addr = action->rewrite.chunk->mr_addr;
chunk 565 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c send_info.rkey = action->rewrite.chunk->rkey;
chunk 119 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0)
chunk 132 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index = crc32 & (htbl->chunk->num_of_entries - 1);
chunk 288 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index;
chunk 295 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index;
chunk 308 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
chunk 313 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
chunk 367 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
chunk 521 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
chunk 523 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
chunk 628 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
chunk 666 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c struct mlx5dr_icm_chunk *chunk;
chunk 674 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
chunk 675 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c if (!chunk)
chunk 678 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c htbl->chunk = chunk;
chunk 681 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c htbl->ste_arr = chunk->ste_arr;
chunk 682 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c htbl->hw_ste_arr = chunk->hw_ste_arr;
chunk 683 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c htbl->miss_list = chunk->miss_list;
chunk 686 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c for (i = 0; i < chunk->num_of_entries; i++) {
chunk 711 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c mlx5dr_icm_free_chunk(htbl->chunk);
chunk 32 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr :
chunk 56 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c action->dest_tbl.tbl->tx.s_anchor->chunk->icm_addr :
chunk 219 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c icm_addr_rx = tbl->rx.s_anchor->chunk->icm_addr;
chunk 222 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c icm_addr_tx = tbl->tx.s_anchor->chunk->icm_addr;
chunk 58 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h mlx5dr_icm_next_higher_chunk(enum mlx5dr_icm_chunk_size chunk)
chunk 60 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h chunk += 2;
chunk 61 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h if (chunk < DR_CHUNK_SIZE_MAX)
chunk 62 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h return chunk;
chunk 153 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h struct mlx5dr_icm_chunk *chunk;
chunk 719 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h struct mlx5dr_icm_chunk *chunk;
chunk 951 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk);
chunk 157 drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
chunk 159 drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c mlxsw_sp_acl_ctcam_chunk_init(&region->cregion, &chunk->cchunk,
chunk 165 drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
chunk 167 drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c mlxsw_sp_acl_ctcam_chunk_fini(&chunk->cchunk);
chunk 176 drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
chunk 180 drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c &chunk->cchunk, &entry->centry,
chunk 189 drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
chunk 193 drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c &chunk->cchunk, &entry->centry);
chunk 187 drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
chunk 189 drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c mlxsw_sp_acl_atcam_chunk_init(&region->aregion, &chunk->achunk,
chunk 195 drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
chunk 197 drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c mlxsw_sp_acl_atcam_chunk_fini(&chunk->achunk);
chunk 206 drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
chunk 211 drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c &chunk->achunk, &entry->aentry,
chunk 220 drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
chunk 223 drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c mlxsw_sp_acl_atcam_entry_del(mlxsw_sp, &region->aregion, &chunk->achunk,
chunk 125 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c char *chunk = output;
chunk 134 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c memset(chunk, 0, MLXSW_BLOOM_CHUNK_PAD_BYTES);
chunk 135 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c memcpy(chunk + MLXSW_BLOOM_CHUNK_PAD_BYTES, &erp_region_id,
chunk 137 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c memcpy(chunk + MLXSW_BLOOM_CHUNK_KEY_OFFSET,
chunk 140 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c chunk += MLXSW_BLOOM_KEY_CHUNK_BYTES;
chunk 232 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c struct mlxsw_sp_acl_tcam_chunk *chunk;
chunk 245 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c struct mlxsw_sp_acl_tcam_chunk *chunk;
chunk 961 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c struct mlxsw_sp_acl_tcam_chunk *chunk;
chunk 963 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
chunk 964 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c if (!chunk)
chunk 966 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c chunk->vchunk = vchunk;
chunk 967 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c chunk->region = region;
chunk 969 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c ops->chunk_init(region->priv, chunk->priv, vchunk->priority);
chunk 970 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c return chunk;
chunk 975 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c struct mlxsw_sp_acl_tcam_chunk *chunk)
chunk 979 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c ops->chunk_fini(chunk->priv);
chunk 980 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c kfree(chunk);
chunk 1020 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk,
chunk 1022 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c if (IS_ERR(vchunk->chunk)) {
chunk 1024 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c err = PTR_ERR(vchunk->chunk);
chunk 1063 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
chunk 1104 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c struct mlxsw_sp_acl_tcam_chunk *chunk)
chunk 1114 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c entry->chunk = chunk;
chunk 1116 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c err = ops->entry_add(mlxsw_sp, chunk->region->priv, chunk->priv,
chunk 1133 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c ops->entry_del(mlxsw_sp, entry->chunk->region->priv,
chunk 1134 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c entry->chunk->priv, entry->priv);
chunk 1157 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c return ops->entry_activity_get(mlxsw_sp, entry->chunk->region->priv,
chunk 1181 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c vchunk->chunk);
chunk 1237 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c struct mlxsw_sp_acl_tcam_chunk *chunk,
chunk 1243 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c if (ventry->entry->chunk == chunk)
chunk 1249 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c new_entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk);
chunk 1268 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c vchunk->chunk2 = vchunk->chunk;
chunk 1269 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c vchunk->chunk = new_chunk;
chunk 1296 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c if (vchunk->chunk->region != region) {
chunk 1323 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c vchunk->chunk, credits);
chunk 1336 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c swap(vchunk->chunk, vchunk->chunk2);
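The dr_ste.c entries above pick a hash-table slot by masking a CRC32 of the packed key with (num_of_entries - 1), which works because ICM chunk entry counts are powers of two; single-entry tables and empty byte masks bypass hashing entirely. A sketch of just the slot selection (the hash input here is any 32-bit value):

    #include <stdint.h>

    /* Power-of-two table sizes let the modulo reduce to a mask, as in
     * "index = crc32 & (htbl->chunk->num_of_entries - 1)" above. */
    static uint32_t demo_ste_slot(uint32_t crc32, uint32_t num_of_entries)
    {
        if (num_of_entries == 1)
            return 0;   /* a one-entry table needs no hashing */
        return crc32 & (num_of_entries - 1);
    }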
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c memcpy(chunks[i].chunk, arg->in_buf + off, coff); chunk 537 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c memset(chunks[i].chunk + coff, 0, chunk_size - coff); chunk 547 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c addr = dma_map_single(dev, chunks[i].chunk, chunks[i].len, chunk 595 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c memcpy(arg->out_buf + off, chunks[i].chunk, len); chunk 613 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c kfree(chunks[i].chunk); chunk 2208 drivers/net/ethernet/sfc/mcdi.c size_t chunk; chunk 2212 drivers/net/ethernet/sfc/mcdi.c chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); chunk 2214 drivers/net/ethernet/sfc/mcdi.c buffer, chunk); chunk 2217 drivers/net/ethernet/sfc/mcdi.c offset += chunk; chunk 2218 drivers/net/ethernet/sfc/mcdi.c buffer += chunk; chunk 2231 drivers/net/ethernet/sfc/mcdi.c size_t chunk = part->common.mtd.erasesize; chunk 2246 drivers/net/ethernet/sfc/mcdi.c chunk); chunk 2249 drivers/net/ethernet/sfc/mcdi.c offset += chunk; chunk 2262 drivers/net/ethernet/sfc/mcdi.c size_t chunk; chunk 2273 drivers/net/ethernet/sfc/mcdi.c chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); chunk 2275 drivers/net/ethernet/sfc/mcdi.c buffer, chunk); chunk 2278 drivers/net/ethernet/sfc/mcdi.c offset += chunk; chunk 2279 drivers/net/ethernet/sfc/mcdi.c buffer += chunk; chunk 643 drivers/net/wimax/i2400m/fw.c static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk, chunk 657 drivers/net/wimax/i2400m/fw.c "direct %u do_csum %u)\n", i2400m, chunk, __chunk_len, chunk 660 drivers/net/wimax/i2400m/fw.c memcpy(buf->cmd_payload, chunk, __chunk_len); chunk 673 drivers/net/wimax/i2400m/fw.c "direct %u do_csum %u) = %d\n", i2400m, chunk, __chunk_len, chunk 529 drivers/net/wireless/ath/ar5523/ar5523.c struct ar5523_chunk *chunk; chunk 550 drivers/net/wireless/ath/ar5523/ar5523.c chunk = (struct ar5523_chunk *) data->skb->data; chunk 552 drivers/net/wireless/ath/ar5523/ar5523.c if (((chunk->flags & UATH_CFLAGS_FINAL) == 0) || chunk 553 drivers/net/wireless/ath/ar5523/ar5523.c chunk->seqnum != 0) { chunk 555 drivers/net/wireless/ath/ar5523/ar5523.c chunk->seqnum, chunk->flags, chunk 556 drivers/net/wireless/ath/ar5523/ar5523.c be16_to_cpu(chunk->length)); chunk 582 drivers/net/wireless/ath/ar5523/ar5523.c skb_reserve(data->skb, sizeof(*chunk)); chunk 792 drivers/net/wireless/ath/ar5523/ar5523.c struct ar5523_chunk *chunk; chunk 834 drivers/net/wireless/ath/ar5523/ar5523.c chunk = skb_push(skb, sizeof(*chunk)); chunk 836 drivers/net/wireless/ath/ar5523/ar5523.c chunk->seqnum = 0; chunk 837 drivers/net/wireless/ath/ar5523/ar5523.c chunk->flags = UATH_CFLAGS_FINAL; chunk 838 drivers/net/wireless/ath/ar5523/ar5523.c chunk->length = cpu_to_be16(skb->len); chunk 1652 drivers/net/wireless/ath/ath10k/wmi-tlv.c struct host_memory_chunk *chunk; chunk 1658 drivers/net/wireless/ath/ath10k/wmi-tlv.c tlv_len = __cpu_to_le16(sizeof(*chunk)); chunk 1663 drivers/net/wireless/ath/ath10k/wmi-tlv.c chunk = (void *)tlv->value; chunk 1665 drivers/net/wireless/ath/ath10k/wmi-tlv.c chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); chunk 1666 drivers/net/wireless/ath/ath10k/wmi-tlv.c chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len); chunk 1667 drivers/net/wireless/ath/ath10k/wmi-tlv.c chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); chunk 1677 drivers/net/wireless/ath/ath10k/wmi-tlv.c host_mem_chunks += sizeof(*chunk); chunk 6426 
drivers/net/wireless/ath/ath10k/wmi.c struct host_memory_chunk *chunk; chunk 6432 drivers/net/wireless/ath/ath10k/wmi.c chunk = &chunks->items[i]; chunk 6433 drivers/net/wireless/ath/ath10k/wmi.c chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); chunk 6434 drivers/net/wireless/ath/ath10k/wmi.c chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len); chunk 6435 drivers/net/wireless/ath/ath10k/wmi.c chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); chunk 3189 drivers/net/wireless/intel/ipw2x00/ipw2200.c struct fw_chunk *chunk; chunk 3230 drivers/net/wireless/intel/ipw2x00/ipw2200.c chunk = (struct fw_chunk *)(data + offset); chunk 3232 drivers/net/wireless/intel/ipw2x00/ipw2200.c chunk_len = le32_to_cpu(chunk->length); chunk 3257 drivers/net/wireless/intel/ipw2x00/ipw2200.c nr, le32_to_cpu(chunk->address), chunk 320 drivers/net/wireless/marvell/libertas/if_sdio.c u16 size, type, chunk; chunk 342 drivers/net/wireless/marvell/libertas/if_sdio.c chunk = sdio_align_size(card->func, size); chunk 344 drivers/net/wireless/marvell/libertas/if_sdio.c ret = sdio_readsb(card->func, card->buffer, card->ioport, chunk); chunk 348 drivers/net/wireless/marvell/libertas/if_sdio.c chunk = card->buffer[0] | (card->buffer[1] << 8); chunk 352 drivers/net/wireless/marvell/libertas/if_sdio.c (int)type, (int)chunk); chunk 354 drivers/net/wireless/marvell/libertas/if_sdio.c if (chunk > size) { chunk 356 drivers/net/wireless/marvell/libertas/if_sdio.c (int)chunk, (int)size); chunk 361 drivers/net/wireless/marvell/libertas/if_sdio.c if (chunk < size) { chunk 363 drivers/net/wireless/marvell/libertas/if_sdio.c (int)chunk, (int)size); chunk 368 drivers/net/wireless/marvell/libertas/if_sdio.c ret = if_sdio_handle_cmd(card, card->buffer + 4, chunk - 4); chunk 373 drivers/net/wireless/marvell/libertas/if_sdio.c ret = if_sdio_handle_data(card, card->buffer + 4, chunk - 4); chunk 378 drivers/net/wireless/marvell/libertas/if_sdio.c ret = if_sdio_handle_event(card, card->buffer + 4, chunk - 4); chunk 165 drivers/net/wireless/ti/wlcore/boot.c u8 *p, *chunk; chunk 180 drivers/net/wireless/ti/wlcore/boot.c chunk = kmalloc(CHUNK_SIZE, GFP_KERNEL); chunk 181 drivers/net/wireless/ti/wlcore/boot.c if (!chunk) { chunk 212 drivers/net/wireless/ti/wlcore/boot.c memcpy(chunk, p, CHUNK_SIZE); chunk 215 drivers/net/wireless/ti/wlcore/boot.c ret = wlcore_write(wl, addr, chunk, CHUNK_SIZE, false); chunk 225 drivers/net/wireless/ti/wlcore/boot.c memcpy(chunk, p, fw_data_len % CHUNK_SIZE); chunk 228 drivers/net/wireless/ti/wlcore/boot.c ret = wlcore_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false); chunk 231 drivers/net/wireless/ti/wlcore/boot.c kfree(chunk); chunk 661 drivers/nfc/pn544/i2c.c struct pn544_i2c_fw_secure_frame *chunk; chunk 668 drivers/nfc/pn544/i2c.c chunk = (struct pn544_i2c_fw_secure_frame *) buf; chunk 670 drivers/nfc/pn544/i2c.c chunk->cmd = PN544_FW_CMD_SECURE_CHUNK_WRITE; chunk 672 drivers/nfc/pn544/i2c.c put_unaligned_be16(datalen, &chunk->be_datalen); chunk 674 drivers/nfc/pn544/i2c.c memcpy(chunk->data, data, datalen); chunk 676 drivers/nfc/pn544/i2c.c chunklen = sizeof(chunk->cmd) + sizeof(chunk->be_datalen) + datalen; chunk 518 drivers/nvdimm/btt.c unsigned long chunk = min(len, PAGE_SIZE); chunk 521 drivers/nvdimm/btt.c chunk, 0); chunk 524 drivers/nvdimm/btt.c len -= chunk; chunk 525 drivers/nvdimm/btt.c nsoff += chunk; chunk 399 drivers/nvdimm/pfn_devs.c unsigned long chunk = min(zero_len, PAGE_SIZE); chunk 402 drivers/nvdimm/pfn_devs.c chunk, 0); chunk 406 drivers/nvdimm/pfn_devs.c 
zero_len -= chunk; chunk 407 drivers/nvdimm/pfn_devs.c nsoff += chunk; chunk 103 drivers/nvdimm/pmem.c unsigned int chunk; chunk 108 drivers/nvdimm/pmem.c chunk = min_t(unsigned int, len, PAGE_SIZE - off); chunk 109 drivers/nvdimm/pmem.c memcpy_flushcache(pmem_addr, mem + off, chunk); chunk 111 drivers/nvdimm/pmem.c len -= chunk; chunk 114 drivers/nvdimm/pmem.c pmem_addr += chunk; chunk 121 drivers/nvdimm/pmem.c unsigned int chunk; chunk 127 drivers/nvdimm/pmem.c chunk = min_t(unsigned int, len, PAGE_SIZE - off); chunk 128 drivers/nvdimm/pmem.c rem = memcpy_mcsafe(mem + off, pmem_addr, chunk); chunk 132 drivers/nvdimm/pmem.c len -= chunk; chunk 135 drivers/nvdimm/pmem.c pmem_addr += chunk; chunk 226 drivers/nvmem/rave-sp-eeprom.c unsigned int chunk; chunk 243 drivers/nvmem/rave-sp-eeprom.c chunk = RAVE_SP_EEPROM_PAGE_SIZE - head; chunk 252 drivers/nvmem/rave-sp-eeprom.c chunk = RAVE_SP_EEPROM_PAGE_SIZE; chunk 258 drivers/nvmem/rave-sp-eeprom.c chunk = min(chunk, residue); chunk 260 drivers/nvmem/rave-sp-eeprom.c data, chunk); chunk 264 drivers/nvmem/rave-sp-eeprom.c residue -= chunk; chunk 265 drivers/nvmem/rave-sp-eeprom.c offset += chunk; chunk 266 drivers/nvmem/rave-sp-eeprom.c data += chunk; chunk 1236 drivers/s390/block/dasd.c char *data, *chunk; chunk 1247 drivers/s390/block/dasd.c data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size); chunk 1249 drivers/s390/block/dasd.c if (!chunk) chunk 1256 drivers/s390/block/dasd.c cqr->mem_chunk = chunk; chunk 651 drivers/s390/block/dasd_int.h struct dasd_mchunk *chunk; chunk 654 drivers/s390/block/dasd_int.h chunk = (struct dasd_mchunk *) mem; chunk 655 drivers/s390/block/dasd_int.h chunk->size = size - sizeof(struct dasd_mchunk); chunk 656 drivers/s390/block/dasd_int.h list_add(&chunk->list, chunk_list); chunk 662 drivers/s390/block/dasd_int.h struct dasd_mchunk *chunk, *tmp; chunk 665 drivers/s390/block/dasd_int.h list_for_each_entry(chunk, chunk_list, list) { chunk 666 drivers/s390/block/dasd_int.h if (chunk->size < size) chunk 668 drivers/s390/block/dasd_int.h if (chunk->size > size + sizeof(struct dasd_mchunk)) { chunk 669 drivers/s390/block/dasd_int.h char *endaddr = (char *) (chunk + 1) + chunk->size; chunk 672 drivers/s390/block/dasd_int.h chunk->size -= size + sizeof(struct dasd_mchunk); chunk 673 drivers/s390/block/dasd_int.h chunk = tmp; chunk 675 drivers/s390/block/dasd_int.h list_del(&chunk->list); chunk 676 drivers/s390/block/dasd_int.h return (void *) (chunk + 1); chunk 684 drivers/s390/block/dasd_int.h struct dasd_mchunk *chunk, *tmp; chunk 687 drivers/s390/block/dasd_int.h chunk = (struct dasd_mchunk *) chunk 692 drivers/s390/block/dasd_int.h if (list_entry(p, struct dasd_mchunk, list) > chunk) chunk 699 drivers/s390/block/dasd_int.h if ((char *) (chunk + 1) + chunk->size == (char *) tmp) { chunk 701 drivers/s390/block/dasd_int.h chunk->size += tmp->size + sizeof(struct dasd_mchunk); chunk 707 drivers/s390/block/dasd_int.h if ((char *) (tmp + 1) + tmp->size == (char *) chunk) { chunk 708 drivers/s390/block/dasd_int.h tmp->size += chunk->size + sizeof(struct dasd_mchunk); chunk 712 drivers/s390/block/dasd_int.h __list_add(&chunk->list, left, left->next); chunk 1121 drivers/s390/cio/css.c struct gen_pool_chunk *chunk, void *data) chunk 1123 drivers/s390/cio/css.c size_t chunk_size = chunk->end_addr - chunk->start_addr + 1; chunk 1126 drivers/s390/cio/css.c (void *) chunk->start_addr, chunk 1127 drivers/s390/cio/css.c (dma_addr_t) chunk->phys_addr); chunk 182 drivers/s390/cio/itcw.c void *chunk; chunk 194 
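
The nvdimm entries above (pmem.c, btt.c, pfn_devs.c) all clamp each step to the remainder of the current page, chunk = min_t(..., len, PAGE_SIZE - off), before copying and advancing every cursor by the chunk just handled. A small self-contained sketch of that page-bounded copy loop; copy_from_pages() and the demo buffers are illustrative, not kernel code.

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096u

/* Gather 'len' bytes starting 'off' bytes into pages[0]; never let a
 * single memcpy() cross a page boundary, exactly as the
 * min_t(unsigned int, len, PAGE_SIZE - off) clamp above does. */
static void copy_from_pages(char *dst, char *const pages[],
			    size_t off, size_t len)
{
	size_t i = 0;

	while (len) {
		size_t chunk = len < PAGE_SIZE - off ? len : PAGE_SIZE - off;

		memcpy(dst, pages[i] + off, chunk);
		dst += chunk;
		len -= chunk;
		off = 0;	/* only the first page starts mid-page */
		i++;
	}
}

int main(void)
{
	static char page0[PAGE_SIZE], page1[PAGE_SIZE];
	char *pages[] = { page0, page1 };
	static char out[6000];

	memset(page0, 'a', sizeof(page0));
	memset(page1, 'b', sizeof(page1));
	copy_from_pages(out, pages, 100, 5000);
	printf("first %c, last %c\n", out[0], out[4999]);	/* a, b */
	return 0;
}
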
drivers/s390/cio/itcw.c chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0); chunk 195 drivers/s390/cio/itcw.c if (IS_ERR(chunk)) chunk 196 drivers/s390/cio/itcw.c return chunk; chunk 197 drivers/s390/cio/itcw.c itcw = chunk; chunk 210 drivers/s390/cio/itcw.c chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); chunk 211 drivers/s390/cio/itcw.c if (IS_ERR(chunk)) chunk 212 drivers/s390/cio/itcw.c return chunk; chunk 213 drivers/s390/cio/itcw.c itcw->tcw = chunk; chunk 218 drivers/s390/cio/itcw.c chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); chunk 219 drivers/s390/cio/itcw.c if (IS_ERR(chunk)) chunk 220 drivers/s390/cio/itcw.c return chunk; chunk 221 drivers/s390/cio/itcw.c itcw->intrg_tcw = chunk; chunk 227 drivers/s390/cio/itcw.c chunk = fit_chunk(&start, end, sizeof(struct tidaw) * chunk 229 drivers/s390/cio/itcw.c if (IS_ERR(chunk)) chunk 230 drivers/s390/cio/itcw.c return chunk; chunk 231 drivers/s390/cio/itcw.c tcw_set_data(itcw->tcw, chunk, 1); chunk 235 drivers/s390/cio/itcw.c chunk = fit_chunk(&start, end, sizeof(struct tidaw) * chunk 237 drivers/s390/cio/itcw.c if (IS_ERR(chunk)) chunk 238 drivers/s390/cio/itcw.c return chunk; chunk 239 drivers/s390/cio/itcw.c tcw_set_data(itcw->intrg_tcw, chunk, 1); chunk 242 drivers/s390/cio/itcw.c chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0); chunk 243 drivers/s390/cio/itcw.c if (IS_ERR(chunk)) chunk 244 drivers/s390/cio/itcw.c return chunk; chunk 245 drivers/s390/cio/itcw.c tsb_init(chunk); chunk 246 drivers/s390/cio/itcw.c tcw_set_tsb(itcw->tcw, chunk); chunk 249 drivers/s390/cio/itcw.c chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0); chunk 250 drivers/s390/cio/itcw.c if (IS_ERR(chunk)) chunk 251 drivers/s390/cio/itcw.c return chunk; chunk 252 drivers/s390/cio/itcw.c tsb_init(chunk); chunk 253 drivers/s390/cio/itcw.c tcw_set_tsb(itcw->intrg_tcw, chunk); chunk 256 drivers/s390/cio/itcw.c chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0); chunk 257 drivers/s390/cio/itcw.c if (IS_ERR(chunk)) chunk 258 drivers/s390/cio/itcw.c return chunk; chunk 259 drivers/s390/cio/itcw.c tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT); chunk 260 drivers/s390/cio/itcw.c tcw_set_tccb(itcw->tcw, chunk); chunk 263 drivers/s390/cio/itcw.c chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0); chunk 264 drivers/s390/cio/itcw.c if (IS_ERR(chunk)) chunk 265 drivers/s390/cio/itcw.c return chunk; chunk 266 drivers/s390/cio/itcw.c tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_INTRG); chunk 267 drivers/s390/cio/itcw.c tcw_set_tccb(itcw->intrg_tcw, chunk); chunk 268 drivers/s390/cio/itcw.c tccb_add_dcw(chunk, TCCB_MAX_SIZE, DCW_CMD_INTRG, 0, NULL, chunk 715 drivers/scsi/ips.h IPS_CHUNK chunk[IPS_MAX_CHUNKS]; chunk 116 drivers/scsi/qla2xxx/qla_dbg.c uint32_t *chunk = (void *)ha->gid_list; chunk 181 drivers/scsi/qla2xxx/qla_dbg.c chunk[j] : swab32(chunk[j]); chunk 196 drivers/scsi/qla2xxx/qla_dbg.c uint32_t *chunk = (void *)ha->gid_list; chunk 257 drivers/scsi/qla2xxx/qla_dbg.c chunk[j] : swab32(chunk[j]); chunk 848 drivers/thermal/ti-soc-thermal/ti-bandgap.c void __iomem *chunk; chunk 853 drivers/thermal/ti-soc-thermal/ti-bandgap.c chunk = devm_ioremap_resource(&pdev->dev, res); chunk 855 drivers/thermal/ti-soc-thermal/ti-bandgap.c bgp->base = chunk; chunk 856 drivers/thermal/ti-soc-thermal/ti-bandgap.c if (IS_ERR(chunk)) chunk 857 drivers/thermal/ti-soc-thermal/ti-bandgap.c return ERR_CAST(chunk); chunk 487 drivers/tty/serial/qcom_geni_serial.c int chunk = min_t(int, bytes - i, port->rx_bytes_pw); chunk 490 
drivers/tty/serial/qcom_geni_serial.c i += chunk; chunk 494 drivers/tty/serial/qcom_geni_serial.c for (c = 0; c < chunk; c++) { chunk 688 drivers/tty/serial/qcom_geni_serial.c unsigned int chunk; chunk 709 drivers/tty/serial/qcom_geni_serial.c chunk = min(avail, pending); chunk 710 drivers/tty/serial/qcom_geni_serial.c if (!chunk) chunk 723 drivers/tty/serial/qcom_geni_serial.c remaining = chunk; chunk 724 drivers/tty/serial/qcom_geni_serial.c for (i = 0; i < chunk; ) { chunk 908 drivers/tty/tty_io.c unsigned int chunk; chunk 930 drivers/tty/tty_io.c chunk = 2048; chunk 932 drivers/tty/tty_io.c chunk = 65536; chunk 933 drivers/tty/tty_io.c if (count < chunk) chunk 934 drivers/tty/tty_io.c chunk = count; chunk 937 drivers/tty/tty_io.c if (tty->write_cnt < chunk) { chunk 940 drivers/tty/tty_io.c if (chunk < 1024) chunk 941 drivers/tty/tty_io.c chunk = 1024; chunk 943 drivers/tty/tty_io.c buf_chunk = kmalloc(chunk, GFP_KERNEL); chunk 949 drivers/tty/tty_io.c tty->write_cnt = chunk; chunk 956 drivers/tty/tty_io.c if (size > chunk) chunk 957 drivers/tty/tty_io.c size = chunk; chunk 915 drivers/usb/early/ehci-dbgp.c int chunk, ret; chunk 940 drivers/usb/early/ehci-dbgp.c for (chunk = 0; chunk < DBGP_MAX_PACKET && n > 0; chunk 941 drivers/usb/early/ehci-dbgp.c str++, chunk++, n--) { chunk 944 drivers/usb/early/ehci-dbgp.c buf[chunk] = '\r'; chunk 951 drivers/usb/early/ehci-dbgp.c buf[chunk] = *str; chunk 953 drivers/usb/early/ehci-dbgp.c if (chunk > 0) { chunk 955 drivers/usb/early/ehci-dbgp.c dbgp_endpoint_out, buf, chunk); chunk 874 drivers/usb/early/xhci-dbc.c int chunk, ret; chunk 881 drivers/usb/early/xhci-dbc.c for (chunk = 0; chunk < XDBC_MAX_PACKET && n > 0; str++, chunk++, n--) { chunk 885 drivers/usb/early/xhci-dbc.c buf[chunk] = '\r'; chunk 893 drivers/usb/early/xhci-dbc.c buf[chunk] = *str; chunk 896 drivers/usb/early/xhci-dbc.c if (chunk > 0) { chunk 897 drivers/usb/early/xhci-dbc.c ret = xdbc_bulk_write(buf, chunk); chunk 185 drivers/usb/gadget/udc/aspeed-vhub/ep0.c unsigned int chunk; chunk 208 drivers/usb/gadget/udc/aspeed-vhub/ep0.c chunk = req->req.length - req->req.actual; chunk 209 drivers/usb/gadget/udc/aspeed-vhub/ep0.c if (chunk > ep->ep.maxpacket) chunk 210 drivers/usb/gadget/udc/aspeed-vhub/ep0.c chunk = ep->ep.maxpacket; chunk 211 drivers/usb/gadget/udc/aspeed-vhub/ep0.c else if ((chunk < ep->ep.maxpacket) || !req->req.zero) chunk 215 drivers/usb/gadget/udc/aspeed-vhub/ep0.c chunk, req->last_desc, req->req.actual, ep->ep.maxpacket); chunk 221 drivers/usb/gadget/udc/aspeed-vhub/ep0.c if (chunk && req->req.buf) chunk 222 drivers/usb/gadget/udc/aspeed-vhub/ep0.c memcpy(ep->buf, req->req.buf + req->req.actual, chunk); chunk 227 drivers/usb/gadget/udc/aspeed-vhub/ep0.c reg = VHUB_EP0_SET_TX_LEN(chunk); chunk 230 drivers/usb/gadget/udc/aspeed-vhub/ep0.c req->req.actual += chunk; chunk 50 drivers/usb/gadget/udc/aspeed-vhub/epn.c unsigned int chunk; chunk 56 drivers/usb/gadget/udc/aspeed-vhub/epn.c chunk = len - act; chunk 57 drivers/usb/gadget/udc/aspeed-vhub/epn.c if (chunk > ep->ep.maxpacket) chunk 58 drivers/usb/gadget/udc/aspeed-vhub/epn.c chunk = ep->ep.maxpacket; chunk 59 drivers/usb/gadget/udc/aspeed-vhub/epn.c else if ((chunk < ep->ep.maxpacket) || !req->req.zero) chunk 63 drivers/usb/gadget/udc/aspeed-vhub/epn.c req, act, len, chunk, req->last_desc); chunk 70 drivers/usb/gadget/udc/aspeed-vhub/epn.c memcpy(ep->buf, req->req.buf + act, chunk); chunk 82 drivers/usb/gadget/udc/aspeed-vhub/epn.c writel(VHUB_EP_DMA_SET_TX_SIZE(chunk), chunk 84 
drivers/usb/gadget/udc/aspeed-vhub/epn.c writel(VHUB_EP_DMA_SET_TX_SIZE(chunk) | VHUB_EP_DMA_SINGLE_KICK, chunk 172 drivers/usb/gadget/udc/aspeed-vhub/epn.c unsigned int chunk; chunk 194 drivers/usb/gadget/udc/aspeed-vhub/epn.c chunk = len - act; chunk 195 drivers/usb/gadget/udc/aspeed-vhub/epn.c if (chunk <= ep->epn.chunk_max) { chunk 204 drivers/usb/gadget/udc/aspeed-vhub/epn.c if (!chunk || !req->req.zero || (chunk % ep->ep.maxpacket) != 0) chunk 207 drivers/usb/gadget/udc/aspeed-vhub/epn.c chunk = ep->epn.chunk_max; chunk 211 drivers/usb/gadget/udc/aspeed-vhub/epn.c act, len, chunk, req->last_desc, d_num, chunk 227 drivers/usb/gadget/udc/aspeed-vhub/epn.c desc->w1 = cpu_to_le32(VHUB_DSC1_IN_SET_LEN(chunk)); chunk 232 drivers/usb/gadget/udc/aspeed-vhub/epn.c req->act_count = act = act + chunk; chunk 286 drivers/usb/usb-skeleton.c size_t chunk = min(available, count); chunk 306 drivers/usb/usb-skeleton.c chunk)) chunk 309 drivers/usb/usb-skeleton.c rv = chunk; chunk 311 drivers/usb/usb-skeleton.c dev->bulk_in_copied += chunk; chunk 318 drivers/usb/usb-skeleton.c skel_do_read_io(dev, count - chunk); chunk 103 drivers/zorro/zorro.c u32 chunk = start>>Z2RAM_CHUNKSHIFT; chunk 106 drivers/zorro/zorro.c set_bit(chunk, zorro_unused_z2ram); chunk 108 drivers/zorro/zorro.c clear_bit(chunk, zorro_unused_z2ram); chunk 432 fs/btrfs/block-group.c u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ? chunk 434 fs/btrfs/block-group.c u64 step = chunk << 1; chunk 436 fs/btrfs/block-group.c while (len > chunk) { chunk 437 fs/btrfs/block-group.c btrfs_remove_free_space(block_group, start, chunk); chunk 10 fs/btrfs/print-tree.c static void print_chunk(struct extent_buffer *eb, struct btrfs_chunk *chunk) chunk 12 fs/btrfs/print-tree.c int num_stripes = btrfs_chunk_num_stripes(eb, chunk); chunk 15 fs/btrfs/print-tree.c btrfs_chunk_length(eb, chunk), btrfs_chunk_owner(eb, chunk), chunk 16 fs/btrfs/print-tree.c btrfs_chunk_type(eb, chunk), num_stripes); chunk 19 fs/btrfs/print-tree.c btrfs_stripe_devid_nr(eb, chunk, i), chunk 20 fs/btrfs/print-tree.c btrfs_stripe_offset_nr(eb, chunk, i)); chunk 521 fs/btrfs/tree-checker.c const struct btrfs_chunk *chunk, u64 logical, chunk 541 fs/btrfs/tree-checker.c (unsigned long)chunk) { chunk 570 fs/btrfs/tree-checker.c struct btrfs_chunk *chunk, u64 logical) chunk 581 fs/btrfs/tree-checker.c length = btrfs_chunk_length(leaf, chunk); chunk 582 fs/btrfs/tree-checker.c stripe_len = btrfs_chunk_stripe_len(leaf, chunk); chunk 583 fs/btrfs/tree-checker.c num_stripes = btrfs_chunk_num_stripes(leaf, chunk); chunk 584 fs/btrfs/tree-checker.c sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); chunk 585 fs/btrfs/tree-checker.c type = btrfs_chunk_type(leaf, chunk); chunk 588 fs/btrfs/tree-checker.c chunk_err(leaf, chunk, logical, chunk 593 fs/btrfs/tree-checker.c chunk_err(leaf, chunk, logical, chunk 598 fs/btrfs/tree-checker.c if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) { chunk 599 fs/btrfs/tree-checker.c chunk_err(leaf, chunk, logical, chunk 601 fs/btrfs/tree-checker.c btrfs_chunk_sector_size(leaf, chunk), chunk 606 fs/btrfs/tree-checker.c chunk_err(leaf, chunk, logical, chunk 611 fs/btrfs/tree-checker.c chunk_err(leaf, chunk, logical, chunk 618 fs/btrfs/tree-checker.c chunk_err(leaf, chunk, logical, chunk 622 fs/btrfs/tree-checker.c btrfs_chunk_type(leaf, chunk)); chunk 628 fs/btrfs/tree-checker.c chunk_err(leaf, chunk, logical, chunk 634 fs/btrfs/tree-checker.c chunk_err(leaf, chunk, logical, chunk 642 fs/btrfs/tree-checker.c chunk_err(leaf, chunk, 
logical, chunk 655 fs/btrfs/tree-checker.c chunk_err(leaf, chunk, logical, chunk 667 fs/btrfs/tree-checker.c chunk_err(leaf, chunk, logical, chunk 1249 fs/btrfs/tree-checker.c struct btrfs_chunk *chunk; chunk 1267 fs/btrfs/tree-checker.c chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); chunk 1268 fs/btrfs/tree-checker.c ret = btrfs_check_chunk_valid(leaf, chunk, key->offset); chunk 27 fs/btrfs/tree-checker.h struct btrfs_chunk *chunk, u64 logical); chunk 2933 fs/btrfs/volumes.c struct btrfs_chunk *chunk; chunk 2955 fs/btrfs/volumes.c chunk = (struct btrfs_chunk *)(ptr + len); chunk 2956 fs/btrfs/volumes.c num_stripes = btrfs_stack_chunk_num_stripes(chunk); chunk 3148 fs/btrfs/volumes.c struct btrfs_chunk *chunk; chunk 3186 fs/btrfs/volumes.c chunk = btrfs_item_ptr(leaf, path->slots[0], chunk 3188 fs/btrfs/volumes.c chunk_type = btrfs_chunk_type(leaf, chunk); chunk 3489 fs/btrfs/volumes.c struct btrfs_chunk *chunk, chunk 3493 fs/btrfs/volumes.c int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); chunk 3497 fs/btrfs/volumes.c stripe = btrfs_stripe_nr(chunk, i); chunk 3519 fs/btrfs/volumes.c struct btrfs_chunk *chunk, chunk 3523 fs/btrfs/volumes.c int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); chunk 3533 fs/btrfs/volumes.c type = btrfs_chunk_type(leaf, chunk); chunk 3537 fs/btrfs/volumes.c stripe = btrfs_stripe_nr(chunk, i); chunk 3542 fs/btrfs/volumes.c stripe_length = btrfs_chunk_length(leaf, chunk); chunk 3555 fs/btrfs/volumes.c struct btrfs_chunk *chunk, chunk 3560 fs/btrfs/volumes.c chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) chunk 3568 fs/btrfs/volumes.c struct btrfs_chunk *chunk, chunk 3571 fs/btrfs/volumes.c int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); chunk 3596 fs/btrfs/volumes.c struct btrfs_chunk *chunk, u64 chunk_offset) chunk 3601 fs/btrfs/volumes.c u64 chunk_type = btrfs_chunk_type(leaf, chunk); chunk 3633 fs/btrfs/volumes.c chunk_devid_filter(leaf, chunk, bargs)) { chunk 3639 fs/btrfs/volumes.c chunk_drange_filter(leaf, chunk, bargs)) { chunk 3645 fs/btrfs/volumes.c chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { chunk 3651 fs/btrfs/volumes.c chunk_stripes_range_filter(leaf, chunk, bargs)) { chunk 3689 fs/btrfs/volumes.c struct btrfs_chunk *chunk; chunk 3769 fs/btrfs/volumes.c chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); chunk 3770 fs/btrfs/volumes.c chunk_type = btrfs_chunk_type(leaf, chunk); chunk 3778 fs/btrfs/volumes.c ret = should_balance_chunk(leaf, chunk, found_key.offset); chunk 4900 fs/btrfs/volumes.c struct btrfs_chunk *chunk, int item_size) chunk 4919 fs/btrfs/volumes.c memcpy(ptr, chunk, item_size); chunk 5243 fs/btrfs/volumes.c struct btrfs_chunk *chunk; chunk 5261 fs/btrfs/volumes.c chunk = kzalloc(item_size, GFP_NOFS); chunk 5262 fs/btrfs/volumes.c if (!chunk) { chunk 5292 fs/btrfs/volumes.c stripe = &chunk->stripe; chunk 5304 fs/btrfs/volumes.c btrfs_set_stack_chunk_length(chunk, chunk_size); chunk 5305 fs/btrfs/volumes.c btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid); chunk 5306 fs/btrfs/volumes.c btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); chunk 5307 fs/btrfs/volumes.c btrfs_set_stack_chunk_type(chunk, map->type); chunk 5308 fs/btrfs/volumes.c btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); chunk 5309 fs/btrfs/volumes.c btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); chunk 5310 fs/btrfs/volumes.c btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); chunk 5311 fs/btrfs/volumes.c btrfs_set_stack_chunk_sector_size(chunk, 
fs_info->sectorsize); chunk 5312 fs/btrfs/volumes.c btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); chunk 5318 fs/btrfs/volumes.c ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); chunk 5324 fs/btrfs/volumes.c ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); chunk 5328 fs/btrfs/volumes.c kfree(chunk); chunk 6765 fs/btrfs/volumes.c struct btrfs_chunk *chunk) chunk 6780 fs/btrfs/volumes.c length = btrfs_chunk_length(leaf, chunk); chunk 6781 fs/btrfs/volumes.c num_stripes = btrfs_chunk_num_stripes(leaf, chunk); chunk 6788 fs/btrfs/volumes.c ret = btrfs_check_chunk_valid(leaf, chunk, logical); chunk 6823 fs/btrfs/volumes.c map->io_width = btrfs_chunk_io_width(leaf, chunk); chunk 6824 fs/btrfs/volumes.c map->io_align = btrfs_chunk_io_align(leaf, chunk); chunk 6825 fs/btrfs/volumes.c map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); chunk 6826 fs/btrfs/volumes.c map->type = btrfs_chunk_type(leaf, chunk); chunk 6827 fs/btrfs/volumes.c map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); chunk 6833 fs/btrfs/volumes.c btrfs_stripe_offset_nr(leaf, chunk, i); chunk 6834 fs/btrfs/volumes.c devid = btrfs_stripe_devid_nr(leaf, chunk, i); chunk 6836 fs/btrfs/volumes.c btrfs_stripe_dev_uuid_nr(chunk, i), chunk 7059 fs/btrfs/volumes.c struct btrfs_chunk *chunk; chunk 7116 fs/btrfs/volumes.c chunk = (struct btrfs_chunk *)sb_array_offset; chunk 7125 fs/btrfs/volumes.c num_stripes = btrfs_chunk_num_stripes(sb, chunk); chunk 7134 fs/btrfs/volumes.c type = btrfs_chunk_type(sb, chunk); chunk 7147 fs/btrfs/volumes.c ret = read_one_chunk(&key, sb, chunk); chunk 7293 fs/btrfs/volumes.c struct btrfs_chunk *chunk; chunk 7294 fs/btrfs/volumes.c chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); chunk 7295 fs/btrfs/volumes.c ret = read_one_chunk(&found_key, leaf, chunk); chunk 660 fs/ext4/mballoc.c ext4_grpblk_t chunk; chunk 676 fs/ext4/mballoc.c chunk = 1 << min; chunk 684 fs/ext4/mballoc.c len -= chunk; chunk 685 fs/ext4/mballoc.c first += chunk; chunk 1376 fs/gfs2/bmap.c u64 chunk; chunk 1383 fs/gfs2/bmap.c chunk = oldsize - newsize; chunk 1384 fs/gfs2/bmap.c if (chunk > max_chunk) chunk 1385 fs/gfs2/bmap.c chunk = max_chunk; chunk 1388 fs/gfs2/bmap.c if (offs && chunk > PAGE_SIZE) chunk 1389 fs/gfs2/bmap.c chunk = offs + ((chunk - offs) & PAGE_MASK); chunk 1391 fs/gfs2/bmap.c truncate_pagecache(inode, oldsize - chunk); chunk 1392 fs/gfs2/bmap.c oldsize -= chunk; chunk 2413 fs/gfs2/bmap.c loff_t chunk; chunk 2416 fs/gfs2/bmap.c chunk = length; chunk 2417 fs/gfs2/bmap.c if (chunk > max_chunk) chunk 2418 fs/gfs2/bmap.c chunk = max_chunk; chunk 2421 fs/gfs2/bmap.c if (offs && chunk > PAGE_SIZE) chunk 2422 fs/gfs2/bmap.c chunk = offs + ((chunk - offs) & PAGE_MASK); chunk 2424 fs/gfs2/bmap.c truncate_pagecache_range(inode, offset, chunk); chunk 2425 fs/gfs2/bmap.c offset += chunk; chunk 2426 fs/gfs2/bmap.c length -= chunk; chunk 200 fs/nfs/blocklayout/dev.c u64 chunk; chunk 204 fs/nfs/blocklayout/dev.c chunk = div_u64(offset, dev->chunk_size); chunk 205 fs/nfs/blocklayout/dev.c div_u64_rem(chunk, dev->nr_children, &chunk_idx); chunk 215 fs/nfs/blocklayout/dev.c offset = chunk * dev->chunk_size; chunk 442 fs/ocfs2/ioctl.c unsigned int offset = 0, cluster, chunk; chunk 481 fs/ocfs2/ioctl.c for (chunk = 0; chunk < chunks_in_group; chunk++) { chunk 296 fs/ocfs2/quota_local.c int chunk, chunk 304 fs/ocfs2/quota_local.c rc->rc_chunk = chunk; chunk 470 fs/ocfs2/quota_local.c int bit, chunk; chunk 477 fs/ocfs2/quota_local.c chunk = rchunk->rc_chunk; chunk 480 
fs/ocfs2/quota_local.c ol_quota_chunk_block(sb, chunk), chunk 490 fs/ocfs2/quota_local.c ol_dqblk_block(sb, chunk, bit), chunk 497 fs/ocfs2/quota_local.c ol_dqblk_block_off(sb, chunk, bit)); chunk 811 fs/ocfs2/quota_local.c struct ocfs2_quota_chunk *chunk; chunk 819 fs/ocfs2/quota_local.c list_for_each_entry(chunk, &oinfo->dqi_chunk, qc_chunk) { chunk 821 fs/ocfs2/quota_local.c (chunk->qc_headerbh->b_data); chunk 822 fs/ocfs2/quota_local.c if (chunk->qc_num < oinfo->dqi_chunks - 1) { chunk 826 fs/ocfs2/quota_local.c ol_quota_chunk_block(sb, chunk->qc_num) - 1) chunk 924 fs/ocfs2/quota_local.c struct ocfs2_quota_chunk *chunk; chunk 928 fs/ocfs2/quota_local.c list_for_each_entry(chunk, &oinfo->dqi_chunk, qc_chunk) { chunk 930 fs/ocfs2/quota_local.c chunk->qc_headerbh->b_data; chunk 939 fs/ocfs2/quota_local.c if (chunk->qc_num < oinfo->dqi_chunks - 1) { chunk 943 fs/ocfs2/quota_local.c ol_quota_chunk_block(sb, chunk->qc_num) - 1) chunk 951 fs/ocfs2/quota_local.c " entries free (type=%d)\n", chunk->qc_num, chunk 956 fs/ocfs2/quota_local.c return chunk; chunk 968 fs/ocfs2/quota_local.c struct ocfs2_quota_chunk *chunk = NULL; chunk 990 fs/ocfs2/quota_local.c chunk = kmem_cache_alloc(ocfs2_qf_chunk_cachep, GFP_NOFS); chunk 991 fs/ocfs2/quota_local.c if (!chunk) { chunk 1074 fs/ocfs2/quota_local.c list_add_tail(&chunk->qc_chunk, &oinfo->dqi_chunk); chunk 1075 fs/ocfs2/quota_local.c chunk->qc_num = list_entry(chunk->qc_chunk.prev, chunk 1078 fs/ocfs2/quota_local.c chunk->qc_headerbh = bh; chunk 1080 fs/ocfs2/quota_local.c return chunk; chunk 1086 fs/ocfs2/quota_local.c kmem_cache_free(ocfs2_qf_chunk_cachep, chunk); chunk 1098 fs/ocfs2/quota_local.c struct ocfs2_quota_chunk *chunk; chunk 1111 fs/ocfs2/quota_local.c chunk = list_entry(oinfo->dqi_chunk.prev, chunk 1114 fs/ocfs2/quota_local.c ol_quota_chunk_block(sb, chunk->qc_num) - 1; chunk 1171 fs/ocfs2/quota_local.c chunk->qc_headerbh, chunk 1178 fs/ocfs2/quota_local.c dchunk = (struct ocfs2_local_disk_chunk *)chunk->qc_headerbh->b_data; chunk 1179 fs/ocfs2/quota_local.c lock_buffer(chunk->qc_headerbh); chunk 1181 fs/ocfs2/quota_local.c unlock_buffer(chunk->qc_headerbh); chunk 1182 fs/ocfs2/quota_local.c ocfs2_journal_dirty(handle, chunk->qc_headerbh); chunk 1198 fs/ocfs2/quota_local.c return chunk; chunk 1221 fs/ocfs2/quota_local.c struct ocfs2_quota_chunk *chunk; chunk 1228 fs/ocfs2/quota_local.c chunk = ocfs2_find_free_entry(sb, type, &offset); chunk 1229 fs/ocfs2/quota_local.c if (!chunk) { chunk 1230 fs/ocfs2/quota_local.c chunk = ocfs2_extend_local_quota_file(sb, type, &offset); chunk 1231 fs/ocfs2/quota_local.c if (IS_ERR(chunk)) { chunk 1232 fs/ocfs2/quota_local.c status = PTR_ERR(chunk); chunk 1235 fs/ocfs2/quota_local.c } else if (IS_ERR(chunk)) { chunk 1236 fs/ocfs2/quota_local.c status = PTR_ERR(chunk); chunk 1239 fs/ocfs2/quota_local.c od->dq_local_off = ol_dqblk_off(sb, chunk->qc_num, offset); chunk 1240 fs/ocfs2/quota_local.c od->dq_chunk = chunk; chunk 1242 fs/ocfs2/quota_local.c ol_dqblk_block(sb, chunk->qc_num, offset), chunk 1255 fs/ocfs2/quota_local.c status = ocfs2_modify_bh(lqinode, chunk->qc_headerbh, olq_alloc_dquot, chunk 676 fs/reiserfs/journal.c static void write_chunk(struct buffer_chunk *chunk) chunk 679 fs/reiserfs/journal.c for (i = 0; i < chunk->nr; i++) { chunk 680 fs/reiserfs/journal.c submit_logged_buffer(chunk->bh[i]); chunk 682 fs/reiserfs/journal.c chunk->nr = 0; chunk 685 fs/reiserfs/journal.c static void write_ordered_chunk(struct buffer_chunk *chunk) chunk 688 fs/reiserfs/journal.c for (i = 0; i < chunk->nr; 
i++) { chunk 689 fs/reiserfs/journal.c submit_ordered_buffer(chunk->bh[i]); chunk 691 fs/reiserfs/journal.c chunk->nr = 0; chunk 694 fs/reiserfs/journal.c static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh, chunk 698 fs/reiserfs/journal.c BUG_ON(chunk->nr >= CHUNK_SIZE); chunk 699 fs/reiserfs/journal.c chunk->bh[chunk->nr++] = bh; chunk 700 fs/reiserfs/journal.c if (chunk->nr >= CHUNK_SIZE) { chunk 704 fs/reiserfs/journal.c fn(chunk); chunk 707 fs/reiserfs/journal.c fn(chunk); chunk 802 fs/reiserfs/journal.c struct buffer_chunk chunk; chunk 806 fs/reiserfs/journal.c chunk.nr = 0; chunk 818 fs/reiserfs/journal.c if (chunk.nr) chunk 819 fs/reiserfs/journal.c write_ordered_chunk(&chunk); chunk 836 fs/reiserfs/journal.c add_to_chunk(&chunk, bh, lock, write_ordered_chunk); chunk 845 fs/reiserfs/journal.c if (chunk.nr) { chunk 847 fs/reiserfs/journal.c write_ordered_chunk(&chunk); chunk 1631 fs/reiserfs/journal.c struct buffer_chunk *chunk) chunk 1665 fs/reiserfs/journal.c add_to_chunk(chunk, tmp_bh, NULL, write_chunk); chunk 1726 fs/reiserfs/journal.c struct buffer_chunk chunk; chunk 1729 fs/reiserfs/journal.c chunk.nr = 0; chunk 1749 fs/reiserfs/journal.c ret = write_one_transaction(s, jl, &chunk); chunk 1767 fs/reiserfs/journal.c if (chunk.nr) { chunk 1768 fs/reiserfs/journal.c write_chunk(&chunk); chunk 548 fs/reiserfs/xattr.c size_t chunk; chunk 553 fs/reiserfs/xattr.c chunk = PAGE_SIZE; chunk 555 fs/reiserfs/xattr.c chunk = buffer_size - buffer_pos; chunk 570 fs/reiserfs/xattr.c if (chunk + skip > PAGE_SIZE) chunk 571 fs/reiserfs/xattr.c chunk = PAGE_SIZE - skip; chunk 578 fs/reiserfs/xattr.c err = __reiserfs_write_begin(page, page_offset, chunk + skip); chunk 581 fs/reiserfs/xattr.c memcpy(data + skip, buffer + buffer_pos, chunk); chunk 583 fs/reiserfs/xattr.c page_offset + chunk + chunk 589 fs/reiserfs/xattr.c buffer_pos += chunk; chunk 590 fs/reiserfs/xattr.c file_pos += chunk; chunk 699 fs/reiserfs/xattr.c size_t chunk; chunk 704 fs/reiserfs/xattr.c chunk = PAGE_SIZE; chunk 706 fs/reiserfs/xattr.c chunk = isize - file_pos; chunk 720 fs/reiserfs/xattr.c chunk -= skip; chunk 734 fs/reiserfs/xattr.c memcpy(buffer + buffer_pos, data + skip, chunk); chunk 737 fs/reiserfs/xattr.c file_pos += chunk; chunk 738 fs/reiserfs/xattr.c buffer_pos += chunk; chunk 237 include/drm/drm_dp_mst_helper.h u8 chunk[48]; chunk 392 include/drm/drm_dp_mst_helper.h u8 chunk[48]; chunk 1136 include/linux/dmaengine.h struct data_chunk *chunk) chunk 1139 include/linux/dmaengine.h chunk->icg, chunk->dst_icg); chunk 1143 include/linux/dmaengine.h struct data_chunk *chunk) chunk 1146 include/linux/dmaengine.h chunk->icg, chunk->src_icg); chunk 88 include/net/sctp/auth.h int sctp_auth_send_cid(enum sctp_cid chunk, chunk 90 include/net/sctp/auth.h int sctp_auth_recv_cid(enum sctp_cid chunk, chunk 116 include/net/sctp/command.h struct sctp_chunk *chunk; chunk 157 include/net/sctp/command.h SCTP_ARG_CONSTRUCTOR(CHUNK, struct sctp_chunk *, chunk) chunk 116 include/net/sctp/constants.h enum sctp_cid chunk; chunk 127 include/net/sctp/constants.h SCTP_SUBTYPE_CONSTRUCTOR(CHUNK, enum sctp_cid, chunk) chunk 418 include/net/sctp/sctp.h static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk) chunk 420 include/net/sctp/sctp.h return !list_empty(&chunk->list); chunk 428 include/net/sctp/sctp.h #define sctp_walk_params(pos, chunk, member)\ chunk 429 include/net/sctp/sctp.h _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member) chunk 431 include/net/sctp/sctp.h #define 
_sctp_walk_params(pos, chunk, end, member)\ chunk 432 include/net/sctp/sctp.h for (pos.v = chunk->member;\ chunk 434 include/net/sctp/sctp.h (void *)chunk + end) &&\ chunk 435 include/net/sctp/sctp.h pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\ chunk 451 include/net/sctp/sctp.h #define sctp_walk_fwdtsn(pos, chunk)\ chunk 452 include/net/sctp/sctp.h _sctp_walk_fwdtsn((pos), (chunk), ntohs((chunk)->chunk_hdr->length) - sizeof(struct sctp_fwdtsn_chunk)) chunk 454 include/net/sctp/sctp.h #define _sctp_walk_fwdtsn(pos, chunk, end)\ chunk 455 include/net/sctp/sctp.h for (pos = chunk->subh.fwdtsn_hdr->skip;\ chunk 456 include/net/sctp/sctp.h (void *)pos <= (void *)chunk->subh.fwdtsn_hdr->skip + end - sizeof(struct sctp_fwdtsn_skip);\ chunk 330 include/net/sctp/sm.h static inline __u16 sctp_data_size(struct sctp_chunk *chunk) chunk 334 include/net/sctp/sm.h size = ntohs(chunk->chunk_hdr->length); chunk 335 include/net/sctp/sm.h size -= sctp_datachk_len(&chunk->asoc->stream); chunk 380 include/net/sctp/sm.h if (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag) chunk 414 include/net/sctp/sm.h if ((!sctp_test_T_bit(chunk) && chunk 415 include/net/sctp/sm.h (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)) || chunk 416 include/net/sctp/sm.h (sctp_test_T_bit(chunk) && asoc->c.peer_vtag && chunk 417 include/net/sctp/sm.h (ntohl(chunk->sctp_hdr->vtag) == asoc->c.peer_vtag))) { chunk 26 include/net/sctp/stream_interleave.h void (*assign_number)(struct sctp_chunk *chunk); chunk 27 include/net/sctp/stream_interleave.h bool (*validate_data)(struct sctp_chunk *chunk); chunk 29 include/net/sctp/stream_interleave.h struct sctp_chunk *chunk, gfp_t gfp); chunk 33 include/net/sctp/stream_interleave.h struct sctp_chunk *chunk, gfp_t gfp); chunk 38 include/net/sctp/stream_interleave.h bool (*validate_ftsn)(struct sctp_chunk *chunk); chunk 41 include/net/sctp/stream_interleave.h struct sctp_chunk *chunk); chunk 37 include/net/sctp/stream_sched.h void (*dequeue_done)(struct sctp_outq *q, struct sctp_chunk *chunk); chunk 666 include/net/sctp/structs.h #define sctp_chunk_retransmitted(chunk) (chunk->sent_count > 1) chunk 669 include/net/sctp/structs.h int sctp_user_addto_chunk(struct sctp_chunk *chunk, int len, chunk 678 include/net/sctp/structs.h const union sctp_addr *sctp_source(const struct sctp_chunk *chunk); chunk 741 include/net/sctp/structs.h struct sctp_chunk *chunk, chunk 744 include/net/sctp/structs.h struct sctp_chunk *chunk); chunk 1101 include/net/sctp/structs.h void sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t); chunk 1129 include/net/sctp/structs.h struct sctp_chunk *chunk; chunk 1141 include/net/sctp/structs.h const struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; chunk 1143 include/net/sctp/structs.h return chunk->head_skb ? 
: skb; chunk 1377 include/net/sctp/structs.h struct sctp_chunk *chunk, struct sctp_chunk **err_chunk); chunk 1378 include/net/sctp/structs.h int sctp_process_init(struct sctp_association *, struct sctp_chunk *chunk, chunk 40 include/net/sctp/ulpevent.h struct sctp_chunk *chunk; chunk 80 include/net/sctp/ulpevent.h struct sctp_chunk *chunk, chunk 93 include/net/sctp/ulpevent.h struct sctp_chunk *chunk, chunk 98 include/net/sctp/ulpevent.h struct sctp_chunk *chunk, chunk 117 include/net/sctp/ulpevent.h struct sctp_chunk *chunk, chunk 1520 include/trace/events/rpcrdma.h DEFINE_ERROR_EVENT(chunk); chunk 54 include/trace/events/sctp.h struct sctp_chunk *chunk), chunk 56 include/trace/events/sctp.h TP_ARGS(ep, asoc, chunk), chunk 69 include/trace/events/sctp.h struct sk_buff *skb = chunk->skb; chunk 28 init/do_mounts_md.c int chunk; chunk 92 init/do_mounts_md.c md_setup_args[ent].chunk = 1 << (factor+12); chunk 202 init/do_mounts_md.c ainfo.chunk_size = md_setup_args[ent].chunk; chunk 277 kernel/audit.h extern void audit_put_chunk(struct audit_chunk *chunk); chunk 278 kernel/audit.h extern bool audit_tree_match(struct audit_chunk *chunk, chunk 42 kernel/audit_tree.c struct audit_chunk *chunk; chunk 128 kernel/audit_tree.c static void free_chunk(struct audit_chunk *chunk) chunk 132 kernel/audit_tree.c for (i = 0; i < chunk->count; i++) { chunk 133 kernel/audit_tree.c if (chunk->owners[i].owner) chunk 134 kernel/audit_tree.c put_tree(chunk->owners[i].owner); chunk 136 kernel/audit_tree.c kfree(chunk); chunk 139 kernel/audit_tree.c void audit_put_chunk(struct audit_chunk *chunk) chunk 141 kernel/audit_tree.c if (atomic_long_dec_and_test(&chunk->refs)) chunk 142 kernel/audit_tree.c free_chunk(chunk); chunk 147 kernel/audit_tree.c struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head); chunk 148 kernel/audit_tree.c audit_put_chunk(chunk); chunk 156 kernel/audit_tree.c static void audit_mark_put_chunk(struct audit_chunk *chunk) chunk 158 kernel/audit_tree.c call_rcu(&chunk->head, __put_chunk); chunk 168 kernel/audit_tree.c return audit_mark(mark)->chunk; chunk 190 kernel/audit_tree.c struct audit_chunk *chunk; chunk 195 kernel/audit_tree.c chunk = kzalloc(size, GFP_KERNEL); chunk 196 kernel/audit_tree.c if (!chunk) chunk 199 kernel/audit_tree.c INIT_LIST_HEAD(&chunk->hash); chunk 200 kernel/audit_tree.c INIT_LIST_HEAD(&chunk->trees); chunk 201 kernel/audit_tree.c chunk->count = count; chunk 202 kernel/audit_tree.c atomic_long_set(&chunk->refs, 1); chunk 204 kernel/audit_tree.c INIT_LIST_HEAD(&chunk->owners[i].list); chunk 205 kernel/audit_tree.c chunk->owners[i].index = i; chunk 207 kernel/audit_tree.c return chunk; chunk 228 kernel/audit_tree.c static void insert_hash(struct audit_chunk *chunk) chunk 238 kernel/audit_tree.c WARN_ON_ONCE(!chunk->key); chunk 239 kernel/audit_tree.c list = chunk_hash(chunk->key); chunk 240 kernel/audit_tree.c list_add_rcu(&chunk->hash, list); chunk 263 kernel/audit_tree.c bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree) chunk 266 kernel/audit_tree.c for (n = 0; n < chunk->count; n++) chunk 267 kernel/audit_tree.c if (chunk->owners[n].owner == tree) chunk 282 kernel/audit_tree.c struct audit_chunk *chunk) chunk 288 kernel/audit_tree.c audit_mark(mark)->chunk = chunk; chunk 289 kernel/audit_tree.c if (chunk) chunk 290 kernel/audit_tree.c chunk->mark = mark; chunk 327 kernel/audit_tree.c static void remove_chunk_node(struct audit_chunk *chunk, struct node *p) chunk 331 kernel/audit_tree.c if (owner->root == chunk) { chunk 340 
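
The sctp_walk_params()/_sctp_walk_fwdtsn() macros listed above iterate TLV parameters only while both the header and the advertised length still fit inside the chunk. A hedged plain-function rendering of that bounds-checked walk follows; param_hdr and walk_params() are illustrative stand-ins for the sctp_paramhdr machinery, not the kernel's types.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* TLV header in the shape of struct sctp_paramhdr: 16-bit type and
 * length, both big-endian, length covering the header itself. */
struct param_hdr {
	uint16_t type;
	uint16_t length;
};

/* Never read a header that would cross 'end', never trust a length
 * that would, and round the stride up to 4 bytes as SCTP does. */
static void walk_params(const uint8_t *buf, size_t end)
{
	const uint8_t *pos = buf;
	struct param_hdr p;

	while (pos + sizeof(p) <= buf + end) {
		memcpy(&p, pos, sizeof(p));	/* avoid unaligned access */
		uint16_t len = ntohs(p.length);

		if (len < sizeof(p) || pos + len > buf + end)
			break;			/* malformed or truncated */
		printf("param type %u, len %u\n", ntohs(p.type), len);
		pos += (len + 3u) & ~3u;
	}
}

int main(void)
{
	uint8_t buf[16] = { 0 };
	struct param_hdr p = { htons(1), htons(6) };	/* padded to 8 */

	memcpy(buf, &p, sizeof(p));
	p.type = htons(2);
	p.length = htons(8);
	memcpy(buf + 8, &p, sizeof(p));
	walk_params(buf, sizeof(buf));
	return 0;
}
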
kernel/audit_tree.c static int chunk_count_trees(struct audit_chunk *chunk) chunk 345 kernel/audit_tree.c for (i = 0; i < chunk->count; i++) chunk 346 kernel/audit_tree.c if (chunk->owners[i].owner) chunk 351 kernel/audit_tree.c static void untag_chunk(struct audit_chunk *chunk, struct fsnotify_mark *mark) chunk 362 kernel/audit_tree.c mark_chunk(mark) != chunk) chunk 365 kernel/audit_tree.c size = chunk_count_trees(chunk); chunk 368 kernel/audit_tree.c list_del_init(&chunk->trees); chunk 369 kernel/audit_tree.c list_del_rcu(&chunk->hash); chunk 374 kernel/audit_tree.c audit_mark_put_chunk(chunk); chunk 388 kernel/audit_tree.c replace_chunk(new, chunk); chunk 391 kernel/audit_tree.c audit_mark_put_chunk(chunk); chunk 402 kernel/audit_tree.c struct audit_chunk *chunk = alloc_chunk(1); chunk 404 kernel/audit_tree.c if (!chunk) { chunk 412 kernel/audit_tree.c kfree(chunk); chunk 419 kernel/audit_tree.c kfree(chunk); chunk 430 kernel/audit_tree.c kfree(chunk); chunk 433 kernel/audit_tree.c replace_mark_chunk(mark, chunk); chunk 434 kernel/audit_tree.c chunk->owners[0].index = (1U << 31); chunk 435 kernel/audit_tree.c chunk->owners[0].owner = tree; chunk 437 kernel/audit_tree.c list_add(&chunk->owners[0].list, &tree->chunks); chunk 439 kernel/audit_tree.c tree->root = chunk; chunk 440 kernel/audit_tree.c list_add(&tree->same_root, &chunk->trees); chunk 442 kernel/audit_tree.c chunk->key = inode_to_key(inode); chunk 447 kernel/audit_tree.c insert_hash(chunk); chunk 463 kernel/audit_tree.c struct audit_chunk *chunk, *old; chunk 490 kernel/audit_tree.c chunk = alloc_chunk(old->count + 1); chunk 491 kernel/audit_tree.c if (!chunk) { chunk 502 kernel/audit_tree.c kfree(chunk); chunk 505 kernel/audit_tree.c p = &chunk->owners[chunk->count - 1]; chunk 506 kernel/audit_tree.c p->index = (chunk->count - 1) | (1U<<31); chunk 511 kernel/audit_tree.c tree->root = chunk; chunk 512 kernel/audit_tree.c list_add(&tree->same_root, &chunk->trees); chunk 518 kernel/audit_tree.c replace_chunk(chunk, old); chunk 576 kernel/audit_tree.c struct audit_chunk *chunk; chunk 583 kernel/audit_tree.c chunk = find_chunk(p); chunk 584 kernel/audit_tree.c mark = chunk->mark; chunk 585 kernel/audit_tree.c remove_chunk_node(chunk, p); chunk 592 kernel/audit_tree.c untag_chunk(chunk, mark); chunk 709 kernel/audit_tree.c struct audit_chunk *chunk = find_chunk(node); chunk 713 kernel/audit_tree.c (void *)(chunk->key), chunk 1006 kernel/audit_tree.c static void evict_chunk(struct audit_chunk *chunk) chunk 1015 kernel/audit_tree.c while (!list_empty(&chunk->trees)) { chunk 1016 kernel/audit_tree.c owner = list_entry(chunk->trees.next, chunk 1031 kernel/audit_tree.c list_del_rcu(&chunk->hash); chunk 1032 kernel/audit_tree.c for (n = 0; n < chunk->count; n++) chunk 1033 kernel/audit_tree.c list_del_init(&chunk->owners[n].list); chunk 1052 kernel/audit_tree.c struct audit_chunk *chunk; chunk 1056 kernel/audit_tree.c chunk = mark_chunk(mark); chunk 1060 kernel/audit_tree.c if (chunk) { chunk 1061 kernel/audit_tree.c evict_chunk(chunk); chunk 1062 kernel/audit_tree.c audit_mark_put_chunk(chunk); chunk 211 kernel/auditsc.c static int put_tree_ref(struct audit_context *ctx, struct audit_chunk *chunk) chunk 216 kernel/auditsc.c p->c[--left] = chunk; chunk 224 kernel/auditsc.c p->c[30] = chunk; chunk 1740 kernel/auditsc.c struct audit_chunk *chunk; chunk 1748 kernel/auditsc.c chunk = audit_tree_lookup(inode); chunk 1750 kernel/auditsc.c if (!chunk) chunk 1752 kernel/auditsc.c if (likely(put_tree_ref(context, chunk))) chunk 1757 
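
kernel/audit_tree.c above pairs atomic_long_set(&chunk->refs, 1) at creation with audit_put_chunk()'s atomic_long_dec_and_test(), and defers the actual free through call_rcu() so lockless readers can finish. A compact userspace approximation using C11 atomics, assuming a direct free() as a stand-in for the RCU grace period.

#include <stdatomic.h>
#include <stdlib.h>

/* Mirrors the audit_chunk refcounting above: the creator holds the
 * first reference and the last chunk_put() frees.  The kernel's
 * audit_mark_put_chunk() routes the free through call_rcu() instead
 * of calling free() directly. */
struct chunk {
	atomic_long refs;
	/* owners[] and the hash/tree lists are elided */
};

static struct chunk *chunk_alloc(void)
{
	struct chunk *chunk = calloc(1, sizeof(*chunk));

	if (chunk)
		atomic_store(&chunk->refs, 1);
	return chunk;
}

static void chunk_get(struct chunk *chunk)
{
	atomic_fetch_add(&chunk->refs, 1);
}

static void chunk_put(struct chunk *chunk)
{
	/* atomic_long_dec_and_test() in kernel/audit_tree.c */
	if (atomic_fetch_sub(&chunk->refs, 1) == 1)
		free(chunk);
}

int main(void)
{
	struct chunk *c = chunk_alloc();

	if (!c)
		return 1;
	chunk_get(c);	/* a lookup hands out a second reference */
	chunk_put(c);	/* ...and its user drops it */
	chunk_put(c);	/* creator's put is the last one; frees */
	return 0;
}
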
kernel/auditsc.c audit_put_chunk(chunk); chunk 1761 kernel/auditsc.c put_tree_ref(context, chunk); chunk 1784 kernel/auditsc.c struct audit_chunk *chunk; chunk 1785 kernel/auditsc.c chunk = audit_tree_lookup(inode); chunk 1786 kernel/auditsc.c if (chunk) { chunk 1787 kernel/auditsc.c if (unlikely(!put_tree_ref(context, chunk))) { chunk 1788 kernel/auditsc.c drop = chunk; chunk 381 lib/bitmap.c u32 chunk; chunk 388 lib/bitmap.c chunk = 0; chunk 424 lib/bitmap.c if (chunk & ~((1UL << (CHUNKSZ - 4)) - 1)) chunk 427 lib/bitmap.c chunk = (chunk << 4) | hex_to_bin(c); chunk 432 lib/bitmap.c if (nchunks == 0 && chunk == 0) chunk 436 lib/bitmap.c *maskp |= chunk; chunk 438 lib/bitmap.c nbits += (nchunks == 1) ? nbits_to_hold_value(chunk) : CHUNKSZ; chunk 38 lib/genalloc.c static inline size_t chunk_size(const struct gen_pool_chunk *chunk) chunk 40 lib/genalloc.c return chunk->end_addr - chunk->start_addr + 1; chunk 185 lib/genalloc.c struct gen_pool_chunk *chunk; chunk 190 lib/genalloc.c chunk = vzalloc_node(nbytes, nid); chunk 191 lib/genalloc.c if (unlikely(chunk == NULL)) chunk 194 lib/genalloc.c chunk->phys_addr = phys; chunk 195 lib/genalloc.c chunk->start_addr = virt; chunk 196 lib/genalloc.c chunk->end_addr = virt + size - 1; chunk 197 lib/genalloc.c chunk->owner = owner; chunk 198 lib/genalloc.c atomic_long_set(&chunk->avail, size); chunk 201 lib/genalloc.c list_add_rcu(&chunk->next_chunk, &pool->chunks); chunk 217 lib/genalloc.c struct gen_pool_chunk *chunk; chunk 221 lib/genalloc.c list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { chunk 222 lib/genalloc.c if (addr >= chunk->start_addr && addr <= chunk->end_addr) { chunk 223 lib/genalloc.c paddr = chunk->phys_addr + (addr - chunk->start_addr); chunk 243 lib/genalloc.c struct gen_pool_chunk *chunk; chunk 248 lib/genalloc.c chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); chunk 249 lib/genalloc.c list_del(&chunk->next_chunk); chunk 251 lib/genalloc.c end_bit = chunk_size(chunk) >> order; chunk 252 lib/genalloc.c bit = find_next_bit(chunk->bits, end_bit, 0); chunk 255 lib/genalloc.c vfree(chunk); chunk 278 lib/genalloc.c struct gen_pool_chunk *chunk; chunk 295 lib/genalloc.c list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { chunk 296 lib/genalloc.c if (size > atomic_long_read(&chunk->avail)) chunk 300 lib/genalloc.c end_bit = chunk_size(chunk) >> order; chunk 302 lib/genalloc.c start_bit = algo(chunk->bits, end_bit, start_bit, chunk 303 lib/genalloc.c nbits, data, pool, chunk->start_addr); chunk 306 lib/genalloc.c remain = bitmap_set_ll(chunk->bits, start_bit, nbits); chunk 308 lib/genalloc.c remain = bitmap_clear_ll(chunk->bits, start_bit, chunk 314 lib/genalloc.c addr = chunk->start_addr + ((unsigned long)start_bit << order); chunk 316 lib/genalloc.c atomic_long_sub(size, &chunk->avail); chunk 318 lib/genalloc.c *owner = chunk->owner; chunk 488 lib/genalloc.c struct gen_pool_chunk *chunk; chunk 501 lib/genalloc.c list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { chunk 502 lib/genalloc.c if (addr >= chunk->start_addr && addr <= chunk->end_addr) { chunk 503 lib/genalloc.c BUG_ON(addr + size - 1 > chunk->end_addr); chunk 504 lib/genalloc.c start_bit = (addr - chunk->start_addr) >> order; chunk 505 lib/genalloc.c remain = bitmap_clear_ll(chunk->bits, start_bit, nbits); chunk 508 lib/genalloc.c atomic_long_add(size, &chunk->avail); chunk 510 lib/genalloc.c *owner = chunk->owner; chunk 530 lib/genalloc.c void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data), chunk 533 
lib/genalloc.c struct gen_pool_chunk *chunk; chunk 536 lib/genalloc.c list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) chunk 537 lib/genalloc.c func(pool, chunk, data); chunk 556 lib/genalloc.c struct gen_pool_chunk *chunk; chunk 559 lib/genalloc.c list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) { chunk 560 lib/genalloc.c if (start >= chunk->start_addr && start <= chunk->end_addr) { chunk 561 lib/genalloc.c if (end <= chunk->end_addr) { chunk 579 lib/genalloc.c struct gen_pool_chunk *chunk; chunk 583 lib/genalloc.c list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) chunk 584 lib/genalloc.c avail += atomic_long_read(&chunk->avail); chunk 598 lib/genalloc.c struct gen_pool_chunk *chunk; chunk 602 lib/genalloc.c list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) chunk 603 lib/genalloc.c size += chunk_size(chunk); chunk 554 lib/iov_iter.c size_t chunk = min_t(size_t, n, PAGE_SIZE - off); chunk 555 lib/iov_iter.c memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk); chunk 557 lib/iov_iter.c i->iov_offset = off + chunk; chunk 558 lib/iov_iter.c n -= chunk; chunk 559 lib/iov_iter.c addr += chunk; chunk 588 lib/iov_iter.c size_t chunk = min_t(size_t, n, PAGE_SIZE - r); chunk 590 lib/iov_iter.c sum = csum_and_memcpy(p + r, addr, chunk, sum, off); chunk 593 lib/iov_iter.c i->iov_offset = r + chunk; chunk 594 lib/iov_iter.c n -= chunk; chunk 595 lib/iov_iter.c off += chunk; chunk 596 lib/iov_iter.c addr += chunk; chunk 658 lib/iov_iter.c size_t chunk = min_t(size_t, n, PAGE_SIZE - off); chunk 662 lib/iov_iter.c chunk); chunk 664 lib/iov_iter.c i->iov_offset = off + chunk - rem; chunk 665 lib/iov_iter.c xfer += chunk - rem; chunk 668 lib/iov_iter.c n -= chunk; chunk 669 lib/iov_iter.c addr += chunk; chunk 939 lib/iov_iter.c size_t chunk = min_t(size_t, n, PAGE_SIZE - off); chunk 940 lib/iov_iter.c memzero_page(pipe->bufs[idx].page, off, chunk); chunk 942 lib/iov_iter.c i->iov_offset = off + chunk; chunk 943 lib/iov_iter.c n -= chunk; chunk 80 mm/percpu-internal.h static inline int pcpu_chunk_nr_blocks(struct pcpu_chunk *chunk) chunk 82 mm/percpu-internal.h return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE; chunk 104 mm/percpu-internal.h static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk) chunk 106 mm/percpu-internal.h return pcpu_nr_pages_to_map_bits(chunk->nr_pages); chunk 146 mm/percpu-internal.h static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size) chunk 159 mm/percpu-internal.h chunk->nr_alloc++; chunk 160 mm/percpu-internal.h chunk->max_alloc_size = max(chunk->max_alloc_size, size); chunk 170 mm/percpu-internal.h static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk) chunk 177 mm/percpu-internal.h chunk->nr_alloc--; chunk 214 mm/percpu-internal.h static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size) chunk 218 mm/percpu-internal.h static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk) chunk 35 mm/percpu-km.c static int pcpu_populate_chunk(struct pcpu_chunk *chunk, chunk 41 mm/percpu-km.c static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, chunk 50 mm/percpu-km.c struct pcpu_chunk *chunk; chunk 55 mm/percpu-km.c chunk = pcpu_alloc_chunk(gfp); chunk 56 mm/percpu-km.c if (!chunk) chunk 61 mm/percpu-km.c pcpu_free_chunk(chunk); chunk 66 mm/percpu-km.c pcpu_set_page_chunk(nth_page(pages, i), chunk); chunk 68 mm/percpu-km.c chunk->data = pages; chunk 69 mm/percpu-km.c chunk->base_addr = page_address(pages); chunk 72 mm/percpu-km.c 
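
In the lib/genalloc.c entries above, pool size and availability are just sums of chunk_size() and chunk->avail over the pool's chunk list, which the kernel walks under RCU. A stripped-down sketch with a plain singly linked list standing in for list_for_each_entry_rcu(); gp_chunk and the helper names are assumptions, not the kernel's structures.

#include <stdio.h>

/* Echo of struct gen_pool_chunk: an inclusive address range plus the
 * bytes still available in that chunk. */
struct gp_chunk {
	struct gp_chunk *next;
	unsigned long start_addr;
	unsigned long end_addr;		/* inclusive, hence the +1 below */
	long avail;
};

static unsigned long chunk_size(const struct gp_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}

static unsigned long pool_size(const struct gp_chunk *head)
{
	unsigned long size = 0;

	for (const struct gp_chunk *c = head; c; c = c->next)
		size += chunk_size(c);
	return size;
}

static unsigned long pool_avail(const struct gp_chunk *head)
{
	unsigned long avail = 0;

	for (const struct gp_chunk *c = head; c; c = c->next)
		avail += c->avail;
	return avail;
}

int main(void)
{
	struct gp_chunk b = { NULL, 0x2000, 0x2fff, 2048 };
	struct gp_chunk a = { &b, 0x1000, 0x17ff, 1024 };

	printf("size %lu, avail %lu\n", pool_size(&a), pool_avail(&a));
	return 0;	/* size 6144, avail 3072 */
}
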
pcpu_chunk_populated(chunk, 0, nr_pages); chunk 76 mm/percpu-km.c trace_percpu_create_chunk(chunk->base_addr); chunk 78 mm/percpu-km.c return chunk; chunk 81 mm/percpu-km.c static void pcpu_destroy_chunk(struct pcpu_chunk *chunk) chunk 85 mm/percpu-km.c if (!chunk) chunk 89 mm/percpu-km.c trace_percpu_destroy_chunk(chunk->base_addr); chunk 91 mm/percpu-km.c if (chunk->data) chunk 92 mm/percpu-km.c __free_pages(chunk->data, order_base_2(nr_pages)); chunk 93 mm/percpu-km.c pcpu_free_chunk(chunk); chunk 35 mm/percpu-stats.c struct pcpu_chunk *chunk; chunk 40 mm/percpu-stats.c list_for_each_entry(chunk, &pcpu_slot[slot], list) chunk 41 mm/percpu-stats.c max_nr_alloc = max(max_nr_alloc, chunk->nr_alloc); chunk 52 mm/percpu-stats.c static void chunk_map_stats(struct seq_file *m, struct pcpu_chunk *chunk, chunk 55 mm/percpu-stats.c struct pcpu_block_md *chunk_md = &chunk->chunk_md; chunk 69 mm/percpu-stats.c last_alloc = find_last_bit(chunk->alloc_map, chunk 70 mm/percpu-stats.c pcpu_chunk_map_bits(chunk) - chunk 71 mm/percpu-stats.c chunk->end_offset / PCPU_MIN_ALLOC_SIZE - 1); chunk 72 mm/percpu-stats.c last_alloc = test_bit(last_alloc, chunk->alloc_map) ? chunk 76 mm/percpu-stats.c start = chunk->start_offset / PCPU_MIN_ALLOC_SIZE; chunk 88 mm/percpu-stats.c if (test_bit(start, chunk->alloc_map)) { chunk 89 mm/percpu-stats.c end = find_next_bit(chunk->bound_map, last_alloc, chunk 93 mm/percpu-stats.c end = find_next_bit(chunk->alloc_map, last_alloc, chunk 121 mm/percpu-stats.c P("nr_alloc", chunk->nr_alloc); chunk 122 mm/percpu-stats.c P("max_alloc_size", chunk->max_alloc_size); chunk 123 mm/percpu-stats.c P("empty_pop_pages", chunk->nr_empty_pop_pages); chunk 125 mm/percpu-stats.c P("free_bytes", chunk->free_bytes); chunk 137 mm/percpu-stats.c struct pcpu_chunk *chunk; chunk 206 mm/percpu-stats.c list_for_each_entry(chunk, &pcpu_slot[slot], list) { chunk 207 mm/percpu-stats.c if (chunk == pcpu_first_chunk) { chunk 209 mm/percpu-stats.c chunk_map_stats(m, chunk, buffer); chunk 214 mm/percpu-stats.c chunk_map_stats(m, chunk, buffer); chunk 12 mm/percpu-vm.c static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk, chunk 16 mm/percpu-vm.c WARN_ON(chunk->immutable); chunk 18 mm/percpu-vm.c return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx)); chunk 53 mm/percpu-vm.c static void pcpu_free_pages(struct pcpu_chunk *chunk, chunk 81 mm/percpu-vm.c static int pcpu_alloc_pages(struct pcpu_chunk *chunk, chunk 126 mm/percpu-vm.c static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk, chunk 130 mm/percpu-vm.c pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start), chunk 131 mm/percpu-vm.c pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end)); chunk 152 mm/percpu-vm.c static void pcpu_unmap_pages(struct pcpu_chunk *chunk, chunk 162 mm/percpu-vm.c page = pcpu_chunk_page(chunk, cpu, i); chunk 166 mm/percpu-vm.c __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start), chunk 184 mm/percpu-vm.c static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, chunk 188 mm/percpu-vm.c pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start), chunk 189 mm/percpu-vm.c pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end)); chunk 213 mm/percpu-vm.c static int pcpu_map_pages(struct pcpu_chunk *chunk, chunk 220 mm/percpu-vm.c err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start), chunk 228 mm/percpu-vm.c chunk); chunk 235 mm/percpu-vm.c __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start), chunk 238 mm/percpu-vm.c pcpu_post_unmap_tlb_flush(chunk, page_start, page_end); chunk 254 
mm/percpu-vm.c static void pcpu_post_map_flush(struct pcpu_chunk *chunk, chunk 258 mm/percpu-vm.c pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start), chunk 259 mm/percpu-vm.c pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end)); chunk 275 mm/percpu-vm.c static int pcpu_populate_chunk(struct pcpu_chunk *chunk, chunk 284 mm/percpu-vm.c if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp)) chunk 287 mm/percpu-vm.c if (pcpu_map_pages(chunk, pages, page_start, page_end)) { chunk 288 mm/percpu-vm.c pcpu_free_pages(chunk, pages, page_start, page_end); chunk 291 mm/percpu-vm.c pcpu_post_map_flush(chunk, page_start, page_end); chunk 308 mm/percpu-vm.c static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, chunk 322 mm/percpu-vm.c pcpu_pre_unmap_flush(chunk, page_start, page_end); chunk 324 mm/percpu-vm.c pcpu_unmap_pages(chunk, pages, page_start, page_end); chunk 328 mm/percpu-vm.c pcpu_free_pages(chunk, pages, page_start, page_end); chunk 333 mm/percpu-vm.c struct pcpu_chunk *chunk; chunk 336 mm/percpu-vm.c chunk = pcpu_alloc_chunk(gfp); chunk 337 mm/percpu-vm.c if (!chunk) chunk 343 mm/percpu-vm.c pcpu_free_chunk(chunk); chunk 347 mm/percpu-vm.c chunk->data = vms; chunk 348 mm/percpu-vm.c chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0]; chunk 351 mm/percpu-vm.c trace_percpu_create_chunk(chunk->base_addr); chunk 353 mm/percpu-vm.c return chunk; chunk 356 mm/percpu-vm.c static void pcpu_destroy_chunk(struct pcpu_chunk *chunk) chunk 358 mm/percpu-vm.c if (!chunk) chunk 362 mm/percpu-vm.c trace_percpu_destroy_chunk(chunk->base_addr); chunk 364 mm/percpu-vm.c if (chunk->data) chunk 365 mm/percpu-vm.c pcpu_free_vm_areas(chunk->data, pcpu_nr_groups); chunk 366 mm/percpu-vm.c pcpu_free_chunk(chunk); chunk 206 mm/percpu.c static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr) chunk 210 mm/percpu.c if (!chunk) chunk 213 mm/percpu.c start_addr = chunk->base_addr + chunk->start_offset; chunk 214 mm/percpu.c end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE - chunk 215 mm/percpu.c chunk->end_offset; chunk 233 mm/percpu.c static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) chunk 235 mm/percpu.c const struct pcpu_block_md *chunk_md = &chunk->chunk_md; chunk 237 mm/percpu.c if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE || chunk 266 mm/percpu.c static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, chunk 269 mm/percpu.c return (unsigned long)chunk->base_addr + chunk 304 mm/percpu.c static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index) chunk 306 mm/percpu.c return chunk->alloc_map + chunk 364 mm/percpu.c static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off, chunk 372 mm/percpu.c for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk); chunk 419 mm/percpu.c static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits, chunk 427 mm/percpu.c for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk); chunk 467 mm/percpu.c *bit_off = pcpu_chunk_map_bits(chunk); chunk 476 mm/percpu.c #define pcpu_for_each_md_free_region(chunk, bit_off, bits) \ chunk 477 mm/percpu.c for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits)); \ chunk 478 mm/percpu.c (bit_off) < pcpu_chunk_map_bits((chunk)); \ chunk 480 mm/percpu.c pcpu_next_md_free_region((chunk), &(bit_off), &(bits))) chunk 482 mm/percpu.c #define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) \ chunk 483 mm/percpu.c for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \ chunk 485 mm/percpu.c 
(bit_off) < pcpu_chunk_map_bits((chunk)); \ chunk 487 mm/percpu.c pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \ chunk 525 mm/percpu.c static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot, chunk 528 mm/percpu.c if (chunk != pcpu_reserved_chunk) { chunk 530 mm/percpu.c list_move(&chunk->list, &pcpu_slot[slot]); chunk 532 mm/percpu.c list_move_tail(&chunk->list, &pcpu_slot[slot]); chunk 536 mm/percpu.c static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot) chunk 538 mm/percpu.c __pcpu_chunk_move(chunk, slot, true); chunk 554 mm/percpu.c static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot) chunk 556 mm/percpu.c int nslot = pcpu_chunk_slot(chunk); chunk 559 mm/percpu.c __pcpu_chunk_move(chunk, nslot, oslot < nslot); chunk 571 mm/percpu.c static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr) chunk 573 mm/percpu.c chunk->nr_empty_pop_pages += nr; chunk 574 mm/percpu.c if (chunk != pcpu_reserved_chunk) chunk 685 mm/percpu.c static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off, chunk 697 mm/percpu.c block = chunk->md_blocks + s_index; chunk 700 mm/percpu.c l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off); chunk 718 mm/percpu.c static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan) chunk 720 mm/percpu.c struct pcpu_block_md *chunk_md = &chunk->chunk_md; chunk 735 mm/percpu.c pcpu_for_each_md_free_region(chunk, bit_off, bits) { chunk 748 mm/percpu.c static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index) chunk 750 mm/percpu.c struct pcpu_block_md *block = chunk->md_blocks + index; chunk 751 mm/percpu.c unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index); chunk 784 mm/percpu.c static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off, chunk 787 mm/percpu.c struct pcpu_block_md *chunk_md = &chunk->chunk_md; chunk 804 mm/percpu.c s_block = chunk->md_blocks + s_index; chunk 805 mm/percpu.c e_block = chunk->md_blocks + e_index; chunk 818 mm/percpu.c pcpu_index_alloc_map(chunk, s_index), chunk 836 mm/percpu.c pcpu_block_refresh_hint(chunk, s_index); chunk 859 mm/percpu.c pcpu_index_alloc_map(chunk, e_index), chunk 872 mm/percpu.c pcpu_block_refresh_hint(chunk, e_index); chunk 891 mm/percpu.c pcpu_update_empty_pages(chunk, -nr_empty_pages); chunk 910 mm/percpu.c pcpu_chunk_refresh_hint(chunk, false); chunk 931 mm/percpu.c static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off, chunk 951 mm/percpu.c s_block = chunk->md_blocks + s_index; chunk 952 mm/percpu.c e_block = chunk->md_blocks + e_index; chunk 974 mm/percpu.c int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), chunk 983 mm/percpu.c end = find_next_bit(pcpu_index_alloc_map(chunk, e_index), chunk 1012 mm/percpu.c pcpu_update_empty_pages(chunk, nr_empty_pages); chunk 1021 mm/percpu.c pcpu_chunk_refresh_hint(chunk, true); chunk 1023 mm/percpu.c pcpu_block_update(&chunk->chunk_md, chunk 1041 mm/percpu.c static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits, chunk 1050 mm/percpu.c pcpu_next_unpop(chunk->populated, &rs, &re, page_end); chunk 1077 mm/percpu.c static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits, chunk 1080 mm/percpu.c struct pcpu_block_md *chunk_md = &chunk->chunk_md; chunk 1096 mm/percpu.c pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) { chunk 1097 mm/percpu.c if (!pop_only || pcpu_is_populated(chunk, bit_off, bits, chunk 1105 mm/percpu.c if (bit_off == 
pcpu_chunk_map_bits(chunk)) chunk 1186 mm/percpu.c static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits, chunk 1189 mm/percpu.c struct pcpu_block_md *chunk_md = &chunk->chunk_md; chunk 1196 mm/percpu.c oslot = pcpu_chunk_slot(chunk); chunk 1202 mm/percpu.c pcpu_chunk_map_bits(chunk)); chunk 1203 mm/percpu.c bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits, chunk 1209 mm/percpu.c pcpu_block_update_scan(chunk, area_off, area_bits); chunk 1212 mm/percpu.c bitmap_set(chunk->alloc_map, bit_off, alloc_bits); chunk 1215 mm/percpu.c set_bit(bit_off, chunk->bound_map); chunk 1216 mm/percpu.c bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1); chunk 1217 mm/percpu.c set_bit(bit_off + alloc_bits, chunk->bound_map); chunk 1219 mm/percpu.c chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE; chunk 1224 mm/percpu.c chunk->alloc_map, chunk 1225 mm/percpu.c pcpu_chunk_map_bits(chunk), chunk 1228 mm/percpu.c pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits); chunk 1230 mm/percpu.c pcpu_chunk_relocate(chunk, oslot); chunk 1243 mm/percpu.c static void pcpu_free_area(struct pcpu_chunk *chunk, int off) chunk 1245 mm/percpu.c struct pcpu_block_md *chunk_md = &chunk->chunk_md; chunk 1249 mm/percpu.c pcpu_stats_area_dealloc(chunk); chunk 1251 mm/percpu.c oslot = pcpu_chunk_slot(chunk); chunk 1256 mm/percpu.c end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk), chunk 1259 mm/percpu.c bitmap_clear(chunk->alloc_map, bit_off, bits); chunk 1262 mm/percpu.c chunk->free_bytes += bits * PCPU_MIN_ALLOC_SIZE; chunk 1267 mm/percpu.c pcpu_block_update_hint_free(chunk, bit_off, bits); chunk 1269 mm/percpu.c pcpu_chunk_relocate(chunk, oslot); chunk 1282 mm/percpu.c static void pcpu_init_md_blocks(struct pcpu_chunk *chunk) chunk 1287 mm/percpu.c pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk)); chunk 1289 mm/percpu.c for (md_block = chunk->md_blocks; chunk 1290 mm/percpu.c md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk); chunk 1311 mm/percpu.c struct pcpu_chunk *chunk; chunk 1332 mm/percpu.c chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES); chunk 1333 mm/percpu.c if (!chunk) chunk 1337 mm/percpu.c INIT_LIST_HEAD(&chunk->list); chunk 1339 mm/percpu.c chunk->base_addr = (void *)aligned_addr; chunk 1340 mm/percpu.c chunk->start_offset = start_offset; chunk 1341 mm/percpu.c chunk->end_offset = region_size - chunk->start_offset - map_size; chunk 1343 mm/percpu.c chunk->nr_pages = region_size >> PAGE_SHIFT; chunk 1344 mm/percpu.c region_bits = pcpu_chunk_map_bits(chunk); chunk 1346 mm/percpu.c alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]); chunk 1347 mm/percpu.c chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); chunk 1348 mm/percpu.c if (!chunk->alloc_map) chunk 1353 mm/percpu.c BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]); chunk 1354 mm/percpu.c chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); chunk 1355 mm/percpu.c if (!chunk->bound_map) chunk 1359 mm/percpu.c alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]); chunk 1360 mm/percpu.c chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES); chunk 1361 mm/percpu.c if (!chunk->md_blocks) chunk 1365 mm/percpu.c pcpu_init_md_blocks(chunk); chunk 1368 mm/percpu.c chunk->immutable = true; chunk 1369 mm/percpu.c bitmap_fill(chunk->populated, chunk->nr_pages); chunk 1370 mm/percpu.c chunk->nr_populated = chunk->nr_pages; chunk 1371 mm/percpu.c chunk->nr_empty_pop_pages = chunk->nr_pages; chunk 1373 
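
The alloc_map/bound_map pair sized above (BITS_TO_LONGS(region_bits) and region_bits + 1 bits) and marked just below follows one convention throughout mm/percpu.c: alloc_map has one bit per PCPU_MIN_ALLOC_SIZE unit, while bound_map marks the first unit of each allocation plus one bit just past its end. A sketch of that marking, using a byte per bit in place of the kernel bitmap helpers; UNITS and mark_alloc_sketch() are hypothetical:

    #include <stdio.h>
    #include <string.h>

    #define UNITS 16

    static void mark_alloc_sketch(unsigned char *alloc, unsigned char *bound,
                                  int off, int bits)
    {
        memset(alloc + off, 1, bits);      /* bitmap_set(alloc_map, ...) */
        bound[off] = 1;                    /* set_bit(off, bound_map) */
        memset(bound + off + 1, 0, bits - 1);
        bound[off + bits] = 1;             /* one bit past the end */
    }

    int main(void)
    {
        unsigned char alloc[UNITS] = { 0 }, bound[UNITS + 1] = { 0 };

        mark_alloc_sketch(alloc, bound, 2, 5); /* 5 units at offset 2 */
        for (int i = 0; i <= UNITS; i++)
            printf("%d", bound[i]);        /* 00100001000000000 */
        printf("\n");
        return 0;
    }
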
mm/percpu.c chunk->free_bytes = map_size; chunk 1375 mm/percpu.c if (chunk->start_offset) { chunk 1377 mm/percpu.c offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE; chunk 1378 mm/percpu.c bitmap_set(chunk->alloc_map, 0, offset_bits); chunk 1379 mm/percpu.c set_bit(0, chunk->bound_map); chunk 1380 mm/percpu.c set_bit(offset_bits, chunk->bound_map); chunk 1382 mm/percpu.c chunk->chunk_md.first_free = offset_bits; chunk 1384 mm/percpu.c pcpu_block_update_hint_alloc(chunk, 0, offset_bits); chunk 1387 mm/percpu.c if (chunk->end_offset) { chunk 1389 mm/percpu.c offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE; chunk 1390 mm/percpu.c bitmap_set(chunk->alloc_map, chunk 1391 mm/percpu.c pcpu_chunk_map_bits(chunk) - offset_bits, chunk 1394 mm/percpu.c chunk->bound_map); chunk 1395 mm/percpu.c set_bit(region_bits, chunk->bound_map); chunk 1397 mm/percpu.c pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk) chunk 1401 mm/percpu.c return chunk; chunk 1406 mm/percpu.c struct pcpu_chunk *chunk; chunk 1409 mm/percpu.c chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp); chunk 1410 mm/percpu.c if (!chunk) chunk 1413 mm/percpu.c INIT_LIST_HEAD(&chunk->list); chunk 1414 mm/percpu.c chunk->nr_pages = pcpu_unit_pages; chunk 1415 mm/percpu.c region_bits = pcpu_chunk_map_bits(chunk); chunk 1417 mm/percpu.c chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) * chunk 1418 mm/percpu.c sizeof(chunk->alloc_map[0]), gfp); chunk 1419 mm/percpu.c if (!chunk->alloc_map) chunk 1422 mm/percpu.c chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) * chunk 1423 mm/percpu.c sizeof(chunk->bound_map[0]), gfp); chunk 1424 mm/percpu.c if (!chunk->bound_map) chunk 1427 mm/percpu.c chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) * chunk 1428 mm/percpu.c sizeof(chunk->md_blocks[0]), gfp); chunk 1429 mm/percpu.c if (!chunk->md_blocks) chunk 1432 mm/percpu.c pcpu_init_md_blocks(chunk); chunk 1435 mm/percpu.c chunk->free_bytes = chunk->nr_pages * PAGE_SIZE; chunk 1437 mm/percpu.c return chunk; chunk 1440 mm/percpu.c pcpu_mem_free(chunk->bound_map); chunk 1442 mm/percpu.c pcpu_mem_free(chunk->alloc_map); chunk 1444 mm/percpu.c pcpu_mem_free(chunk); chunk 1449 mm/percpu.c static void pcpu_free_chunk(struct pcpu_chunk *chunk) chunk 1451 mm/percpu.c if (!chunk) chunk 1453 mm/percpu.c pcpu_mem_free(chunk->md_blocks); chunk 1454 mm/percpu.c pcpu_mem_free(chunk->bound_map); chunk 1455 mm/percpu.c pcpu_mem_free(chunk->alloc_map); chunk 1456 mm/percpu.c pcpu_mem_free(chunk); chunk 1472 mm/percpu.c static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start, chunk 1479 mm/percpu.c bitmap_set(chunk->populated, page_start, nr); chunk 1480 mm/percpu.c chunk->nr_populated += nr; chunk 1483 mm/percpu.c pcpu_update_empty_pages(chunk, nr); chunk 1496 mm/percpu.c static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk, chunk 1503 mm/percpu.c bitmap_clear(chunk->populated, page_start, nr); chunk 1504 mm/percpu.c chunk->nr_populated -= nr; chunk 1507 mm/percpu.c pcpu_update_empty_pages(chunk, -nr); chunk 1525 mm/percpu.c static int pcpu_populate_chunk(struct pcpu_chunk *chunk, chunk 1527 mm/percpu.c static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, chunk 1530 mm/percpu.c static void pcpu_destroy_chunk(struct pcpu_chunk *chunk); chunk 1594 mm/percpu.c struct pcpu_chunk *chunk, *next; chunk 1637 mm/percpu.c chunk = pcpu_reserved_chunk; chunk 1639 mm/percpu.c off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic); chunk 1645 mm/percpu.c off = 
pcpu_alloc_area(chunk, bits, bit_align, off); chunk 1656 mm/percpu.c list_for_each_entry_safe(chunk, next, &pcpu_slot[slot], list) { chunk 1657 mm/percpu.c off = pcpu_find_block_fit(chunk, bits, bit_align, chunk 1661 mm/percpu.c pcpu_chunk_move(chunk, 0); chunk 1665 mm/percpu.c off = pcpu_alloc_area(chunk, bits, bit_align, off); chunk 1685 mm/percpu.c chunk = pcpu_create_chunk(pcpu_gfp); chunk 1686 mm/percpu.c if (!chunk) { chunk 1692 mm/percpu.c pcpu_chunk_relocate(chunk, -1); chunk 1700 mm/percpu.c pcpu_stats_area_alloc(chunk, size); chunk 1710 mm/percpu.c pcpu_for_each_unpop_region(chunk->populated, rs, re, chunk 1712 mm/percpu.c WARN_ON(chunk->immutable); chunk 1714 mm/percpu.c ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp); chunk 1718 mm/percpu.c pcpu_free_area(chunk, off); chunk 1722 mm/percpu.c pcpu_chunk_populated(chunk, rs, re); chunk 1734 mm/percpu.c memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); chunk 1736 mm/percpu.c ptr = __addr_to_pcpu_ptr(chunk->base_addr + off); chunk 1740 mm/percpu.c chunk->base_addr, off, ptr); chunk 1838 mm/percpu.c struct pcpu_chunk *chunk, *next; chunk 1848 mm/percpu.c list_for_each_entry_safe(chunk, next, free_head, list) { chunk 1849 mm/percpu.c WARN_ON(chunk->immutable); chunk 1852 mm/percpu.c if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) chunk 1855 mm/percpu.c list_move(&chunk->list, &to_free); chunk 1860 mm/percpu.c list_for_each_entry_safe(chunk, next, &to_free, list) { chunk 1863 mm/percpu.c pcpu_for_each_pop_region(chunk->populated, rs, re, 0, chunk 1864 mm/percpu.c chunk->nr_pages) { chunk 1865 mm/percpu.c pcpu_depopulate_chunk(chunk, rs, re); chunk 1867 mm/percpu.c pcpu_chunk_depopulated(chunk, rs, re); chunk 1870 mm/percpu.c pcpu_destroy_chunk(chunk); chunk 1902 mm/percpu.c list_for_each_entry(chunk, &pcpu_slot[slot], list) { chunk 1903 mm/percpu.c nr_unpop = chunk->nr_pages - chunk->nr_populated; chunk 1913 mm/percpu.c pcpu_for_each_unpop_region(chunk->populated, rs, re, 0, chunk 1914 mm/percpu.c chunk->nr_pages) { chunk 1917 mm/percpu.c ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp); chunk 1921 mm/percpu.c pcpu_chunk_populated(chunk, rs, rs + nr); chunk 1934 mm/percpu.c chunk = pcpu_create_chunk(gfp); chunk 1935 mm/percpu.c if (chunk) { chunk 1937 mm/percpu.c pcpu_chunk_relocate(chunk, -1); chunk 1958 mm/percpu.c struct pcpu_chunk *chunk; chunk 1972 mm/percpu.c chunk = pcpu_chunk_addr_search(addr); chunk 1973 mm/percpu.c off = addr - chunk->base_addr; chunk 1975 mm/percpu.c pcpu_free_area(chunk, off); chunk 1978 mm/percpu.c if (chunk->free_bytes == pcpu_unit_size) { chunk 1982 mm/percpu.c if (pos != chunk) { chunk 1988 mm/percpu.c trace_percpu_free_percpu(chunk->base_addr, off, ptr); chunk 2276 mm/percpu.c struct pcpu_chunk *chunk; chunk 2432 mm/percpu.c chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); chunk 2436 mm/percpu.c pcpu_reserved_chunk = chunk; chunk 2441 mm/percpu.c chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); chunk 2445 mm/percpu.c pcpu_first_chunk = chunk; chunk 349 net/bluetooth/af_bluetooth.c int chunk; chunk 375 net/bluetooth/af_bluetooth.c chunk = min_t(unsigned int, skb->len, size); chunk 376 net/bluetooth/af_bluetooth.c if (skb_copy_datagram_msg(skb, 0, msg, chunk)) { chunk 382 net/bluetooth/af_bluetooth.c copied += chunk; chunk 383 net/bluetooth/af_bluetooth.c size -= chunk; chunk 390 net/bluetooth/af_bluetooth.c if (chunk <= skb_len) { chunk 391 net/bluetooth/af_bluetooth.c __skb_pull(skb, chunk); chunk 396 net/bluetooth/af_bluetooth.c chunk -= skb_len; chunk 399 
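
The af_bluetooth.c receive loop above takes chunk = min_t(unsigned int, skb->len, size) per skb and advances copied and size in lockstep before pulling the consumed bytes. A sketch of the same bounded-copy loop, with an array of fragments standing in for the skb queue and a ternary for min_t():

    #include <stdio.h>
    #include <string.h>

    static size_t recv_bounded_sketch(char *dst, size_t size,
                                      const char *frags[],
                                      const size_t *lens, int nfrags)
    {
        size_t copied = 0;

        for (int i = 0; i < nfrags && size; i++) {
            size_t chunk = lens[i] < size ? lens[i] : size;

            memcpy(dst + copied, frags[i], chunk);
            copied += chunk;
            size -= chunk;
        }
        return copied;
    }

    int main(void)
    {
        const char *frags[] = { "hel", "lo ", "world" };
        const size_t lens[] = { 3, 3, 5 };
        char out[8];
        size_t n = recv_bounded_sketch(out, sizeof(out), frags, lens, 3);

        printf("copied %zu: %.*s\n", n, (int)n, out); /* copied 8: hello wo */
        return 0;
    }
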
net/bluetooth/af_bluetooth.c if (chunk <= frag->len) { chunk 401 net/bluetooth/af_bluetooth.c skb->len -= chunk; chunk 402 net/bluetooth/af_bluetooth.c skb->data_len -= chunk; chunk 403 net/bluetooth/af_bluetooth.c __skb_pull(frag, chunk); chunk 407 net/bluetooth/af_bluetooth.c chunk -= frag->len; chunk 373 net/caif/caif_socket.c int chunk; chunk 424 net/caif/caif_socket.c chunk = min_t(unsigned int, skb->len, size); chunk 425 net/caif/caif_socket.c if (memcpy_to_msg(msg, skb->data, chunk)) { chunk 431 net/caif/caif_socket.c copied += chunk; chunk 432 net/caif/caif_socket.c size -= chunk; chunk 436 net/caif/caif_socket.c skb_pull(skb, chunk); chunk 723 net/core/datagram.c int chunk = skb->len - hlen; chunk 725 net/core/datagram.c if (!chunk) chunk 728 net/core/datagram.c if (msg_data_left(msg) < chunk) { chunk 731 net/core/datagram.c if (skb_copy_datagram_msg(skb, hlen, msg, chunk)) chunk 736 net/core/datagram.c chunk, &csum)) chunk 740 net/core/datagram.c iov_iter_revert(&msg->msg_iter, chunk); chunk 3749 net/core/devlink.c u8 *chunk, u32 chunk_size, chunk 3759 net/core/devlink.c err = nla_put(msg, DEVLINK_ATTR_REGION_CHUNK_DATA, chunk_size, chunk); chunk 5651 net/core/skbuff.c unsigned long chunk; chunk 5691 net/core/skbuff.c chunk = min_t(unsigned long, data_len, chunk 5693 net/core/skbuff.c skb_fill_page_desc(skb, i, page, 0, chunk); chunk 5694 net/core/skbuff.c data_len -= chunk; chunk 1755 net/decnet/af_decnet.c unsigned int chunk = skb->len; chunk 1758 net/decnet/af_decnet.c if ((chunk + copied) > size) chunk 1759 net/decnet/af_decnet.c chunk = size - copied; chunk 1761 net/decnet/af_decnet.c if (memcpy_to_msg(msg, skb->data, chunk)) { chunk 1765 net/decnet/af_decnet.c copied += chunk; chunk 1768 net/decnet/af_decnet.c skb_pull(skb, chunk); chunk 1985 net/ipv4/tcp_output.c u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); chunk 1990 net/ipv4/tcp_output.c chunk /= win_divisor; chunk 1991 net/ipv4/tcp_output.c if (limit >= chunk) chunk 332 net/rxrpc/sendmsg.c size_t size, chunk, max, space; chunk 351 net/rxrpc/sendmsg.c chunk = max; chunk 352 net/rxrpc/sendmsg.c if (chunk > msg_data_left(msg) && !more) chunk 353 net/rxrpc/sendmsg.c chunk = msg_data_left(msg); chunk 355 net/rxrpc/sendmsg.c space = chunk + call->conn->size_align; chunk 360 net/rxrpc/sendmsg.c _debug("SIZE: %zu/%zu/%zu", chunk, space, size); chunk 380 net/rxrpc/sendmsg.c sp->remain = chunk; chunk 926 net/sctp/associola.c struct sctp_chunk *chunk; chunk 948 net/sctp/associola.c list_for_each_entry(chunk, &active->transmitted, chunk 951 net/sctp/associola.c if (key == chunk->subh.data_hdr->tsn) { chunk 963 net/sctp/associola.c list_for_each_entry(chunk, &transport->transmitted, chunk 965 net/sctp/associola.c if (key == chunk->subh.data_hdr->tsn) { chunk 984 net/sctp/associola.c struct sctp_chunk *chunk; chunk 995 net/sctp/associola.c while (NULL != (chunk = sctp_inq_pop(inqueue))) { chunk 997 net/sctp/associola.c subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); chunk 1002 net/sctp/associola.c if (first_time && subtype.chunk == SCTP_CID_AUTH) { chunk 1015 net/sctp/associola.c chunk->auth_chunk = skb_clone(chunk->skb, chunk 1017 net/sctp/associola.c chunk->auth = 1; chunk 1030 net/sctp/associola.c if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth) chunk 1036 net/sctp/associola.c if (sctp_chunk_is_data(chunk)) chunk 1037 net/sctp/associola.c asoc->peer.last_data_from = chunk->transport; chunk 1041 net/sctp/associola.c if (chunk->chunk_hdr->type == SCTP_CID_SACK) chunk 1045 net/sctp/associola.c if 
(chunk->transport) chunk 1046 net/sctp/associola.c chunk->transport->last_time_heard = ktime_get(); chunk 1050 net/sctp/associola.c state, ep, asoc, chunk, GFP_ATOMIC); chunk 1059 net/sctp/associola.c if (error && chunk) chunk 1060 net/sctp/associola.c chunk->pdiscard = 1; chunk 387 net/sctp/auth.c struct sctp_chunk *chunk; chunk 414 net/sctp/auth.c list_for_each_entry(chunk, &asoc->outqueue.out_chunk_list, list) { chunk 415 net/sctp/auth.c if (sctp_auth_send_cid(chunk->chunk_hdr->type, asoc)) { chunk 416 net/sctp/auth.c chunk->auth = 1; chunk 417 net/sctp/auth.c if (!chunk->shkey) { chunk 418 net/sctp/auth.c chunk->shkey = asoc->shkey; chunk 419 net/sctp/auth.c sctp_auth_shkey_hold(chunk->shkey); chunk 637 net/sctp/auth.c static int __sctp_auth_cid(enum sctp_cid chunk, struct sctp_chunks_param *param) chunk 663 net/sctp/auth.c if (param->chunks[i] == chunk) chunk 673 net/sctp/auth.c int sctp_auth_send_cid(enum sctp_cid chunk, const struct sctp_association *asoc) chunk 681 net/sctp/auth.c return __sctp_auth_cid(chunk, asoc->peer.peer_chunks); chunk 685 net/sctp/auth.c int sctp_auth_recv_cid(enum sctp_cid chunk, const struct sctp_association *asoc) chunk 693 net/sctp/auth.c return __sctp_auth_cid(chunk, chunk 60 net/sctp/chunk.c struct sctp_chunk *chunk; chunk 65 net/sctp/chunk.c list_for_each_entry(chunk, &msg->chunks, frag_list) chunk 66 net/sctp/chunk.c sctp_chunk_free(chunk); chunk 76 net/sctp/chunk.c struct sctp_chunk *chunk; chunk 86 net/sctp/chunk.c chunk = list_entry(pos, struct sctp_chunk, frag_list); chunk 89 net/sctp/chunk.c asoc = chunk->asoc; chunk 102 net/sctp/chunk.c if (chunk->has_tsn) chunk 107 net/sctp/chunk.c ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent, chunk 113 net/sctp/chunk.c sctp_chunk_put(chunk); chunk 134 net/sctp/chunk.c static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chunk) chunk 137 net/sctp/chunk.c chunk->msg = msg; chunk 156 net/sctp/chunk.c struct sctp_chunk *chunk; chunk 264 net/sctp/chunk.c chunk = asoc->stream.si->make_datafrag(asoc, sinfo, len, frag, chunk 266 net/sctp/chunk.c if (!chunk) { chunk 271 net/sctp/chunk.c err = sctp_user_addto_chunk(chunk, len, from); chunk 275 net/sctp/chunk.c chunk->shkey = shkey; chunk 278 net/sctp/chunk.c __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr - chunk 279 net/sctp/chunk.c chunk->skb->data); chunk 281 net/sctp/chunk.c sctp_datamsg_assign(msg, chunk); chunk 282 net/sctp/chunk.c list_add_tail(&chunk->frag_list, &msg->chunks); chunk 288 net/sctp/chunk.c sctp_chunk_free(chunk); chunk 293 net/sctp/chunk.c chunk = list_entry(pos, struct sctp_chunk, frag_list); chunk 294 net/sctp/chunk.c sctp_chunk_free(chunk); chunk 302 net/sctp/chunk.c int sctp_chunk_abandoned(struct sctp_chunk *chunk) chunk 304 net/sctp/chunk.c if (!chunk->asoc->peer.prsctp_capable) chunk 307 net/sctp/chunk.c if (chunk->msg->abandoned) chunk 310 net/sctp/chunk.c if (!chunk->has_tsn && chunk 311 net/sctp/chunk.c !(chunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)) chunk 314 net/sctp/chunk.c if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) && chunk 315 net/sctp/chunk.c time_after(jiffies, chunk->msg->expires_at)) { chunk 317 net/sctp/chunk.c SCTP_SO(&chunk->asoc->stream, chunk 318 net/sctp/chunk.c chunk->sinfo.sinfo_stream); chunk 320 net/sctp/chunk.c if (chunk->sent_count) { chunk 321 net/sctp/chunk.c chunk->asoc->abandoned_sent[SCTP_PR_INDEX(TTL)]++; chunk 324 net/sctp/chunk.c chunk->asoc->abandoned_unsent[SCTP_PR_INDEX(TTL)]++; chunk 327 net/sctp/chunk.c chunk->msg->abandoned = 1; chunk 329 net/sctp/chunk.c } else 
if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) && chunk 330 net/sctp/chunk.c chunk->sent_count > chunk->sinfo.sinfo_timetolive) { chunk 332 net/sctp/chunk.c SCTP_SO(&chunk->asoc->stream, chunk 333 net/sctp/chunk.c chunk->sinfo.sinfo_stream); chunk 335 net/sctp/chunk.c chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++; chunk 337 net/sctp/chunk.c chunk->msg->abandoned = 1; chunk 339 net/sctp/chunk.c } else if (!SCTP_PR_POLICY(chunk->sinfo.sinfo_flags) && chunk 340 net/sctp/chunk.c chunk->msg->expires_at && chunk 341 net/sctp/chunk.c time_after(jiffies, chunk->msg->expires_at)) { chunk 342 net/sctp/chunk.c chunk->msg->abandoned = 1; chunk 351 net/sctp/chunk.c void sctp_chunk_fail(struct sctp_chunk *chunk, int error) chunk 353 net/sctp/chunk.c chunk->msg->send_failed = 1; chunk 354 net/sctp/chunk.c chunk->msg->send_error = error; chunk 50 net/sctp/debug.c if (cid.chunk <= SCTP_CID_BASE_MAX) chunk 51 net/sctp/debug.c return sctp_cid_tbl[cid.chunk]; chunk 53 net/sctp/debug.c switch (cid.chunk) { chunk 322 net/sctp/endpointola.c struct sctp_chunk *chunk; chunk 337 net/sctp/endpointola.c while (NULL != (chunk = sctp_inq_pop(inqueue))) { chunk 338 net/sctp/endpointola.c subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); chunk 343 net/sctp/endpointola.c if (first_time && (subtype.chunk == SCTP_CID_AUTH)) { chunk 356 net/sctp/endpointola.c chunk->auth_chunk = skb_clone(chunk->skb, chunk 358 net/sctp/endpointola.c chunk->auth = 1; chunk 369 net/sctp/endpointola.c if (NULL == chunk->asoc) { chunk 371 net/sctp/endpointola.c sctp_source(chunk), chunk 373 net/sctp/endpointola.c chunk->asoc = asoc; chunk 374 net/sctp/endpointola.c chunk->transport = transport; chunk 378 net/sctp/endpointola.c if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth) chunk 384 net/sctp/endpointola.c if (asoc && sctp_chunk_is_data(chunk)) chunk 385 net/sctp/endpointola.c asoc->peer.last_data_from = chunk->transport; chunk 392 net/sctp/endpointola.c if (chunk->transport) chunk 393 net/sctp/endpointola.c chunk->transport->last_time_heard = ktime_get(); chunk 396 net/sctp/endpointola.c ep, asoc, chunk, GFP_ATOMIC); chunk 398 net/sctp/endpointola.c if (error && chunk) chunk 399 net/sctp/endpointola.c chunk->pdiscard = 1; chunk 92 net/sctp/input.c struct sctp_chunk *chunk; chunk 210 net/sctp/input.c chunk = sctp_chunkify(skb, asoc, sk, GFP_ATOMIC); chunk 211 net/sctp/input.c if (!chunk) chunk 213 net/sctp/input.c SCTP_INPUT_CB(skb)->chunk = chunk; chunk 216 net/sctp/input.c chunk->rcvr = rcvr; chunk 219 net/sctp/input.c chunk->sctp_hdr = sctp_hdr(skb); chunk 222 net/sctp/input.c sctp_init_addrs(chunk, &src, &dest); chunk 225 net/sctp/input.c chunk->transport = transport; chunk 249 net/sctp/input.c sctp_chunk_free(chunk); chunk 256 net/sctp/input.c sctp_inq_push(&chunk->rcvr->inqueue, chunk); chunk 291 net/sctp/input.c struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; chunk 292 net/sctp/input.c struct sctp_inq *inqueue = &chunk->rcvr->inqueue; chunk 293 net/sctp/input.c struct sctp_transport *t = chunk->transport; chunk 297 net/sctp/input.c rcvr = chunk->rcvr; chunk 304 net/sctp/input.c sctp_chunk_free(chunk); chunk 326 net/sctp/input.c sctp_chunk_free(chunk); chunk 330 net/sctp/input.c sctp_inq_push(inqueue, chunk); chunk 342 net/sctp/input.c sctp_chunk_free(chunk); chunk 344 net/sctp/input.c sctp_inq_push(inqueue, chunk); chunk 362 net/sctp/input.c struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; chunk 363 net/sctp/input.c struct sctp_transport *t = chunk->transport; chunk 364 net/sctp/input.c struct sctp_ep_common 
*rcvr = chunk->rcvr; chunk 44 net/sctp/inqueue.c struct sctp_chunk *chunk, *tmp; chunk 47 net/sctp/inqueue.c list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) { chunk 48 net/sctp/inqueue.c list_del_init(&chunk->list); chunk 49 net/sctp/inqueue.c sctp_chunk_free(chunk); chunk 64 net/sctp/inqueue.c void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk) chunk 67 net/sctp/inqueue.c if (chunk->rcvr->dead) { chunk 68 net/sctp/inqueue.c sctp_chunk_free(chunk); chunk 77 net/sctp/inqueue.c list_add_tail(&chunk->list, &q->in_chunk_list); chunk 78 net/sctp/inqueue.c if (chunk->asoc) chunk 79 net/sctp/inqueue.c chunk->asoc->stats.ipackets++; chunk 86 net/sctp/inqueue.c struct sctp_chunk *chunk; chunk 89 net/sctp/inqueue.c chunk = queue->in_progress; chunk 91 net/sctp/inqueue.c if (chunk->singleton || chunk 92 net/sctp/inqueue.c chunk->end_of_packet || chunk 93 net/sctp/inqueue.c chunk->pdiscard) chunk 96 net/sctp/inqueue.c ch = (struct sctp_chunkhdr *)chunk->chunk_end; chunk 109 net/sctp/inqueue.c struct sctp_chunk *chunk; chunk 116 net/sctp/inqueue.c chunk = queue->in_progress; chunk 117 net/sctp/inqueue.c if (chunk) { chunk 121 net/sctp/inqueue.c if (chunk->singleton || chunk 122 net/sctp/inqueue.c chunk->end_of_packet || chunk 123 net/sctp/inqueue.c chunk->pdiscard) { chunk 124 net/sctp/inqueue.c if (chunk->head_skb == chunk->skb) { chunk 125 net/sctp/inqueue.c chunk->skb = skb_shinfo(chunk->skb)->frag_list; chunk 128 net/sctp/inqueue.c if (chunk->skb->next) { chunk 129 net/sctp/inqueue.c chunk->skb = chunk->skb->next; chunk 133 net/sctp/inqueue.c if (chunk->head_skb) chunk 134 net/sctp/inqueue.c chunk->skb = chunk->head_skb; chunk 135 net/sctp/inqueue.c sctp_chunk_free(chunk); chunk 136 net/sctp/inqueue.c chunk = queue->in_progress = NULL; chunk 139 net/sctp/inqueue.c ch = (struct sctp_chunkhdr *)chunk->chunk_end; chunk 141 net/sctp/inqueue.c skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data); chunk 147 net/sctp/inqueue.c if (!chunk) { chunk 156 net/sctp/inqueue.c chunk = list_entry(entry, struct sctp_chunk, list); chunk 158 net/sctp/inqueue.c if (skb_is_gso(chunk->skb) && skb_is_gso_sctp(chunk->skb)) { chunk 162 net/sctp/inqueue.c if (skb_shinfo(chunk->skb)->frag_list) chunk 163 net/sctp/inqueue.c chunk->head_skb = chunk->skb; chunk 166 net/sctp/inqueue.c if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len) chunk 167 net/sctp/inqueue.c chunk->skb = skb_shinfo(chunk->skb)->frag_list; chunk 169 net/sctp/inqueue.c if (WARN_ON(!chunk->skb)) { chunk 170 net/sctp/inqueue.c __SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS); chunk 171 net/sctp/inqueue.c sctp_chunk_free(chunk); chunk 176 net/sctp/inqueue.c if (chunk->asoc) chunk 177 net/sctp/inqueue.c sock_rps_save_rxhash(chunk->asoc->base.sk, chunk->skb); chunk 179 net/sctp/inqueue.c queue->in_progress = chunk; chunk 183 net/sctp/inqueue.c ch = (struct sctp_chunkhdr *)chunk->skb->data; chunk 184 net/sctp/inqueue.c chunk->singleton = 1; chunk 185 net/sctp/inqueue.c chunk->data_accepted = 0; chunk 186 net/sctp/inqueue.c chunk->pdiscard = 0; chunk 187 net/sctp/inqueue.c chunk->auth = 0; chunk 188 net/sctp/inqueue.c chunk->has_asconf = 0; chunk 189 net/sctp/inqueue.c chunk->end_of_packet = 0; chunk 190 net/sctp/inqueue.c if (chunk->head_skb) { chunk 192 net/sctp/inqueue.c *cb = SCTP_INPUT_CB(chunk->skb), chunk 193 net/sctp/inqueue.c *head_cb = SCTP_INPUT_CB(chunk->head_skb); chunk 195 net/sctp/inqueue.c cb->chunk = head_cb->chunk; chunk 200 net/sctp/inqueue.c chunk->chunk_hdr = ch; chunk 201 
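
The sctp_inq_pop() entries here walk SCTP chunks as 4-byte-padded TLVs: chunk_end is computed with SCTP_PAD4(ntohs(ch->length)) just below, and a chunk_end running past skb_tail_pointer() sets pdiscard. A self-contained sketch of that walk; struct chunkhdr_sketch is a hypothetical stand-in for struct sctp_chunkhdr:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>

    #define PAD4(x) (((x) + 3) & ~3U)

    struct chunkhdr_sketch { uint8_t type, flags; uint16_t length; };

    static void walk_chunks_sketch(const uint8_t *pkt, size_t tail)
    {
        size_t off = 0;

        while (off + sizeof(struct chunkhdr_sketch) <= tail) {
            struct chunkhdr_sketch ch;
            size_t end;

            memcpy(&ch, pkt + off, sizeof(ch));
            if (ntohs(ch.length) < sizeof(ch))
                return;                       /* malformed length, stop */
            end = off + PAD4(ntohs(ch.length));
            if (end > tail) {                 /* truncated: pdiscard case */
                puts("truncated chunk, discard");
                return;
            }
            printf("chunk type %u len %u\n", ch.type, ntohs(ch.length));
            off = end;                        /* chunk_end */
        }
    }

    int main(void)
    {
        uint8_t pkt[8] = { 1, 0 };            /* type 1, flags 0 */
        uint16_t len = htons(8);              /* length includes header */

        memcpy(pkt + 2, &len, sizeof(len));
        walk_chunks_sketch(pkt, sizeof(pkt));
        return 0;
    }
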
net/sctp/inqueue.c chunk->chunk_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length)); chunk 202 net/sctp/inqueue.c skb_pull(chunk->skb, sizeof(*ch)); chunk 203 net/sctp/inqueue.c chunk->subh.v = NULL; /* Subheader is no longer valid. */ chunk 205 net/sctp/inqueue.c if (chunk->chunk_end + sizeof(*ch) <= skb_tail_pointer(chunk->skb)) { chunk 207 net/sctp/inqueue.c chunk->singleton = 0; chunk 208 net/sctp/inqueue.c } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) { chunk 210 net/sctp/inqueue.c chunk->pdiscard = 1; chunk 211 net/sctp/inqueue.c chunk->chunk_end = skb_tail_pointer(chunk->skb); chunk 216 net/sctp/inqueue.c chunk->end_of_packet = 1; chunk 220 net/sctp/inqueue.c chunk, sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)), chunk 221 net/sctp/inqueue.c ntohs(chunk->chunk_hdr->length), chunk->skb->len); chunk 223 net/sctp/inqueue.c return chunk; chunk 37 net/sctp/objcnt.c SCTP_DBG_OBJCNT(chunk); chunk 50 net/sctp/objcnt.c SCTP_DBG_OBJCNT_ENTRY(chunk), chunk 46 net/sctp/output.c struct sctp_chunk *chunk); chunk 48 net/sctp/output.c struct sctp_chunk *chunk); chunk 50 net/sctp/output.c struct sctp_chunk *chunk); chunk 52 net/sctp/output.c struct sctp_chunk *chunk, chunk 121 net/sctp/output.c struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc); chunk 123 net/sctp/output.c if (chunk) chunk 124 net/sctp/output.c sctp_packet_append_chunk(packet, chunk); chunk 161 net/sctp/output.c struct sctp_chunk *chunk, *tmp; chunk 165 net/sctp/output.c list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { chunk 166 net/sctp/output.c list_del_init(&chunk->list); chunk 167 net/sctp/output.c sctp_chunk_free(chunk); chunk 179 net/sctp/output.c struct sctp_chunk *chunk, chunk 185 net/sctp/output.c packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1); chunk 187 net/sctp/output.c switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) { chunk 194 net/sctp/output.c chunk->skb->sk->sk_err = -error; chunk 201 net/sctp/output.c chunk); chunk 216 net/sctp/output.c struct sctp_chunk *chunk) chunk 229 net/sctp/output.c if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth) chunk 235 net/sctp/output.c if (!chunk->auth) chunk 238 net/sctp/output.c auth = sctp_make_auth(asoc, chunk->shkey->key_id); chunk 242 net/sctp/output.c auth->shkey = chunk->shkey; chunk 255 net/sctp/output.c struct sctp_chunk *chunk) chunk 262 net/sctp/output.c if (sctp_chunk_is_data(chunk) && !pkt->has_sack && chunk 303 net/sctp/output.c struct sctp_chunk *chunk) chunk 305 net/sctp/output.c __u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length)); chunk 309 net/sctp/output.c retval = sctp_packet_will_fit(packet, chunk, chunk_len); chunk 314 net/sctp/output.c switch (chunk->chunk_hdr->type) { chunk 318 net/sctp/output.c sctp_packet_append_data(packet, chunk); chunk 326 net/sctp/output.c chunk->sent_at = jiffies; chunk 328 net/sctp/output.c chunk->sent_count++; chunk 336 net/sctp/output.c if (chunk->asoc) chunk 337 net/sctp/output.c chunk->asoc->stats.osacks++; chunk 342 net/sctp/output.c packet->auth = chunk; chunk 347 net/sctp/output.c list_add_tail(&chunk->list, &packet->chunk_list); chunk 349 net/sctp/output.c chunk->transport = packet->transport; chunk 358 net/sctp/output.c struct sctp_chunk *chunk) chunk 362 net/sctp/output.c pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk); chunk 368 net/sctp/output.c if (sctp_chunk_is_data(chunk)) { chunk 369 net/sctp/output.c retval = sctp_packet_can_append_data(packet, chunk); chunk 375 net/sctp/output.c retval = sctp_packet_bundle_auth(packet, 
chunk); chunk 380 net/sctp/output.c retval = sctp_packet_bundle_sack(packet, chunk); chunk 384 net/sctp/output.c retval = __sctp_packet_append_chunk(packet, chunk); chunk 411 net/sctp/output.c struct sctp_chunk *chunk, *tmp; chunk 429 net/sctp/output.c list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, chunk 431 net/sctp/output.c int padded = SCTP_PAD4(chunk->skb->len); chunk 433 net/sctp/output.c if (chunk == packet->auth) chunk 450 net/sctp/output.c list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { chunk 453 net/sctp/output.c list_del_init(&chunk->list); chunk 454 net/sctp/output.c if (sctp_chunk_is_data(chunk)) { chunk 455 net/sctp/output.c if (!sctp_chunk_retransmitted(chunk) && chunk 457 net/sctp/output.c chunk->rtt_in_progress = 1; chunk 462 net/sctp/output.c padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len; chunk 464 net/sctp/output.c skb_put_zero(chunk->skb, padding); chunk 466 net/sctp/output.c if (chunk == packet->auth) chunk 470 net/sctp/output.c skb_put_data(nskb, chunk->skb->data, chunk->skb->len); chunk 473 net/sctp/output.c chunk, chunk 474 net/sctp/output.c sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)), chunk 475 net/sctp/output.c chunk->has_tsn ? "TSN" : "No TSN", chunk 476 net/sctp/output.c chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0, chunk 477 net/sctp/output.c ntohs(chunk->chunk_hdr->length), chunk->skb->len, chunk 478 net/sctp/output.c chunk->rtt_in_progress); chunk 480 net/sctp/output.c pkt_size -= SCTP_PAD4(chunk->skb->len); chunk 482 net/sctp/output.c if (!sctp_chunk_is_data(chunk) && chunk != packet->auth) chunk 483 net/sctp/output.c sctp_chunk_free(chunk); chunk 549 net/sctp/output.c struct sctp_chunk *chunk, *tmp; chunk 559 net/sctp/output.c chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list); chunk 560 net/sctp/output.c sk = chunk->skb->sk; chunk 634 net/sctp/output.c list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { chunk 635 net/sctp/output.c list_del_init(&chunk->list); chunk 636 net/sctp/output.c if (!sctp_chunk_is_data(chunk)) chunk 637 net/sctp/output.c sctp_chunk_free(chunk); chunk 649 net/sctp/output.c struct sctp_chunk *chunk) chunk 673 net/sctp/output.c datasize = sctp_data_size(chunk); chunk 693 net/sctp/output.c if (chunk->fast_retransmit != SCTP_NEED_FRTX && chunk 718 net/sctp/output.c if (chunk->skb->len + q->out_qlen > transport->pathmtu - chunk 719 net/sctp/output.c packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4) chunk 724 net/sctp/output.c if (!chunk->msg->can_delay) chunk 733 net/sctp/output.c struct sctp_chunk *chunk) chunk 736 net/sctp/output.c size_t datasize = sctp_data_size(chunk); chunk 753 net/sctp/output.c sctp_chunk_assign_tsn(chunk); chunk 754 net/sctp/output.c asoc->stream.si->assign_number(chunk); chunk 758 net/sctp/output.c struct sctp_chunk *chunk, chunk 769 net/sctp/output.c if ((packet->auth && chunk->shkey != packet->auth->shkey) || chunk 770 net/sctp/output.c (!packet->auth && chunk->shkey && chunk 771 net/sctp/output.c chunk->chunk_hdr->type != SCTP_CID_AUTH)) chunk 790 net/sctp/output.c (!packet->has_data && chunk->auth)) { chunk 816 net/sctp/output.c if (!sctp_chunk_is_data(chunk) && packet->has_data) chunk 209 net/sctp/outqueue.c struct sctp_chunk *chunk, *tmp; chunk 215 net/sctp/outqueue.c chunk = list_entry(lchunk, struct sctp_chunk, chunk 218 net/sctp/outqueue.c sctp_chunk_fail(chunk, q->error); chunk 219 net/sctp/outqueue.c sctp_chunk_free(chunk); chunk 226 net/sctp/outqueue.c chunk = list_entry(lchunk, struct sctp_chunk, chunk 
228 net/sctp/outqueue.c sctp_chunk_fail(chunk, q->error); chunk 229 net/sctp/outqueue.c sctp_chunk_free(chunk); chunk 235 net/sctp/outqueue.c chunk = list_entry(lchunk, struct sctp_chunk, chunk 237 net/sctp/outqueue.c sctp_chunk_fail(chunk, q->error); chunk 238 net/sctp/outqueue.c sctp_chunk_free(chunk); chunk 244 net/sctp/outqueue.c chunk = list_entry(lchunk, struct sctp_chunk, chunk 246 net/sctp/outqueue.c sctp_chunk_fail(chunk, q->error); chunk 247 net/sctp/outqueue.c sctp_chunk_free(chunk); chunk 251 net/sctp/outqueue.c while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { chunk 252 net/sctp/outqueue.c sctp_sched_dequeue_done(q, chunk); chunk 255 net/sctp/outqueue.c sctp_chunk_fail(chunk, q->error); chunk 256 net/sctp/outqueue.c sctp_chunk_free(chunk); chunk 260 net/sctp/outqueue.c list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { chunk 261 net/sctp/outqueue.c list_del_init(&chunk->list); chunk 262 net/sctp/outqueue.c sctp_chunk_free(chunk); chunk 280 net/sctp/outqueue.c void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp) chunk 284 net/sctp/outqueue.c pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk, chunk 285 net/sctp/outqueue.c chunk && chunk->chunk_hdr ? chunk 286 net/sctp/outqueue.c sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : chunk 292 net/sctp/outqueue.c if (sctp_chunk_is_data(chunk)) { chunk 294 net/sctp/outqueue.c __func__, q, chunk, chunk && chunk->chunk_hdr ? chunk 295 net/sctp/outqueue.c sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : chunk 298 net/sctp/outqueue.c sctp_outq_tail_data(q, chunk); chunk 299 net/sctp/outqueue.c if (chunk->asoc->peer.prsctp_capable && chunk 300 net/sctp/outqueue.c SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags)) chunk 301 net/sctp/outqueue.c chunk->asoc->sent_cnt_removable++; chunk 302 net/sctp/outqueue.c if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) chunk 307 net/sctp/outqueue.c list_add_tail(&chunk->list, &q->control_chunk_list); chunk 451 net/sctp/outqueue.c struct sctp_chunk *chunk; chunk 455 net/sctp/outqueue.c chunk = list_entry(lchunk, struct sctp_chunk, chunk 459 net/sctp/outqueue.c if (sctp_chunk_abandoned(chunk)) { chunk 468 net/sctp/outqueue.c if (!chunk->tsn_gap_acked) { chunk 469 net/sctp/outqueue.c if (chunk->transport) chunk 470 net/sctp/outqueue.c chunk->transport->flight_size -= chunk 471 net/sctp/outqueue.c sctp_data_size(chunk); chunk 472 net/sctp/outqueue.c q->outstanding_bytes -= sctp_data_size(chunk); chunk 473 net/sctp/outqueue.c q->asoc->peer.rwnd += sctp_data_size(chunk); chunk 483 net/sctp/outqueue.c (chunk->fast_retransmit == SCTP_NEED_FRTX)) || chunk 484 net/sctp/outqueue.c (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) { chunk 493 net/sctp/outqueue.c q->asoc->peer.rwnd += sctp_data_size(chunk); chunk 494 net/sctp/outqueue.c q->outstanding_bytes -= sctp_data_size(chunk); chunk 495 net/sctp/outqueue.c if (chunk->transport) chunk 496 net/sctp/outqueue.c transport->flight_size -= sctp_data_size(chunk); chunk 503 net/sctp/outqueue.c chunk->tsn_missing_report = 0; chunk 511 net/sctp/outqueue.c if (chunk->rtt_in_progress) { chunk 512 net/sctp/outqueue.c chunk->rtt_in_progress = 0; chunk 595 net/sctp/outqueue.c struct sctp_chunk *chunk, *chunk1; chunk 629 net/sctp/outqueue.c list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) { chunk 631 net/sctp/outqueue.c if (sctp_chunk_abandoned(chunk)) { chunk 632 net/sctp/outqueue.c list_del_init(&chunk->transmitted_list); chunk 634 net/sctp/outqueue.c &chunk->transmitted_list); chunk 643 
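
The sctp_retransmit_mark() entries above move three counters in lockstep whenever a chunk leaves the in-flight set: flight_size and outstanding_bytes drop by sctp_data_size(chunk) while the peer's rwnd is credited back. A sketch of just that accounting; struct acct_sketch is hypothetical:

    #include <stdio.h>

    struct acct_sketch {
        unsigned long flight_size, outstanding_bytes, peer_rwnd;
    };

    static void mark_for_rtx_sketch(struct acct_sketch *a,
                                    unsigned long datasize)
    {
        a->flight_size       -= datasize;
        a->outstanding_bytes -= datasize;
        a->peer_rwnd         += datasize;  /* credit the window back */
    }

    int main(void)
    {
        struct acct_sketch a = { 3000, 3000, 1000 };

        mark_for_rtx_sketch(&a, 1200);
        printf("flight %lu outstanding %lu rwnd %lu\n",
               a.flight_size, a.outstanding_bytes, a.peer_rwnd);
        return 0;                          /* flight 1800 ... rwnd 2200 */
    }
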
net/sctp/outqueue.c if (chunk->tsn_gap_acked) { chunk 644 net/sctp/outqueue.c list_move_tail(&chunk->transmitted_list, chunk 652 net/sctp/outqueue.c if (fast_rtx && !chunk->fast_retransmit) chunk 657 net/sctp/outqueue.c status = sctp_packet_append_chunk(pkt, chunk); chunk 709 net/sctp/outqueue.c list_move_tail(&chunk->transmitted_list, chunk 715 net/sctp/outqueue.c if (chunk->fast_retransmit == SCTP_NEED_FRTX) chunk 716 net/sctp/outqueue.c chunk->fast_retransmit = SCTP_DONT_FRTX; chunk 761 net/sctp/outqueue.c struct sctp_chunk *chunk, gfp_t gfp) chunk 771 net/sctp/outqueue.c sctp_packet_append_chunk(&singleton, chunk); chunk 790 net/sctp/outqueue.c struct sctp_chunk *chunk) chunk 792 net/sctp/outqueue.c struct sctp_transport *new_transport = chunk->transport; chunk 795 net/sctp/outqueue.c if (!sctp_chunk_is_data(chunk)) { chunk 805 net/sctp/outqueue.c if (ctx->transport && sctp_cmp_addr_exact(&chunk->dest, chunk 810 net/sctp/outqueue.c &chunk->dest); chunk 838 net/sctp/outqueue.c type = chunk->chunk_hdr->type; chunk 870 net/sctp/outqueue.c struct sctp_chunk *chunk, *tmp; chunk 874 net/sctp/outqueue.c list_for_each_entry_safe(chunk, tmp, &ctx->q->control_chunk_list, list) { chunk 884 net/sctp/outqueue.c chunk->chunk_hdr->type != SCTP_CID_ASCONF) chunk 887 net/sctp/outqueue.c list_del_init(&chunk->list); chunk 892 net/sctp/outqueue.c sctp_outq_select_transport(ctx, chunk); chunk 894 net/sctp/outqueue.c switch (chunk->chunk_hdr->type) { chunk 903 net/sctp/outqueue.c error = sctp_packet_singleton(ctx->transport, chunk, chunk 912 net/sctp/outqueue.c if (sctp_test_T_bit(chunk)) chunk 939 net/sctp/outqueue.c status = sctp_packet_transmit_chunk(ctx->packet, chunk, chunk 943 net/sctp/outqueue.c list_add(&chunk->list, &ctx->q->control_chunk_list); chunk 952 net/sctp/outqueue.c if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN || chunk 953 net/sctp/outqueue.c chunk->chunk_hdr->type == SCTP_CID_I_FWD_TSN) { chunk 958 net/sctp/outqueue.c if (chunk == ctx->asoc->strreset_chunk) chunk 1020 net/sctp/outqueue.c struct sctp_chunk *chunk; chunk 1064 net/sctp/outqueue.c while ((chunk = sctp_outq_dequeue_data(ctx->q)) != NULL) { chunk 1065 net/sctp/outqueue.c __u32 sid = ntohs(chunk->subh.data_hdr->stream); chunk 1069 net/sctp/outqueue.c if (sctp_chunk_abandoned(chunk)) { chunk 1070 net/sctp/outqueue.c sctp_sched_dequeue_done(ctx->q, chunk); chunk 1071 net/sctp/outqueue.c sctp_chunk_fail(chunk, 0); chunk 1072 net/sctp/outqueue.c sctp_chunk_free(chunk); chunk 1077 net/sctp/outqueue.c sctp_outq_head_data(ctx->q, chunk); chunk 1081 net/sctp/outqueue.c sctp_outq_select_transport(ctx, chunk); chunk 1084 net/sctp/outqueue.c __func__, ctx->q, chunk, chunk && chunk->chunk_hdr ? chunk 1085 net/sctp/outqueue.c sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : chunk 1086 net/sctp/outqueue.c "illegal chunk", ntohl(chunk->subh.data_hdr->tsn), chunk 1087 net/sctp/outqueue.c chunk->skb ? chunk->skb->head : NULL, chunk->skb ? 
chunk 1088 net/sctp/outqueue.c refcount_read(&chunk->skb->users) : -1); chunk 1091 net/sctp/outqueue.c status = sctp_packet_transmit_chunk(ctx->packet, chunk, 0, chunk 1098 net/sctp/outqueue.c __func__, ntohl(chunk->subh.data_hdr->tsn), chunk 1101 net/sctp/outqueue.c sctp_outq_head_data(ctx->q, chunk); chunk 1110 net/sctp/outqueue.c chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM; chunk 1111 net/sctp/outqueue.c if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) chunk 1119 net/sctp/outqueue.c sctp_sched_dequeue_done(ctx->q, chunk); chunk 1121 net/sctp/outqueue.c list_add_tail(&chunk->transmitted_list, chunk 1221 net/sctp/outqueue.c int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk) chunk 1224 net/sctp/outqueue.c struct sctp_sackhdr *sack = chunk->subh.sack_hdr; chunk 1303 net/sctp/outqueue.c transport, &chunk->source, sack, chunk 1343 net/sctp/outqueue.c SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags)) chunk 1679 net/sctp/outqueue.c struct sctp_chunk *chunk; chunk 1685 net/sctp/outqueue.c list_for_each_entry(chunk, transmitted_queue, transmitted_list) { chunk 1687 net/sctp/outqueue.c tsn = ntohl(chunk->subh.data_hdr->tsn); chunk 1696 net/sctp/outqueue.c if (chunk->fast_retransmit == SCTP_CAN_FRTX && chunk 1697 net/sctp/outqueue.c !chunk->tsn_gap_acked && chunk 1704 net/sctp/outqueue.c chunk->transport, chunk 1706 net/sctp/outqueue.c chunk->tsn_missing_report++; chunk 1709 net/sctp/outqueue.c __func__, tsn, chunk->tsn_missing_report); chunk 1719 net/sctp/outqueue.c if (chunk->tsn_missing_report >= 3) { chunk 1720 net/sctp/outqueue.c chunk->fast_retransmit = SCTP_NEED_FRTX; chunk 1794 net/sctp/outqueue.c struct sctp_chunk *chunk; chunk 1831 net/sctp/outqueue.c chunk = list_entry(lchunk, struct sctp_chunk, chunk 1833 net/sctp/outqueue.c tsn = ntohl(chunk->subh.data_hdr->tsn); chunk 1840 net/sctp/outqueue.c sctp_chunk_free(chunk); chunk 1844 net/sctp/outqueue.c if (chunk->chunk_hdr->flags & chunk 1849 net/sctp/outqueue.c chunk->subh.data_hdr->stream); chunk 1851 net/sctp/outqueue.c chunk->subh.data_hdr->stream; chunk 1853 net/sctp/outqueue.c chunk->subh.data_hdr->ssn; chunk 67 net/sctp/sm_make_chunk.c static void *sctp_addto_param(struct sctp_chunk *chunk, int len, chunk 73 net/sctp/sm_make_chunk.c struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg; chunk 75 net/sctp/sm_make_chunk.c if (chunk->shkey) { chunk 76 net/sctp/sm_make_chunk.c struct sctp_shared_key *shkey = chunk->shkey; chunk 77 net/sctp/sm_make_chunk.c struct sctp_association *asoc = chunk->asoc; chunk 93 net/sctp/sm_make_chunk.c sctp_auth_shkey_release(chunk->shkey); chunk 97 net/sctp/sm_make_chunk.c static void sctp_control_set_owner_w(struct sctp_chunk *chunk) chunk 99 net/sctp/sm_make_chunk.c struct sctp_association *asoc = chunk->asoc; chunk 100 net/sctp/sm_make_chunk.c struct sk_buff *skb = chunk->skb; chunk 109 net/sctp/sm_make_chunk.c if (chunk->auth) { chunk 110 net/sctp/sm_make_chunk.c chunk->shkey = asoc->shkey; chunk 111 net/sctp/sm_make_chunk.c sctp_auth_shkey_hold(chunk->shkey); chunk 114 net/sctp/sm_make_chunk.c skb_shinfo(skb)->destructor_arg = chunk; chunk 119 net/sctp/sm_make_chunk.c int sctp_chunk_iif(const struct sctp_chunk *chunk) chunk 121 net/sctp/sm_make_chunk.c struct sk_buff *skb = chunk->skb; chunk 143 net/sctp/sm_make_chunk.c int sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, chunk 154 net/sctp/sm_make_chunk.c if (skb_tailroom(chunk->skb) < len) chunk 157 net/sctp/sm_make_chunk.c chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(err), &err); chunk 380 
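
The sctp_mark_missing() entries above implement the classic fast-retransmit rule: a SCTP_CAN_FRTX chunk that is not gap-acked and whose tsn_missing_report reaches 3 flips to SCTP_NEED_FRTX. A sketch under those assumptions; the enum and struct names are hypothetical stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    enum frtx_sketch { CAN_FRTX, NEED_FRTX, DONT_FRTX };

    struct tx_chunk_sketch {
        enum frtx_sketch fast_retransmit;
        bool tsn_gap_acked;
        int tsn_missing_report;
    };

    static void on_sack_miss_sketch(struct tx_chunk_sketch *c)
    {
        if (c->fast_retransmit == CAN_FRTX && !c->tsn_gap_acked &&
            ++c->tsn_missing_report >= 3)
            c->fast_retransmit = NEED_FRTX;
    }

    int main(void)
    {
        struct tx_chunk_sketch c = { CAN_FRTX, false, 0 };

        for (int i = 0; i < 3; i++)
            on_sack_miss_sketch(&c);
        printf("%s\n", c.fast_retransmit == NEED_FRTX
                       ? "fast retransmit" : "keep waiting");
        return 0;
    }
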
net/sctp/sm_make_chunk.c const struct sctp_chunk *chunk, chunk 411 net/sctp/sm_make_chunk.c cookie = sctp_pack_cookie(asoc->ep, asoc, chunk, &cookie_len, chunk 485 net/sctp/sm_make_chunk.c if (chunk->transport) chunk 488 net/sctp/sm_make_chunk.c &chunk->transport->ipaddr); chunk 568 net/sctp/sm_make_chunk.c const struct sctp_chunk *chunk) chunk 594 net/sctp/sm_make_chunk.c if (chunk) chunk 595 net/sctp/sm_make_chunk.c retval->transport = chunk->transport; chunk 620 net/sctp/sm_make_chunk.c const struct sctp_chunk *chunk) chunk 635 net/sctp/sm_make_chunk.c if (retval && chunk && chunk->transport) chunk 638 net/sctp/sm_make_chunk.c &chunk->transport->ipaddr); chunk 668 net/sctp/sm_make_chunk.c const struct sctp_chunk *chunk) chunk 693 net/sctp/sm_make_chunk.c if (chunk) chunk 694 net/sctp/sm_make_chunk.c retval->transport = chunk->transport; chunk 855 net/sctp/sm_make_chunk.c const struct sctp_chunk *chunk) chunk 861 net/sctp/sm_make_chunk.c if (chunk && chunk->asoc) chunk 862 net/sctp/sm_make_chunk.c ctsn = sctp_tsnmap_get_ctsn(&chunk->asoc->peer.tsn_map); chunk 876 net/sctp/sm_make_chunk.c if (chunk) chunk 877 net/sctp/sm_make_chunk.c retval->transport = chunk->transport; chunk 883 net/sctp/sm_make_chunk.c const struct sctp_chunk *chunk) chunk 899 net/sctp/sm_make_chunk.c if (retval && chunk) chunk 900 net/sctp/sm_make_chunk.c retval->transport = chunk->transport; chunk 907 net/sctp/sm_make_chunk.c const struct sctp_chunk *chunk) chunk 930 net/sctp/sm_make_chunk.c if (retval && chunk) chunk 931 net/sctp/sm_make_chunk.c retval->transport = chunk->transport; chunk 940 net/sctp/sm_make_chunk.c const struct sctp_chunk *chunk, chunk 950 net/sctp/sm_make_chunk.c if (chunk && chunk->chunk_hdr && chunk 951 net/sctp/sm_make_chunk.c chunk->chunk_hdr->type == SCTP_CID_INIT) chunk 969 net/sctp/sm_make_chunk.c if (retval && chunk) chunk 970 net/sctp/sm_make_chunk.c retval->transport = chunk->transport; chunk 978 net/sctp/sm_make_chunk.c const struct sctp_chunk *chunk, chunk 984 net/sctp/sm_make_chunk.c retval = sctp_make_abort(asoc, chunk, chunk 1004 net/sctp/sm_make_chunk.c if (chunk) chunk 1005 net/sctp/sm_make_chunk.c retval->transport = chunk->transport; chunk 1056 net/sctp/sm_make_chunk.c static void *sctp_addto_param(struct sctp_chunk *chunk, int len, chunk 1059 net/sctp/sm_make_chunk.c int chunklen = ntohs(chunk->chunk_hdr->length); chunk 1062 net/sctp/sm_make_chunk.c target = skb_put(chunk->skb, len); chunk 1070 net/sctp/sm_make_chunk.c chunk->chunk_hdr->length = htons(chunklen + len); chunk 1071 net/sctp/sm_make_chunk.c chunk->chunk_end = skb_tail_pointer(chunk->skb); chunk 1079 net/sctp/sm_make_chunk.c const struct sctp_chunk *chunk, chunk 1086 net/sctp/sm_make_chunk.c retval = sctp_make_abort(asoc, chunk, sizeof(struct sctp_errhdr) + chunk 1094 net/sctp/sm_make_chunk.c phdr.type = htons(chunk->chunk_hdr->type); chunk 1095 net/sctp/sm_make_chunk.c phdr.length = chunk->chunk_hdr->length; chunk 1105 net/sctp/sm_make_chunk.c const struct sctp_chunk *chunk, chunk 1113 net/sctp/sm_make_chunk.c retval = sctp_make_abort(asoc, chunk, payload_len); chunk 1128 net/sctp/sm_make_chunk.c const struct sctp_chunk *chunk) chunk 1134 net/sctp/sm_make_chunk.c retval = sctp_make_abort(asoc, chunk, payload_len); chunk 1176 net/sctp/sm_make_chunk.c const struct sctp_chunk *chunk, chunk 1198 net/sctp/sm_make_chunk.c if (chunk) chunk 1199 net/sctp/sm_make_chunk.c retval->transport = chunk->transport; chunk 1210 net/sctp/sm_make_chunk.c const struct sctp_chunk *chunk, chunk 1229 net/sctp/sm_make_chunk.c if (chunk) 
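
sctp_addto_param() above and sctp_addto_chunk() just below grow the chunk's skb and then rewrite chunk_hdr->length in network byte order to cover the new tail. A flat-buffer sketch of that length bookkeeping; struct wire_chunk_sketch and addto_sketch() are hypothetical, and the caller must guarantee tailroom (sctp_init_cause() above checks skb_tailroom() first):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>

    struct wire_chunk_sketch {
        uint8_t  type, flags;
        uint16_t length;                  /* big-endian, includes header */
        uint8_t  body[64];
    };

    static void addto_sketch(struct wire_chunk_sketch *c,
                             const void *data, int len)
    {
        int chunklen = ntohs(c->length);

        memcpy((uint8_t *)c + chunklen, data, len); /* skb_put() analogue */
        c->length = htons(chunklen + len);          /* keep header in sync */
    }

    int main(void)
    {
        struct wire_chunk_sketch c = { 6, 0, htons(4), { 0 } };

        addto_sketch(&c, "\x00\x01\x00\x08", 4);
        printf("new length %u\n", ntohs(c.length)); /* 8 */
        return 0;
    }
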
chunk 1230 net/sctp/sm_make_chunk.c retval->transport = chunk->transport; chunk 1245 net/sctp/sm_make_chunk.c const struct sctp_chunk *chunk) chunk 1257 net/sctp/sm_make_chunk.c return sctp_make_op_error_space(asoc, chunk, size); chunk 1262 net/sctp/sm_make_chunk.c const struct sctp_chunk *chunk, chunk 1268 net/sctp/sm_make_chunk.c retval = sctp_make_op_error_space(asoc, chunk, paylen + reserve_tail); chunk 1353 net/sctp/sm_make_chunk.c SCTP_DBG_OBJCNT_INC(chunk); chunk 1361 net/sctp/sm_make_chunk.c void sctp_init_addrs(struct sctp_chunk *chunk, union sctp_addr *src, chunk 1364 net/sctp/sm_make_chunk.c memcpy(&chunk->source, src, sizeof(union sctp_addr)); chunk 1365 net/sctp/sm_make_chunk.c memcpy(&chunk->dest, dest, sizeof(union sctp_addr)); chunk 1369 net/sctp/sm_make_chunk.c const union sctp_addr *sctp_source(const struct sctp_chunk *chunk) chunk 1372 net/sctp/sm_make_chunk.c if (chunk->transport) { chunk 1373 net/sctp/sm_make_chunk.c return &chunk->transport->ipaddr; chunk 1376 net/sctp/sm_make_chunk.c return &chunk->source; chunk 1443 net/sctp/sm_make_chunk.c struct sctp_chunk *chunk; chunk 1445 net/sctp/sm_make_chunk.c chunk = _sctp_make_chunk(asoc, type, flags, paylen, gfp); chunk 1446 net/sctp/sm_make_chunk.c if (chunk) chunk 1447 net/sctp/sm_make_chunk.c sctp_control_set_owner_w(chunk); chunk 1449 net/sctp/sm_make_chunk.c return chunk; chunk 1453 net/sctp/sm_make_chunk.c static void sctp_chunk_destroy(struct sctp_chunk *chunk) chunk 1455 net/sctp/sm_make_chunk.c BUG_ON(!list_empty(&chunk->list)); chunk 1456 net/sctp/sm_make_chunk.c list_del_init(&chunk->transmitted_list); chunk 1458 net/sctp/sm_make_chunk.c consume_skb(chunk->skb); chunk 1459 net/sctp/sm_make_chunk.c consume_skb(chunk->auth_chunk); chunk 1461 net/sctp/sm_make_chunk.c SCTP_DBG_OBJCNT_DEC(chunk); chunk 1462 net/sctp/sm_make_chunk.c kmem_cache_free(sctp_chunk_cachep, chunk); chunk 1466 net/sctp/sm_make_chunk.c void sctp_chunk_free(struct sctp_chunk *chunk) chunk 1469 net/sctp/sm_make_chunk.c if (chunk->msg) chunk 1470 net/sctp/sm_make_chunk.c sctp_datamsg_put(chunk->msg); chunk 1472 net/sctp/sm_make_chunk.c sctp_chunk_put(chunk); chunk 1491 net/sctp/sm_make_chunk.c void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data) chunk 1493 net/sctp/sm_make_chunk.c int chunklen = ntohs(chunk->chunk_hdr->length); chunk 1497 net/sctp/sm_make_chunk.c skb_put_zero(chunk->skb, padlen); chunk 1498 net/sctp/sm_make_chunk.c target = skb_put_data(chunk->skb, data, len); chunk 1501 net/sctp/sm_make_chunk.c chunk->chunk_hdr->length = htons(chunklen + padlen + len); chunk 1502 net/sctp/sm_make_chunk.c chunk->chunk_end = skb_tail_pointer(chunk->skb); chunk 1511 net/sctp/sm_make_chunk.c int sctp_user_addto_chunk(struct sctp_chunk *chunk, int len, chunk 1517 net/sctp/sm_make_chunk.c target = skb_put(chunk->skb, len); chunk 1524 net/sctp/sm_make_chunk.c chunk->chunk_hdr->length = chunk 1525 net/sctp/sm_make_chunk.c htons(ntohs(chunk->chunk_hdr->length) + len); chunk 1526 net/sctp/sm_make_chunk.c chunk->chunk_end = skb_tail_pointer(chunk->skb); chunk 1534 net/sctp/sm_make_chunk.c void sctp_chunk_assign_ssn(struct sctp_chunk *chunk) chunk 1541 net/sctp/sm_make_chunk.c if (chunk->has_ssn) chunk 1545 net/sctp/sm_make_chunk.c sid = ntohs(chunk->subh.data_hdr->stream); chunk 1546 net/sctp/sm_make_chunk.c stream = &chunk->asoc->stream; chunk 1551 net/sctp/sm_make_chunk.c msg = chunk->msg; chunk 1570 net/sctp/sm_make_chunk.c void sctp_chunk_assign_tsn(struct sctp_chunk *chunk) chunk 1572 net/sctp/sm_make_chunk.c if 
(!chunk->has_tsn) { chunk 1576 net/sctp/sm_make_chunk.c chunk->subh.data_hdr->tsn = chunk 1577 net/sctp/sm_make_chunk.c htonl(sctp_association_get_next_tsn(chunk->asoc)); chunk 1578 net/sctp/sm_make_chunk.c chunk->has_tsn = 1; chunk 1584 net/sctp/sm_make_chunk.c struct sctp_chunk *chunk, chunk 1592 net/sctp/sm_make_chunk.c scope = sctp_scope(sctp_source(chunk)); chunk 1597 net/sctp/sm_make_chunk.c skb = chunk->skb; chunk 1701 net/sctp/sm_make_chunk.c struct sctp_chunk *chunk, gfp_t gfp, chunk 1707 net/sctp/sm_make_chunk.c struct sk_buff *skb = chunk->skb; chunk 1720 net/sctp/sm_make_chunk.c bodysize = ntohs(chunk->chunk_hdr->length) - headersize; chunk 1727 net/sctp/sm_make_chunk.c len = ntohs(chunk->chunk_hdr->length); chunk 1736 net/sctp/sm_make_chunk.c cookie = chunk->subh.cookie_hdr; chunk 1774 net/sctp/sm_make_chunk.c if (ntohl(chunk->sctp_hdr->vtag) != bear_cookie->my_vtag) { chunk 1779 net/sctp/sm_make_chunk.c if (chunk->sctp_hdr->source != bear_cookie->peer_addr.v4.sin_port || chunk 1780 net/sctp/sm_make_chunk.c ntohs(chunk->sctp_hdr->dest) != bear_cookie->my_port) { chunk 1810 net/sctp/sm_make_chunk.c *errp = sctp_make_op_error(asoc, chunk, chunk 1822 net/sctp/sm_make_chunk.c scope = sctp_scope(sctp_source(chunk)); chunk 1830 net/sctp/sm_make_chunk.c retval->peer.port = ntohs(chunk->sctp_hdr->source); chunk 1843 net/sctp/sm_make_chunk.c sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, chunk 1844 net/sctp/sm_make_chunk.c sizeof(chunk->dest), SCTP_ADDR_SRC, chunk 1887 net/sctp/sm_make_chunk.c struct sctp_chunk *chunk, chunk 1899 net/sctp/sm_make_chunk.c *errp = sctp_make_op_error_space(asoc, chunk, len); chunk 1915 net/sctp/sm_make_chunk.c struct sctp_chunk *chunk, chunk 1921 net/sctp/sm_make_chunk.c *errp = sctp_make_op_error_space(asoc, chunk, 0); chunk 1932 net/sctp/sm_make_chunk.c const struct sctp_chunk *chunk, chunk 1942 net/sctp/sm_make_chunk.c *errp = sctp_make_violation_paramlen(asoc, chunk, param); chunk 1953 net/sctp/sm_make_chunk.c struct sctp_chunk *chunk, chunk 1966 net/sctp/sm_make_chunk.c *errp = sctp_make_op_error(asoc, chunk, SCTP_ERROR_DNS_FAILED, chunk 2074 net/sctp/sm_make_chunk.c struct sctp_chunk *chunk, chunk 2093 net/sctp/sm_make_chunk.c *errp = sctp_make_op_error_limited(asoc, chunk); chunk 2129 net/sctp/sm_make_chunk.c struct sctp_chunk *chunk, chunk 2166 net/sctp/sm_make_chunk.c sctp_process_hn_param(asoc, param, chunk, err_chunk); chunk 2187 net/sctp/sm_make_chunk.c chunk, err_chunk); chunk 2203 net/sctp/sm_make_chunk.c chunk, err_chunk); chunk 2228 net/sctp/sm_make_chunk.c sctp_process_inv_paramlength(asoc, param.p, chunk, chunk 2238 net/sctp/sm_make_chunk.c retval = sctp_process_unk_param(asoc, param, chunk, err_chunk); chunk 2248 net/sctp/sm_make_chunk.c struct sctp_chunk *chunk, struct sctp_chunk **errp) chunk 2262 net/sctp/sm_make_chunk.c return sctp_process_inv_mandatory(asoc, chunk, errp); chunk 2276 net/sctp/sm_make_chunk.c if (param.v != (void *)chunk->chunk_end) chunk 2277 net/sctp/sm_make_chunk.c return sctp_process_inv_paramlength(asoc, param.p, chunk, errp); chunk 2284 net/sctp/sm_make_chunk.c chunk, errp); chunk 2289 net/sctp/sm_make_chunk.c chunk, errp); chunk 2310 net/sctp/sm_make_chunk.c int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk, chunk 2335 net/sctp/sm_make_chunk.c if (sctp_cmp_addr_exact(sctp_source(chunk), peer_addr)) chunk 2344 net/sctp/sm_make_chunk.c chunk->sctp_hdr->source, 0); chunk 2345 net/sctp/sm_make_chunk.c if (sctp_cmp_addr_exact(sctp_source(chunk), &addr)) chunk 2973 
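
sctp_chunk_assign_tsn() above hands out the association's next transmission sequence number exactly once per chunk, guarded by the has_tsn flag. A sketch of that idempotent assignment; the _sketch types are hypothetical:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct assoc_sketch { uint32_t next_tsn; };
    struct data_chunk_sketch { bool has_tsn; uint32_t tsn; };

    static void assign_tsn_sketch(struct assoc_sketch *a,
                                  struct data_chunk_sketch *c)
    {
        if (!c->has_tsn) {
            c->tsn = a->next_tsn++;  /* sctp_association_get_next_tsn() */
            c->has_tsn = true;
        }
    }

    int main(void)
    {
        struct assoc_sketch a = { 100 };
        struct data_chunk_sketch c = { false, 0 };

        assign_tsn_sketch(&a, &c);
        assign_tsn_sketch(&a, &c);   /* second call is a no-op */
        printf("tsn %u, next %u\n", c.tsn, a.next_tsn); /* 100, 101 */
        return 0;
    }
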
net/sctp/sm_make_chunk.c static void sctp_add_asconf_response(struct sctp_chunk *chunk, __be32 crr_id, chunk 2999 net/sctp/sm_make_chunk.c sctp_addto_chunk(chunk, sizeof(ack_param), &ack_param); chunk 3007 net/sctp/sm_make_chunk.c sctp_addto_chunk(chunk, err_param_len, &err_param); chunk 3011 net/sctp/sm_make_chunk.c sctp_addto_chunk(chunk, asconf_param_len, asconf_param); chunk 3158 net/sctp/sm_make_chunk.c struct sctp_chunk *chunk, bool addr_param_needed, chunk 3165 net/sctp/sm_make_chunk.c addip = (struct sctp_addip_chunk *)chunk->chunk_hdr; chunk 3217 net/sctp/sm_make_chunk.c if (param.v != chunk->chunk_end) chunk 3832 net/sctp/sm_make_chunk.c struct sctp_chunk *chunk, chunk 3840 net/sctp/sm_make_chunk.c hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr; chunk 87 net/sctp/sm_sideeffect.c struct sctp_chunk *chunk) chunk 119 net/sctp/sm_sideeffect.c repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk); chunk 613 net/sctp/sm_sideeffect.c struct sctp_chunk *chunk, chunk 622 net/sctp/sm_sideeffect.c if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT) chunk 624 net/sctp/sm_sideeffect.c (__u16)error, 0, 0, chunk, chunk 635 net/sctp/sm_sideeffect.c abort = sctp_make_violation_max_retrans(asoc, chunk); chunk 656 net/sctp/sm_sideeffect.c struct sctp_chunk *chunk, chunk 667 net/sctp/sm_sideeffect.c if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp)) chunk 721 net/sctp/sm_sideeffect.c struct sctp_chunk *chunk) chunk 777 net/sctp/sm_sideeffect.c hbinfo = (struct sctp_sender_hb_info *)chunk->skb->data; chunk 791 net/sctp/sm_sideeffect.c struct sctp_chunk *chunk) chunk 795 net/sctp/sm_sideeffect.c if (sctp_outq_sack(&asoc->outqueue, chunk)) { chunk 813 net/sctp/sm_sideeffect.c struct sctp_chunk *chunk) chunk 817 net/sctp/sm_sideeffect.c if (chunk->transport) chunk 818 net/sctp/sm_sideeffect.c t = chunk->transport; chunk 822 net/sctp/sm_sideeffect.c chunk->transport = t; chunk 940 net/sctp/sm_sideeffect.c struct sctp_chunk *chunk) chunk 944 net/sctp/sm_sideeffect.c t = sctp_assoc_choose_alter_transport(asoc, chunk->transport); chunk 946 net/sctp/sm_sideeffect.c chunk->transport = t; chunk 952 net/sctp/sm_sideeffect.c struct sctp_chunk *chunk) chunk 957 net/sctp/sm_sideeffect.c while (chunk->chunk_end > chunk->skb->data) { chunk 958 net/sctp/sm_sideeffect.c err_hdr = (struct sctp_errhdr *)(chunk->skb->data); chunk 960 net/sctp/sm_sideeffect.c ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0, chunk 1096 net/sctp/sm_sideeffect.c struct sctp_chunk *chunk; chunk 1098 net/sctp/sm_sideeffect.c list_for_each_entry(chunk, &msg->chunks, frag_list) chunk 1099 net/sctp/sm_sideeffect.c sctp_outq_tail(&asoc->outqueue, chunk, gfp); chunk 1194 net/sctp/sm_sideeffect.c event_type, subtype.chunk); chunk 1222 net/sctp/sm_sideeffect.c state, subtype.chunk); chunk 1227 net/sctp/sm_sideeffect.c state, event_type, subtype.chunk); chunk 1232 net/sctp/sm_sideeffect.c state, event_type, subtype.chunk); chunk 1238 net/sctp/sm_sideeffect.c status, state, event_type, subtype.chunk); chunk 1263 net/sctp/sm_sideeffect.c struct sctp_chunk *chunk = NULL, *new_obj; chunk 1275 net/sctp/sm_sideeffect.c chunk = event_arg; chunk 1338 net/sctp/sm_sideeffect.c cmd->obj.chunk); chunk 1354 net/sctp/sm_sideeffect.c cmd->obj.chunk); chunk 1359 net/sctp/sm_sideeffect.c new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC, chunk 1376 net/sctp/sm_sideeffect.c error = sctp_cmd_process_init(commands, asoc, chunk, chunk 1382 net/sctp/sm_sideeffect.c new_obj = sctp_make_cookie_echo(asoc, chunk); chunk 
1384 net/sctp/sm_sideeffect.c if (cmd->obj.chunk) chunk 1385 net/sctp/sm_sideeffect.c sctp_chunk_free(cmd->obj.chunk); chunk 1395 net/sctp/sm_sideeffect.c if (cmd->obj.chunk) chunk 1397 net/sctp/sm_sideeffect.c SCTP_CHUNK(cmd->obj.chunk)); chunk 1433 net/sctp/sm_sideeffect.c new_obj = sctp_make_shutdown(asoc, chunk); chunk 1445 net/sctp/sm_sideeffect.c __func__, cmd->obj.chunk, &asoc->ulpq); chunk 1448 net/sctp/sm_sideeffect.c cmd->obj.chunk, chunk 1468 net/sctp/sm_sideeffect.c sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk, gfp); chunk 1498 net/sctp/sm_sideeffect.c chunk); chunk 1510 net/sctp/sm_sideeffect.c sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk); chunk 1552 net/sctp/sm_sideeffect.c chunk = cmd->obj.chunk; chunk 1556 net/sctp/sm_sideeffect.c chunk->transport = t; chunk 1608 net/sctp/sm_sideeffect.c subtype, chunk, cmd->obj.u32); chunk 1653 net/sctp/sm_sideeffect.c sctp_cmd_transport_on(commands, asoc, t, chunk); chunk 1680 net/sctp/sm_sideeffect.c chunk->subh.sack_hdr = &sackh; chunk 1682 net/sctp/sm_sideeffect.c SCTP_CHUNK(chunk)); chunk 1690 net/sctp/sm_sideeffect.c chunk->pdiscard = 1; chunk 1708 net/sctp/sm_sideeffect.c cmd->obj.chunk, chunk 1713 net/sctp/sm_sideeffect.c sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk); chunk 1717 net/sctp/sm_sideeffect.c sctp_cmd_process_operr(commands, asoc, chunk); chunk 1785 net/sctp/sm_sideeffect.c sctp_chunk_free(cmd->obj.chunk); chunk 1796 net/sctp/sm_sideeffect.c if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) { chunk 1797 net/sctp/sm_sideeffect.c if (chunk->end_of_packet || chunk->singleton) chunk 54 net/sctp/sm_statefuns.c struct sctp_chunk *chunk, chunk 57 net/sctp/sm_statefuns.c struct sctp_chunk *chunk, chunk 62 net/sctp/sm_statefuns.c const struct sctp_chunk *chunk); chunk 66 net/sctp/sm_statefuns.c const struct sctp_chunk *chunk, chunk 90 net/sctp/sm_statefuns.c static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk); chunk 142 net/sctp/sm_statefuns.c struct sctp_chunk *chunk); chunk 159 net/sctp/sm_statefuns.c static inline bool sctp_chunk_length_valid(struct sctp_chunk *chunk, chunk 162 net/sctp/sm_statefuns.c __u16 chunk_length = ntohs(chunk->chunk_hdr->length); chunk 165 net/sctp/sm_statefuns.c if (unlikely(chunk->pdiscard)) chunk 174 net/sctp/sm_statefuns.c static inline bool sctp_err_chunk_valid(struct sctp_chunk *chunk) chunk 178 net/sctp/sm_statefuns.c sctp_walk_errors(err, chunk->chunk_hdr); chunk 180 net/sctp/sm_statefuns.c return (void *)err == (void *)chunk->chunk_end; chunk 224 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 227 net/sctp/sm_statefuns.c if (!sctp_vtag_verify_either(chunk, asoc)) chunk 235 net/sctp/sm_statefuns.c if (!chunk->singleton) chunk 239 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) chunk 310 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg, *repl, *err_chunk; chunk 318 net/sctp/sm_statefuns.c chunk->skb)) chunk 330 net/sctp/sm_statefuns.c if (!chunk->singleton) chunk 344 net/sctp/sm_statefuns.c if (chunk->sctp_hdr->vtag != 0) chunk 352 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk))) chunk 366 net/sctp/sm_statefuns.c if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type, chunk 367 net/sctp/sm_statefuns.c (struct sctp_init_chunk *)chunk->chunk_hdr, chunk, chunk 396 net/sctp/sm_statefuns.c chunk->subh.init_hdr = (struct sctp_inithdr *)chunk->skb->data; chunk 399 net/sctp/sm_statefuns.c chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(struct 
sctp_inithdr)); chunk 401 net/sctp/sm_statefuns.c new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC); chunk 406 net/sctp/sm_statefuns.c sctp_scope(sctp_source(chunk)), chunk 411 net/sctp/sm_statefuns.c if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), chunk 412 net/sctp/sm_statefuns.c (struct sctp_init_chunk *)chunk->chunk_hdr, chunk 426 net/sctp/sm_statefuns.c repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len); chunk 510 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 514 net/sctp/sm_statefuns.c if (!sctp_vtag_verify(chunk, asoc)) chunk 521 net/sctp/sm_statefuns.c if (!chunk->singleton) chunk 525 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_initack_chunk))) chunk 529 net/sctp/sm_statefuns.c chunk->subh.init_hdr = (struct sctp_inithdr *)chunk->skb->data; chunk 533 net/sctp/sm_statefuns.c if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type, chunk 534 net/sctp/sm_statefuns.c (struct sctp_init_chunk *)chunk->chunk_hdr, chunk, chunk 577 net/sctp/sm_statefuns.c asoc, chunk->transport); chunk 583 net/sctp/sm_statefuns.c chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(struct sctp_inithdr)); chunk 585 net/sctp/sm_statefuns.c initchunk = (struct sctp_init_chunk *)chunk->chunk_hdr; chunk 621 net/sctp/sm_statefuns.c static bool sctp_auth_chunk_verify(struct net *net, struct sctp_chunk *chunk, chunk 626 net/sctp/sm_statefuns.c if (!chunk->auth_chunk) chunk 641 net/sctp/sm_statefuns.c auth.skb = chunk->auth_chunk; chunk 642 net/sctp/sm_statefuns.c auth.asoc = chunk->asoc; chunk 643 net/sctp/sm_statefuns.c auth.sctp_hdr = chunk->sctp_hdr; chunk 645 net/sctp/sm_statefuns.c skb_push(chunk->auth_chunk, chunk 647 net/sctp/sm_statefuns.c skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr)); chunk 648 net/sctp/sm_statefuns.c auth.transport = chunk->transport; chunk 694 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 713 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) chunk 728 net/sctp/sm_statefuns.c chunk->subh.cookie_hdr = chunk 729 net/sctp/sm_statefuns.c (struct sctp_signed_cookie *)chunk->skb->data; chunk 730 net/sctp/sm_statefuns.c if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) - chunk 738 net/sctp/sm_statefuns.c new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error, chunk 756 net/sctp/sm_statefuns.c sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands, chunk 775 net/sctp/sm_statefuns.c peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; chunk 777 net/sctp/sm_statefuns.c if (!sctp_process_init(new_asoc, chunk, chunk 778 net/sctp/sm_statefuns.c &chunk->subh.cookie_hdr->c.peer_addr, chunk 790 net/sctp/sm_statefuns.c if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) { chunk 795 net/sctp/sm_statefuns.c repl = sctp_make_cookie_ack(new_asoc, chunk); chunk 905 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 908 net/sctp/sm_statefuns.c if (!sctp_vtag_verify(chunk, asoc)) chunk 914 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) chunk 927 net/sctp/sm_statefuns.c security_inet_conn_established(ep->base.sk, chunk->skb); chunk 1122 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 1126 net/sctp/sm_statefuns.c if (!sctp_vtag_verify(chunk, asoc)) chunk 1130 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, chunk 1139 net/sctp/sm_statefuns.c chunk->subh.hb_hdr = (struct sctp_heartbeathdr *)chunk->skb->data; chunk 1140 net/sctp/sm_statefuns.c param_hdr = (struct 
sctp_paramhdr *)chunk->subh.hb_hdr; chunk 1141 net/sctp/sm_statefuns.c paylen = ntohs(chunk->chunk_hdr->length) - sizeof(struct sctp_chunkhdr); chunk 1147 net/sctp/sm_statefuns.c if (!pskb_pull(chunk->skb, paylen)) chunk 1150 net/sctp/sm_statefuns.c reply = sctp_make_heartbeat_ack(asoc, chunk, param_hdr, paylen); chunk 1197 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 1202 net/sctp/sm_statefuns.c if (!sctp_vtag_verify(chunk, asoc)) chunk 1206 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr) + chunk 1211 net/sctp/sm_statefuns.c hbinfo = (struct sctp_sender_hb_info *)chunk->skb->data; chunk 1459 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg, *repl, *err_chunk; chunk 1468 net/sctp/sm_statefuns.c chunk->skb)) chunk 1480 net/sctp/sm_statefuns.c if (!chunk->singleton) chunk 1486 net/sctp/sm_statefuns.c if (chunk->sctp_hdr->vtag != 0) chunk 1493 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk))) chunk 1497 net/sctp/sm_statefuns.c chunk->subh.init_hdr = (struct sctp_inithdr *)chunk->skb->data; chunk 1500 net/sctp/sm_statefuns.c chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(struct sctp_inithdr)); chunk 1504 net/sctp/sm_statefuns.c if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type, chunk 1505 net/sctp/sm_statefuns.c (struct sctp_init_chunk *)chunk->chunk_hdr, chunk, chunk 1539 net/sctp/sm_statefuns.c new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC); chunk 1544 net/sctp/sm_statefuns.c sctp_scope(sctp_source(chunk)), GFP_ATOMIC) < 0) chunk 1551 net/sctp/sm_statefuns.c if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), chunk 1552 net/sctp/sm_statefuns.c (struct sctp_init_chunk *)chunk->chunk_hdr, chunk 1562 net/sctp/sm_statefuns.c if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, chunk 1582 net/sctp/sm_statefuns.c repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len); chunk 1774 net/sctp/sm_statefuns.c struct sctp_chunk *chunk, chunk 1787 net/sctp/sm_statefuns.c peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; chunk 1789 net/sctp/sm_statefuns.c if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init, chunk 1796 net/sctp/sm_statefuns.c if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) chunk 1803 net/sctp/sm_statefuns.c if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) chunk 1814 net/sctp/sm_statefuns.c SCTP_ST_CHUNK(chunk->chunk_hdr->type), chunk 1815 net/sctp/sm_statefuns.c chunk, commands); chunk 1819 net/sctp/sm_statefuns.c err = sctp_make_op_error(asoc, chunk, chunk 1844 net/sctp/sm_statefuns.c repl = sctp_make_cookie_ack(new_asoc, chunk); chunk 1896 net/sctp/sm_statefuns.c struct sctp_chunk *chunk, chunk 1906 net/sctp/sm_statefuns.c peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; chunk 1907 net/sctp/sm_statefuns.c if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init, chunk 1914 net/sctp/sm_statefuns.c if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) chunk 1924 net/sctp/sm_statefuns.c repl = sctp_make_cookie_ack(new_asoc, chunk); chunk 1977 net/sctp/sm_statefuns.c struct sctp_chunk *chunk, chunk 2000 net/sctp/sm_statefuns.c struct sctp_chunk *chunk, chunk 2014 net/sctp/sm_statefuns.c if (!sctp_auth_chunk_verify(net, chunk, asoc)) chunk 2064 net/sctp/sm_statefuns.c repl = sctp_make_cookie_ack(asoc, chunk); chunk 2117 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 2128 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) chunk 2135 
net/sctp/sm_statefuns.c chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data; chunk 2136 net/sctp/sm_statefuns.c if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) - chunk 2145 net/sctp/sm_statefuns.c new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error, chunk 2163 net/sctp/sm_statefuns.c sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands, chunk 2174 net/sctp/sm_statefuns.c chunk->skb)) { chunk 2189 net/sctp/sm_statefuns.c retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands, chunk 2194 net/sctp/sm_statefuns.c retval = sctp_sf_do_dupcook_b(net, ep, asoc, chunk, commands, chunk 2199 net/sctp/sm_statefuns.c retval = sctp_sf_do_dupcook_c(net, ep, asoc, chunk, commands, chunk 2204 net/sctp/sm_statefuns.c retval = sctp_sf_do_dupcook_d(net, ep, asoc, chunk, commands, chunk 2242 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 2244 net/sctp/sm_statefuns.c if (!sctp_vtag_verify_either(chunk, asoc)) chunk 2257 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_abort_chunk))) chunk 2266 net/sctp/sm_statefuns.c sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest)) chunk 2269 net/sctp/sm_statefuns.c if (!sctp_err_chunk_valid(chunk)) chunk 2288 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 2290 net/sctp/sm_statefuns.c if (!sctp_vtag_verify_either(chunk, asoc)) chunk 2303 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_abort_chunk))) chunk 2312 net/sctp/sm_statefuns.c sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest)) chunk 2315 net/sctp/sm_statefuns.c if (!sctp_err_chunk_valid(chunk)) chunk 2370 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 2373 net/sctp/sm_statefuns.c if (!sctp_vtag_verify(chunk, asoc)) chunk 2379 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_operr_chunk))) chunk 2388 net/sctp/sm_statefuns.c sctp_walk_errors(err, chunk->chunk_hdr) { chunk 2436 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg, *reply; chunk 2450 net/sctp/sm_statefuns.c err = (struct sctp_errhdr *)(chunk->skb->data); chunk 2558 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 2560 net/sctp/sm_statefuns.c if (!sctp_vtag_verify_either(chunk, asoc)) chunk 2573 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_abort_chunk))) chunk 2582 net/sctp/sm_statefuns.c sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest)) chunk 2585 net/sctp/sm_statefuns.c if (!sctp_err_chunk_valid(chunk)) chunk 2600 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 2604 net/sctp/sm_statefuns.c len = ntohs(chunk->chunk_hdr->length); chunk 2606 net/sctp/sm_statefuns.c error = ((struct sctp_errhdr *)chunk->skb->data)->cause; chunk 2631 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 2634 net/sctp/sm_statefuns.c if (!sctp_vtag_verify_either(chunk, asoc)) chunk 2647 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_abort_chunk))) chunk 2651 net/sctp/sm_statefuns.c len = ntohs(chunk->chunk_hdr->length); chunk 2653 net/sctp/sm_statefuns.c error = ((struct sctp_errhdr *)chunk->skb->data)->cause; chunk 2656 net/sctp/sm_statefuns.c chunk->transport); chunk 2761 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 2766 net/sctp/sm_statefuns.c if (!sctp_vtag_verify(chunk, asoc)) chunk 2770 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk))) chunk 2775 net/sctp/sm_statefuns.c sdh = (struct 
sctp_shutdownhdr *)chunk->skb->data; chunk 2776 net/sctp/sm_statefuns.c skb_pull(chunk->skb, sizeof(*sdh)); chunk 2777 net/sctp/sm_statefuns.c chunk->subh.shutdown_hdr = sdh; chunk 2828 net/sctp/sm_statefuns.c SCTP_BE32(chunk->subh.shutdown_hdr->cum_tsn_ack)); chunk 2850 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 2854 net/sctp/sm_statefuns.c if (!sctp_vtag_verify(chunk, asoc)) chunk 2858 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk))) chunk 2862 net/sctp/sm_statefuns.c sdh = (struct sctp_shutdownhdr *)chunk->skb->data; chunk 2904 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 2908 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) chunk 2916 net/sctp/sm_statefuns.c reply = sctp_make_shutdown_ack(asoc, chunk); chunk 2968 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 2972 net/sctp/sm_statefuns.c if (!sctp_vtag_verify(chunk, asoc)) chunk 2975 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_ecne_chunk))) chunk 2979 net/sctp/sm_statefuns.c cwr = (struct sctp_cwrhdr *)chunk->skb->data; chunk 2980 net/sctp/sm_statefuns.c skb_pull(chunk->skb, sizeof(*cwr)); chunk 3023 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 3026 net/sctp/sm_statefuns.c if (!sctp_vtag_verify(chunk, asoc)) chunk 3029 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_ecne_chunk))) chunk 3033 net/sctp/sm_statefuns.c ecne = (struct sctp_ecnehdr *)chunk->skb->data; chunk 3034 net/sctp/sm_statefuns.c skb_pull(chunk->skb, sizeof(*ecne)); chunk 3081 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 3084 net/sctp/sm_statefuns.c if (!sctp_vtag_verify(chunk, asoc)) { chunk 3090 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sctp_datachk_len(&asoc->stream))) chunk 3094 net/sctp/sm_statefuns.c error = sctp_eat_data(asoc, chunk, commands); chunk 3109 net/sctp/sm_statefuns.c return sctp_sf_abort_violation(net, ep, asoc, chunk, commands, chunk 3110 net/sctp/sm_statefuns.c (u8 *)chunk->subh.data_hdr, chunk 3116 net/sctp/sm_statefuns.c if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM) chunk 3146 net/sctp/sm_statefuns.c if (chunk->end_of_packet) chunk 3166 net/sctp/sm_statefuns.c if (chunk->end_of_packet) chunk 3171 net/sctp/sm_statefuns.c if (chunk->end_of_packet) chunk 3201 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 3204 net/sctp/sm_statefuns.c if (!sctp_vtag_verify(chunk, asoc)) { chunk 3210 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sctp_datachk_len(&asoc->stream))) chunk 3214 net/sctp/sm_statefuns.c error = sctp_eat_data(asoc, chunk, commands); chunk 3225 net/sctp/sm_statefuns.c return sctp_sf_abort_violation(net, ep, asoc, chunk, commands, chunk 3226 net/sctp/sm_statefuns.c (u8 *)chunk->subh.data_hdr, chunk 3240 net/sctp/sm_statefuns.c if (chunk->end_of_packet) { chunk 3292 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 3296 net/sctp/sm_statefuns.c trace_sctp_probe(ep, asoc, chunk); chunk 3298 net/sctp/sm_statefuns.c if (!sctp_vtag_verify(chunk, asoc)) chunk 3302 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_sack_chunk))) chunk 3307 net/sctp/sm_statefuns.c sackh = sctp_sm_pull_sack(chunk); chunk 3311 net/sctp/sm_statefuns.c chunk->subh.sack_hdr = sackh; chunk 3335 net/sctp/sm_statefuns.c sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_CHUNK(chunk)); chunk 3370 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; 
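The sm_statefuns.c entries above repeat a single guard pattern: each handler verifies the packet's vtag, checks the chunk length against the header it is about to parse, and only then pulls that header off the skb. Below is a minimal standalone sketch of the length check; the struct definitions and the helper name chunk_length_valid are simplified stand-ins for the kernel's sctp_chunk, sctp_chunkhdr and sctp_chunk_length_valid, not the kernel code itself.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <arpa/inet.h>          /* ntohs() */

    struct chunkhdr {               /* stand-in for struct sctp_chunkhdr */
            uint8_t  type;
            uint8_t  flags;
            uint16_t length;        /* big-endian; covers header plus payload */
    };

    struct chunk {                  /* stand-in for struct sctp_chunk */
            struct chunkhdr *chunk_hdr;
            bool pdiscard;          /* earlier processing flagged the packet for discard */
    };

    /* Mirrors the shape of sctp_chunk_length_valid() as quoted above:
     * refuse chunks already marked for discard, and chunks too short to
     * contain the header the caller wants to read next. */
    static bool chunk_length_valid(const struct chunk *c, size_t required_length)
    {
            uint16_t chunk_length = ntohs(c->chunk_hdr->length);

            if (c->pdiscard)
                    return false;
            return chunk_length >= required_length;
    }

A caller sketched against this helper would take its discard path whenever chunk_length_valid(c, sizeof(struct chunkhdr)) returns false, mirroring how the SHUTDOWN and ECNE handlers listed above validate before dereferencing chunk->skb->data.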
chunk 3373 net/sctp/sm_statefuns.c packet = sctp_ootb_pkt_new(net, asoc, chunk); chunk 3380 net/sctp/sm_statefuns.c abort = sctp_make_abort(asoc, chunk, 0); chunk 3388 net/sctp/sm_statefuns.c packet->vtag = ntohl(chunk->sctp_hdr->vtag); chunk 3419 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 3422 net/sctp/sm_statefuns.c if (!sctp_vtag_verify(chunk, asoc)) chunk 3426 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_operr_chunk))) chunk 3429 net/sctp/sm_statefuns.c sctp_walk_errors(err, chunk->chunk_hdr); chunk 3430 net/sctp/sm_statefuns.c if ((void *)err != (void *)chunk->chunk_end) chunk 3435 net/sctp/sm_statefuns.c SCTP_CHUNK(chunk)); chunk 3457 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 3461 net/sctp/sm_statefuns.c if (!sctp_vtag_verify(chunk, asoc)) chunk 3465 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) chunk 3479 net/sctp/sm_statefuns.c reply = sctp_make_shutdown_complete(asoc, chunk); chunk 3539 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 3540 net/sctp/sm_statefuns.c struct sk_buff *skb = chunk->skb; chunk 3549 net/sctp/sm_statefuns.c ch = (struct sctp_chunkhdr *)chunk->chunk_hdr; chunk 3634 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 3637 net/sctp/sm_statefuns.c packet = sctp_ootb_pkt_new(net, asoc, chunk); chunk 3644 net/sctp/sm_statefuns.c shut = sctp_make_shutdown_complete(asoc, chunk); chunk 3652 net/sctp/sm_statefuns.c packet->vtag = ntohl(chunk->sctp_hdr->vtag); chunk 3667 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) chunk 3695 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 3698 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) chunk 3722 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 3726 net/sctp/sm_statefuns.c if (!sctp_vtag_verify(chunk, asoc)) { chunk 3739 net/sctp/sm_statefuns.c (!net->sctp.addip_noauth && !chunk->auth)) chunk 3744 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_addip_chunk))) chunk 3748 net/sctp/sm_statefuns.c hdr = (struct sctp_addiphdr *)chunk->skb->data; chunk 3752 net/sctp/sm_statefuns.c if (!sctp_verify_asconf(asoc, chunk, true, &err_param)) chunk 3764 net/sctp/sm_statefuns.c if (!chunk->has_asconf) chunk 3776 net/sctp/sm_statefuns.c asoc, chunk); chunk 3817 net/sctp/sm_statefuns.c asconf_ack->dest = chunk->source; chunk 3979 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 3983 net/sctp/sm_statefuns.c if (!sctp_vtag_verify(chunk, asoc)) { chunk 3990 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(*hdr))) chunk 3994 net/sctp/sm_statefuns.c if (!sctp_verify_reconf(asoc, chunk, &err_param)) chunk 3998 net/sctp/sm_statefuns.c hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr; chunk 4056 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 4060 net/sctp/sm_statefuns.c if (!sctp_vtag_verify(chunk, asoc)) { chunk 4070 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sctp_ftsnchk_len(&asoc->stream))) chunk 4074 net/sctp/sm_statefuns.c fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data; chunk 4075 net/sctp/sm_statefuns.c chunk->subh.fwdtsn_hdr = fwdtsn_hdr; chunk 4076 net/sctp/sm_statefuns.c len = ntohs(chunk->chunk_hdr->length); chunk 4078 net/sctp/sm_statefuns.c skb_pull(chunk->skb, len); chunk 4089 net/sctp/sm_statefuns.c if (!asoc->stream.si->validate_ftsn(chunk)) chunk 4095 net/sctp/sm_statefuns.c 
SCTP_CHUNK(chunk)); chunk 4123 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 4127 net/sctp/sm_statefuns.c if (!sctp_vtag_verify(chunk, asoc)) { chunk 4137 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sctp_ftsnchk_len(&asoc->stream))) chunk 4141 net/sctp/sm_statefuns.c fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data; chunk 4142 net/sctp/sm_statefuns.c chunk->subh.fwdtsn_hdr = fwdtsn_hdr; chunk 4143 net/sctp/sm_statefuns.c len = ntohs(chunk->chunk_hdr->length); chunk 4145 net/sctp/sm_statefuns.c skb_pull(chunk->skb, len); chunk 4156 net/sctp/sm_statefuns.c if (!asoc->stream.si->validate_ftsn(chunk)) chunk 4162 net/sctp/sm_statefuns.c SCTP_CHUNK(chunk)); chunk 4204 net/sctp/sm_statefuns.c struct sctp_chunk *chunk) chunk 4214 net/sctp/sm_statefuns.c auth_hdr = (struct sctp_authhdr *)chunk->skb->data; chunk 4215 net/sctp/sm_statefuns.c chunk->subh.auth_hdr = auth_hdr; chunk 4216 net/sctp/sm_statefuns.c skb_pull(chunk->skb, sizeof(*auth_hdr)); chunk 4237 net/sctp/sm_statefuns.c sig_len = ntohs(chunk->chunk_hdr->length) - chunk 4251 net/sctp/sm_statefuns.c skb_pull(chunk->skb, sig_len); chunk 4259 net/sctp/sm_statefuns.c sctp_auth_calculate_hmac(asoc, chunk->skb, chunk 4260 net/sctp/sm_statefuns.c (struct sctp_auth_chunk *)chunk->chunk_hdr, chunk 4270 net/sctp/sm_statefuns.c chunk->auth = 1; chunk 4283 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 4292 net/sctp/sm_statefuns.c if (!sctp_vtag_verify(chunk, asoc)) { chunk 4299 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_auth_chunk))) chunk 4303 net/sctp/sm_statefuns.c auth_hdr = (struct sctp_authhdr *)chunk->skb->data; chunk 4304 net/sctp/sm_statefuns.c error = sctp_sf_authenticate(asoc, chunk); chunk 4310 net/sctp/sm_statefuns.c err_chunk = sctp_make_op_error(asoc, chunk, chunk 4384 net/sctp/sm_statefuns.c pr_debug("%s: processing unknown chunk id:%d\n", __func__, type.chunk); chunk 4397 net/sctp/sm_statefuns.c switch (type.chunk & SCTP_CID_ACTION_MASK) { chunk 4460 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 4466 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) chunk 4470 net/sctp/sm_statefuns.c pr_debug("%s: chunk:%d is discarded\n", __func__, type.chunk); chunk 4527 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 4530 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) chunk 4550 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 4568 net/sctp/sm_statefuns.c abort = sctp_make_abort_violation(asoc, chunk, payload, paylen); chunk 4574 net/sctp/sm_statefuns.c if (chunk->chunk_hdr->type == SCTP_CID_INIT_ACK && chunk 4578 net/sctp/sm_statefuns.c initack = (struct sctp_initack_chunk *)chunk->chunk_hdr; chunk 4579 net/sctp/sm_statefuns.c if (!sctp_chunk_length_valid(chunk, sizeof(*initack))) chunk 4608 net/sctp/sm_statefuns.c packet = sctp_ootb_pkt_new(net, asoc, chunk); chunk 4614 net/sctp/sm_statefuns.c packet->vtag = ntohl(chunk->sctp_hdr->vtag); chunk 4687 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 4693 net/sctp/sm_statefuns.c abort = sctp_make_violation_paramlen(asoc, chunk, param); chunk 5375 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 5377 net/sctp/sm_statefuns.c sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk)); chunk 5380 net/sctp/sm_statefuns.c sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk)); chunk 5392 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 5394 
net/sctp/sm_statefuns.c sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk)); chunk 5537 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = arg; chunk 5547 net/sctp/sm_statefuns.c if (chunk) { chunk 5548 net/sctp/sm_statefuns.c if (!sctp_vtag_verify(chunk, asoc)) chunk 5554 net/sctp/sm_statefuns.c chunk, sizeof(struct sctp_shutdown_chunk))) chunk 5562 net/sctp/sm_statefuns.c reply = sctp_make_shutdown_ack(asoc, chunk); chunk 5945 net/sctp/sm_statefuns.c struct sctp_chunk *chunk = asoc->addip_last_asconf; chunk 5946 net/sctp/sm_statefuns.c struct sctp_transport *transport = chunk->transport; chunk 5959 net/sctp/sm_statefuns.c sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk)); chunk 6134 net/sctp/sm_statefuns.c pr_debug("%s: timer %d ignored\n", __func__, type.chunk); chunk 6144 net/sctp/sm_statefuns.c static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk) chunk 6154 net/sctp/sm_statefuns.c sack = (struct sctp_sackhdr *) chunk->skb->data; chunk 6160 net/sctp/sm_statefuns.c if (len > chunk->skb->len) chunk 6163 net/sctp/sm_statefuns.c skb_pull(chunk->skb, len); chunk 6175 net/sctp/sm_statefuns.c struct sctp_chunk *chunk, chunk 6181 net/sctp/sm_statefuns.c packet = sctp_ootb_pkt_new(net, asoc, chunk); chunk 6187 net/sctp/sm_statefuns.c abort = sctp_make_abort(asoc, chunk, paylen); chunk 6195 net/sctp/sm_statefuns.c packet->vtag = ntohl(chunk->sctp_hdr->vtag); chunk 6216 net/sctp/sm_statefuns.c const struct sctp_chunk *chunk) chunk 6224 net/sctp/sm_statefuns.c sport = ntohs(chunk->sctp_hdr->dest); chunk 6225 net/sctp/sm_statefuns.c dport = ntohs(chunk->sctp_hdr->source); chunk 6234 net/sctp/sm_statefuns.c switch (chunk->chunk_hdr->type) { chunk 6239 net/sctp/sm_statefuns.c initack = (struct sctp_initack_chunk *)chunk->chunk_hdr; chunk 6251 net/sctp/sm_statefuns.c switch (chunk->chunk_hdr->type) { chunk 6256 net/sctp/sm_statefuns.c init = (struct sctp_init_chunk *)chunk->chunk_hdr; chunk 6261 net/sctp/sm_statefuns.c vtag = ntohl(chunk->sctp_hdr->vtag); chunk 6267 net/sctp/sm_statefuns.c transport = sctp_transport_new(net, sctp_source(chunk), GFP_ATOMIC); chunk 6274 net/sctp/sm_statefuns.c sctp_transport_route(transport, (union sctp_addr *)&chunk->dest, chunk 6297 net/sctp/sm_statefuns.c const struct sctp_chunk *chunk, chunk 6304 net/sctp/sm_statefuns.c packet = sctp_ootb_pkt_new(net, asoc, chunk); chunk 6309 net/sctp/sm_statefuns.c cookie = chunk->subh.cookie_hdr; chunk 6326 net/sctp/sm_statefuns.c struct sctp_chunk *chunk, chunk 6339 net/sctp/sm_statefuns.c data_hdr = (struct sctp_datahdr *)chunk->skb->data; chunk 6340 net/sctp/sm_statefuns.c chunk->subh.data_hdr = data_hdr; chunk 6341 net/sctp/sm_statefuns.c skb_pull(chunk->skb, sctp_datahdr_len(&asoc->stream)); chunk 6358 net/sctp/sm_statefuns.c if (asoc->peer.ecn_capable && !chunk->ecn_ce_done) { chunk 6359 net/sctp/sm_statefuns.c struct sctp_af *af = SCTP_INPUT_CB(chunk->skb)->af; chunk 6360 net/sctp/sm_statefuns.c chunk->ecn_ce_done = 1; chunk 6362 net/sctp/sm_statefuns.c if (af->is_ce(sctp_gso_headskb(chunk->skb))) { chunk 6374 net/sctp/sm_statefuns.c if (chunk->asoc) chunk 6375 net/sctp/sm_statefuns.c chunk->asoc->stats.outofseqtsns++; chunk 6388 net/sctp/sm_statefuns.c datalen = ntohs(chunk->chunk_hdr->length); chunk 6407 net/sctp/sm_statefuns.c if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over || chunk 6455 net/sctp/sm_statefuns.c err = sctp_make_abort_no_data(asoc, chunk, tsn); chunk 6473 net/sctp/sm_statefuns.c chunk->data_accepted = 1; chunk 6478 net/sctp/sm_statefuns.c if 
(chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) { chunk 6480 net/sctp/sm_statefuns.c if (chunk->asoc) chunk 6481 net/sctp/sm_statefuns.c chunk->asoc->stats.iuodchunks++; chunk 6484 net/sctp/sm_statefuns.c if (chunk->asoc) chunk 6485 net/sctp/sm_statefuns.c chunk->asoc->stats.iodchunks++; chunk 6500 net/sctp/sm_statefuns.c err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM, chunk 6517 net/sctp/sm_statefuns.c if (!asoc->stream.si->validate_data(chunk)) chunk 6524 net/sctp/sm_statefuns.c sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk)); chunk 73 net/sctp/sm_statetable.c return sctp_chunk_event_lookup(net, event_subtype.chunk, state); chunk 87 net/sctp/socket.c struct sctp_chunk *chunk); chunk 122 net/sctp/socket.c static inline void sctp_set_owner_w(struct sctp_chunk *chunk) chunk 124 net/sctp/socket.c struct sctp_association *asoc = chunk->asoc; chunk 130 net/sctp/socket.c if (chunk->shkey) chunk 131 net/sctp/socket.c sctp_auth_shkey_hold(chunk->shkey); chunk 133 net/sctp/socket.c skb_set_owner_w(chunk->skb, sk); chunk 135 net/sctp/socket.c chunk->skb->destructor = sctp_wfree; chunk 137 net/sctp/socket.c skb_shinfo(chunk->skb)->destructor_arg = chunk; chunk 140 net/sctp/socket.c asoc->sndbuf_used += chunk->skb->truesize + sizeof(struct sctp_chunk); chunk 141 net/sctp/socket.c sk->sk_wmem_queued += chunk->skb->truesize + sizeof(struct sctp_chunk); chunk 142 net/sctp/socket.c sk_mem_charge(sk, chunk->skb->truesize); chunk 145 net/sctp/socket.c static void sctp_clear_owner_w(struct sctp_chunk *chunk) chunk 147 net/sctp/socket.c skb_orphan(chunk->skb); chunk 152 net/sctp/socket.c msg = chunk->msg; \ chunk 170 net/sctp/socket.c struct sctp_chunk *chunk, *c; chunk 174 net/sctp/socket.c list_for_each_entry(chunk, &t->transmitted, transmitted_list) chunk 177 net/sctp/socket.c list_for_each_entry(chunk, &q->retransmit, transmitted_list) chunk 180 net/sctp/socket.c list_for_each_entry(chunk, &q->sacked, transmitted_list) chunk 183 net/sctp/socket.c list_for_each_entry(chunk, &q->abandoned, transmitted_list) chunk 186 net/sctp/socket.c list_for_each_entry(chunk, &q->out_chunk_list, list) chunk 452 net/sctp/socket.c struct sctp_chunk *chunk) chunk 461 net/sctp/socket.c list_add_tail(&chunk->list, &asoc->addip_chunk_list); chunk 466 net/sctp/socket.c sctp_chunk_hold(chunk); chunk 467 net/sctp/socket.c retval = sctp_primitive_ASCONF(net, asoc, chunk); chunk 469 net/sctp/socket.c sctp_chunk_free(chunk); chunk 471 net/sctp/socket.c asoc->addip_last_asconf = chunk; chunk 547 net/sctp/socket.c struct sctp_chunk *chunk; chunk 604 net/sctp/socket.c chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs, chunk 606 net/sctp/socket.c if (!chunk) { chunk 640 net/sctp/socket.c retval = sctp_send_asconf(asoc, chunk); chunk 750 net/sctp/socket.c struct sctp_chunk *chunk; chunk 759 net/sctp/socket.c chunk = NULL; chunk 851 net/sctp/socket.c chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt, chunk 853 net/sctp/socket.c if (!chunk) { chunk 886 net/sctp/socket.c retval = sctp_send_asconf(asoc, chunk); chunk 1525 net/sctp/socket.c struct sctp_chunk *chunk; chunk 1527 net/sctp/socket.c chunk = sctp_make_abort_user(asoc, NULL, 0); chunk 1528 net/sctp/socket.c sctp_primitive_ABORT(net, asoc, chunk); chunk 1776 net/sctp/socket.c struct sctp_chunk *chunk; chunk 1778 net/sctp/socket.c chunk = sctp_make_abort_user(asoc, msg, msg_len); chunk 1779 net/sctp/socket.c if (!chunk) chunk 1783 net/sctp/socket.c sctp_primitive_ABORT(net, asoc, chunk); chunk 1802 net/sctp/socket.c struct sctp_chunk *chunk; chunk 
1868 net/sctp/socket.c list_for_each_entry(chunk, &datamsg->chunks, frag_list) { chunk 1869 net/sctp/socket.c sctp_chunk_hold(chunk); chunk 1870 net/sctp/socket.c sctp_set_owner_w(chunk); chunk 1871 net/sctp/socket.c chunk->transport = transport; chunk 2133 net/sctp/socket.c if (event->chunk && event->chunk->head_skb) chunk 2134 net/sctp/socket.c head_skb = event->chunk->head_skb; chunk 3350 net/sctp/socket.c struct sctp_chunk *chunk; chunk 3396 net/sctp/socket.c chunk = sctp_make_asconf_set_prim(asoc, chunk 3398 net/sctp/socket.c if (!chunk) chunk 3401 net/sctp/socket.c err = sctp_send_asconf(asoc, chunk); chunk 8983 net/sctp/socket.c struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg; chunk 8984 net/sctp/socket.c struct sctp_association *asoc = chunk->asoc; chunk 8993 net/sctp/socket.c if (chunk->shkey) { chunk 8994 net/sctp/socket.c struct sctp_shared_key *shkey = chunk->shkey; chunk 9010 net/sctp/socket.c sctp_auth_shkey_release(chunk->shkey); chunk 223 net/sctp/stream.c struct sctp_chunk *chunk) chunk 228 net/sctp/stream.c retval = sctp_primitive_RECONF(net, asoc, chunk); chunk 230 net/sctp/stream.c sctp_chunk_free(chunk); chunk 264 net/sctp/stream.c struct sctp_chunk *chunk; chunk 328 net/sctp/stream.c chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in); chunk 332 net/sctp/stream.c if (!chunk) { chunk 347 net/sctp/stream.c asoc->strreset_chunk = chunk; chunk 350 net/sctp/stream.c retval = sctp_send_reconf(asoc, chunk); chunk 377 net/sctp/stream.c struct sctp_chunk *chunk = NULL; chunk 391 net/sctp/stream.c chunk = sctp_make_strreset_tsnreq(asoc); chunk 392 net/sctp/stream.c if (!chunk) chunk 399 net/sctp/stream.c asoc->strreset_chunk = chunk; chunk 402 net/sctp/stream.c retval = sctp_send_reconf(asoc, chunk); chunk 422 net/sctp/stream.c struct sctp_chunk *chunk = NULL; chunk 454 net/sctp/stream.c chunk = sctp_make_strreset_addstrm(asoc, out, in); chunk 455 net/sctp/stream.c if (!chunk) { chunk 460 net/sctp/stream.c asoc->strreset_chunk = chunk; chunk 463 net/sctp/stream.c retval = sctp_send_reconf(asoc, chunk); chunk 480 net/sctp/stream.c struct sctp_chunk *chunk = asoc->strreset_chunk; chunk 484 net/sctp/stream.c if (!chunk) chunk 487 net/sctp/stream.c hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr; chunk 607 net/sctp/stream.c struct sctp_chunk *chunk = NULL; chunk 649 net/sctp/stream.c chunk = sctp_make_strreset_req(asoc, nums, str_p, 1, 0); chunk 650 net/sctp/stream.c if (!chunk) chunk 661 net/sctp/stream.c asoc->strreset_chunk = chunk; chunk 670 net/sctp/stream.c if (!chunk) chunk 671 net/sctp/stream.c chunk = sctp_make_strreset_resp(asoc, result, request_seq); chunk 673 net/sctp/stream.c return chunk; chunk 851 net/sctp/stream.c struct sctp_chunk *chunk = NULL; chunk 887 net/sctp/stream.c chunk = sctp_make_strreset_addstrm(asoc, out, 0); chunk 888 net/sctp/stream.c if (!chunk) chunk 891 net/sctp/stream.c asoc->strreset_chunk = chunk; chunk 902 net/sctp/stream.c if (!chunk) chunk 903 net/sctp/stream.c chunk = sctp_make_strreset_resp(asoc, result, request_seq); chunk 905 net/sctp/stream.c return chunk; chunk 48 net/sctp/stream_interleave.c static void sctp_chunk_assign_mid(struct sctp_chunk *chunk) chunk 55 net/sctp/stream_interleave.c if (chunk->has_mid) chunk 58 net/sctp/stream_interleave.c sid = sctp_chunk_stream_no(chunk); chunk 59 net/sctp/stream_interleave.c stream = &chunk->asoc->stream; chunk 61 net/sctp/stream_interleave.c list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) { chunk 87 net/sctp/stream_interleave.c static bool 
sctp_validate_data(struct sctp_chunk *chunk) chunk 92 net/sctp/stream_interleave.c if (chunk->chunk_hdr->type != SCTP_CID_DATA) chunk 95 net/sctp/stream_interleave.c if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) chunk 98 net/sctp/stream_interleave.c stream = &chunk->asoc->stream; chunk 99 net/sctp/stream_interleave.c sid = sctp_chunk_stream_no(chunk); chunk 100 net/sctp/stream_interleave.c ssn = ntohs(chunk->subh.data_hdr->ssn); chunk 105 net/sctp/stream_interleave.c static bool sctp_validate_idata(struct sctp_chunk *chunk) chunk 111 net/sctp/stream_interleave.c if (chunk->chunk_hdr->type != SCTP_CID_I_DATA) chunk 114 net/sctp/stream_interleave.c if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) chunk 117 net/sctp/stream_interleave.c stream = &chunk->asoc->stream; chunk 118 net/sctp/stream_interleave.c sid = sctp_chunk_stream_no(chunk); chunk 119 net/sctp/stream_interleave.c mid = ntohl(chunk->subh.idata_hdr->mid); chunk 829 net/sctp/stream_interleave.c struct sctp_chunk *chunk, gfp_t gfp) chunk 835 net/sctp/stream_interleave.c event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp); chunk 839 net/sctp/stream_interleave.c event->mid = ntohl(chunk->subh.idata_hdr->mid); chunk 841 net/sctp/stream_interleave.c event->ppid = chunk->subh.idata_hdr->ppid; chunk 843 net/sctp/stream_interleave.c event->fsn = ntohl(chunk->subh.idata_hdr->fsn); chunk 963 net/sctp/stream_interleave.c static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, chunk 970 net/sctp/stream_interleave.c needed = ntohs(chunk->chunk_hdr->length) - chunk 983 net/sctp/stream_interleave.c if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0) chunk 1116 net/sctp/stream_interleave.c struct sctp_chunk *chunk; chunk 1126 net/sctp/stream_interleave.c chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list); chunk 1127 net/sctp/stream_interleave.c tsn = ntohl(chunk->subh.data_hdr->tsn); chunk 1131 net/sctp/stream_interleave.c sctp_chunk_free(chunk); chunk 1133 net/sctp/stream_interleave.c __be16 sid = chunk->subh.idata_hdr->stream; chunk 1134 net/sctp/stream_interleave.c __be32 mid = chunk->subh.idata_hdr->mid; chunk 1137 net/sctp/stream_interleave.c if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) chunk 1166 net/sctp/stream_interleave.c #define _sctp_walk_ifwdtsn(pos, chunk, end) \ chunk 1167 net/sctp/stream_interleave.c for (pos = chunk->subh.ifwdtsn_hdr->skip; \ chunk 1168 net/sctp/stream_interleave.c (void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++) chunk 1174 net/sctp/stream_interleave.c static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk) chunk 1179 net/sctp/stream_interleave.c if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN) chunk 1182 net/sctp/stream_interleave.c incnt = chunk->asoc->stream.incnt; chunk 1183 net/sctp/stream_interleave.c sctp_walk_fwdtsn(skip, chunk) chunk 1190 net/sctp/stream_interleave.c static bool sctp_validate_iftsn(struct sctp_chunk *chunk) chunk 1195 net/sctp/stream_interleave.c if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN) chunk 1198 net/sctp/stream_interleave.c incnt = chunk->asoc->stream.incnt; chunk 1199 net/sctp/stream_interleave.c sctp_walk_ifwdtsn(skip, chunk) chunk 1252 net/sctp/stream_interleave.c static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk) chunk 1257 net/sctp/stream_interleave.c sctp_walk_fwdtsn(skip, chunk) chunk 1289 net/sctp/stream_interleave.c static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk) chunk 1294 net/sctp/stream_interleave.c 
sctp_walk_ifwdtsn(skip, chunk) chunk 82 net/sctp/stream_sched.c struct sctp_chunk *chunk) chunk 79 net/sctp/ulpevent.c struct sctp_chunk *chunk = event->chunk; chunk 90 net/sctp/ulpevent.c if (chunk && chunk->head_skb && !chunk->head_skb->sk) chunk 91 net/sctp/ulpevent.c chunk->head_skb->sk = asoc->base.sk; chunk 117 net/sctp/ulpevent.c __u16 inbound, struct sctp_chunk *chunk, gfp_t gfp) chunk 126 net/sctp/ulpevent.c if (chunk) { chunk 130 net/sctp/ulpevent.c skb = skb_copy_expand(chunk->skb, chunk 145 net/sctp/ulpevent.c ntohs(chunk->chunk_hdr->length) - chunk 356 net/sctp/ulpevent.c struct sctp_chunk *chunk, __u16 flags, chunk 366 net/sctp/ulpevent.c ch = (struct sctp_errhdr *)(chunk->skb->data); chunk 371 net/sctp/ulpevent.c skb_pull(chunk->skb, sizeof(*ch)); chunk 376 net/sctp/ulpevent.c skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp); chunk 379 net/sctp/ulpevent.c skb_pull(chunk->skb, elen); chunk 412 net/sctp/ulpevent.c const struct sctp_association *asoc, struct sctp_chunk *chunk, chunk 420 net/sctp/ulpevent.c int len = ntohs(chunk->chunk_hdr->length); chunk 423 net/sctp/ulpevent.c skb = skb_copy_expand(chunk->skb, chunk 490 net/sctp/ulpevent.c memcpy(&ssf->ssf_info, &chunk->sinfo, sizeof(struct sctp_sndrcvinfo)); chunk 495 net/sctp/ulpevent.c ssf->ssf_info.sinfo_flags = chunk->chunk_hdr->flags; chunk 618 net/sctp/ulpevent.c struct sctp_chunk *chunk, chunk 622 net/sctp/ulpevent.c struct sk_buff *skb = chunk->skb; chunk 637 net/sctp/ulpevent.c datalen = ntohs(chunk->chunk_hdr->length); chunk 643 net/sctp/ulpevent.c skb = skb_clone(chunk->skb, gfp); chunk 651 net/sctp/ulpevent.c ntohl(chunk->subh.data_hdr->tsn), chunk 652 net/sctp/ulpevent.c chunk->transport)) chunk 670 net/sctp/ulpevent.c skb_trim(skb, chunk->chunk_end - padding - skb->data); chunk 684 net/sctp/ulpevent.c sctp_chunk_hold(chunk); chunk 685 net/sctp/ulpevent.c event->chunk = chunk; chunk 689 net/sctp/ulpevent.c event->stream = ntohs(chunk->subh.data_hdr->stream); chunk 690 net/sctp/ulpevent.c if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) { chunk 694 net/sctp/ulpevent.c event->tsn = ntohl(chunk->subh.data_hdr->tsn); chunk 695 net/sctp/ulpevent.c event->msg_flags |= chunk->chunk_hdr->flags; chunk 1075 net/sctp/ulpevent.c sctp_chunk_put(event->chunk); chunk 1098 net/sctp/ulpevent.c sctp_chunk_put(event->chunk); chunk 85 net/sctp/ulpqueue.c int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, chunk 93 net/sctp/ulpqueue.c event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp); chunk 97 net/sctp/ulpqueue.c event->ssn = ntohs(chunk->subh.data_hdr->ssn); chunk 98 net/sctp/ulpqueue.c event->ppid = chunk->subh.data_hdr->ppid; chunk 1076 net/sctp/ulpqueue.c void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, chunk 1083 net/sctp/ulpqueue.c needed = ntohs(chunk->chunk_hdr->length) - chunk 1092 net/sctp/ulpqueue.c if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) && chunk 1094 net/sctp/ulpqueue.c int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp); chunk 291 net/smc/smc_rx.c int readable, chunk; chunk 395 net/smc/smc_rx.c for (chunk = 0; chunk < 2; chunk++) { chunk 143 net/smc/smc_tx.c int rc, chunk; chunk 191 net/smc/smc_tx.c for (chunk = 0; chunk < 2; chunk++) { chunk 172 net/sunrpc/xprtrdma/svc_rdma_rw.c svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk) chunk 182 net/sunrpc/xprtrdma/svc_rdma_rw.c info->wi_nsegs = be32_to_cpup(++chunk); chunk 183 net/sunrpc/xprtrdma/svc_rdma_rw.c info->wi_segs = ++chunk; chunk 49 net/tls/tls_sw.c int i, chunk = start 
- offset; chunk 56 net/tls/tls_sw.c if (chunk > 0) { chunk 57 net/tls/tls_sw.c if (chunk > len) chunk 58 net/tls/tls_sw.c chunk = len; chunk 60 net/tls/tls_sw.c len -= chunk; chunk 63 net/tls/tls_sw.c offset += chunk; chunk 72 net/tls/tls_sw.c chunk = end - offset; chunk 73 net/tls/tls_sw.c if (chunk > 0) { chunk 74 net/tls/tls_sw.c if (chunk > len) chunk 75 net/tls/tls_sw.c chunk = len; chunk 77 net/tls/tls_sw.c len -= chunk; chunk 80 net/tls/tls_sw.c offset += chunk; chunk 92 net/tls/tls_sw.c chunk = end - offset; chunk 93 net/tls/tls_sw.c if (chunk > 0) { chunk 94 net/tls/tls_sw.c if (chunk > len) chunk 95 net/tls/tls_sw.c chunk = len; chunk 96 net/tls/tls_sw.c ret = __skb_nsg(frag_iter, offset - start, chunk, chunk 101 net/tls/tls_sw.c len -= chunk; chunk 104 net/tls/tls_sw.c offset += chunk; chunk 1393 net/tls/tls_sw.c int *chunk, bool *zc, bool async) chunk 1494 net/tls/tls_sw.c *chunk = 0; chunk 1496 net/tls/tls_sw.c &pages, chunk, &sgout[1], chunk 1509 net/tls/tls_sw.c *chunk = data_len; chunk 1528 net/tls/tls_sw.c struct iov_iter *dest, int *chunk, bool *zc, chunk 1546 net/tls/tls_sw.c err = decrypt_internal(sk, skb, dest, NULL, chunk, zc, chunk 1580 net/tls/tls_sw.c int chunk; chunk 1582 net/tls/tls_sw.c return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false); chunk 1653 net/tls/tls_sw.c int chunk = min_t(unsigned int, rxm->full_len - skip, len); chunk 1678 net/tls/tls_sw.c msg, chunk); chunk 1683 net/tls/tls_sw.c len = len - chunk; chunk 1684 net/tls/tls_sw.c copied = copied + chunk; chunk 1688 net/tls/tls_sw.c rxm->offset = rxm->offset + chunk; chunk 1689 net/tls/tls_sw.c rxm->full_len = rxm->full_len - chunk; chunk 1772 net/tls/tls_sw.c int chunk = 0; chunk 1813 net/tls/tls_sw.c &chunk, &zc, async_capable); chunk 1859 net/tls/tls_sw.c chunk = len; chunk 1861 net/tls/tls_sw.c chunk = rxm->full_len; chunk 1865 net/tls/tls_sw.c msg, chunk); chunk 1870 net/tls/tls_sw.c rxm->offset = rxm->offset + chunk; chunk 1871 net/tls/tls_sw.c rxm->full_len = rxm->full_len - chunk; chunk 1876 net/tls/tls_sw.c if (chunk > len) chunk 1877 net/tls/tls_sw.c chunk = len; chunk 1879 net/tls/tls_sw.c decrypted += chunk; chunk 1880 net/tls/tls_sw.c len -= chunk; chunk 1888 net/tls/tls_sw.c if (tls_sw_advance_skb(sk, skb, chunk)) { chunk 1961 net/tls/tls_sw.c int chunk; chunk 1973 net/tls/tls_sw.c err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false); chunk 1989 net/tls/tls_sw.c chunk = min_t(unsigned int, rxm->full_len, len); chunk 1990 net/tls/tls_sw.c copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags); chunk 2245 net/unix/af_unix.c int chunk; chunk 2326 net/unix/af_unix.c chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size); chunk 2328 net/unix/af_unix.c chunk = state->recv_actor(skb, skip, chunk, state); chunk 2332 net/unix/af_unix.c if (chunk < 0) { chunk 2337 net/unix/af_unix.c copied += chunk; chunk 2338 net/unix/af_unix.c size -= chunk; chunk 2354 net/unix/af_unix.c UNIXCB(skb).consumed += chunk; chunk 2356 net/unix/af_unix.c sk_peek_offset_bwd(sk, chunk); chunk 2375 net/unix/af_unix.c sk_peek_offset_fwd(sk, chunk); chunk 2402 net/unix/af_unix.c int skip, int chunk, chunk 2408 net/unix/af_unix.c state->msg, chunk); chunk 2409 net/unix/af_unix.c return ret ?: chunk; chunk 2427 net/unix/af_unix.c int skip, int chunk, chunk 2432 net/unix/af_unix.c state->pipe, chunk, state->splice_flags); chunk 219 security/apparmor/policy_unpack.c static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk) chunk 230 security/apparmor/policy_unpack.c *chunk = e->pos; chunk 
163 security/keys/dh.c size_t chunk = min_t(size_t, zlen, sizeof(tmpbuffer)); chunk 164 security/keys/dh.c memset(tmpbuffer, 0, chunk); chunk 168 security/keys/dh.c chunk); chunk 172 security/keys/dh.c zlen -= chunk; chunk 173 security/keys/dh.c chunk = min_t(size_t, zlen, sizeof(tmpbuffer)); chunk 271 security/keys/keyring.c unsigned long chunk = 0; chunk 273 security/keys/keyring.c int desc_len = index_key->desc_len, n = sizeof(chunk); chunk 296 security/keys/keyring.c chunk <<= 8; chunk 297 security/keys/keyring.c chunk |= *d++; chunk 299 security/keys/keyring.c return chunk; chunk 60 sound/core/sgbuf.c unsigned int i, pages, chunk, maxpages; chunk 93 sound/core/sgbuf.c chunk = pages; chunk 95 sound/core/sgbuf.c if (chunk > maxpages) chunk 96 sound/core/sgbuf.c chunk = maxpages; chunk 97 sound/core/sgbuf.c chunk <<= PAGE_SHIFT; chunk 99 sound/core/sgbuf.c chunk, &tmpb) < 0) { chunk 107 sound/core/sgbuf.c chunk = tmpb.bytes >> PAGE_SHIFT; chunk 108 sound/core/sgbuf.c for (i = 0; i < chunk; i++) { chunk 112 sound/core/sgbuf.c table->addr |= chunk; /* mark head */ chunk 118 sound/core/sgbuf.c sgbuf->pages += chunk; chunk 119 sound/core/sgbuf.c pages -= chunk; chunk 120 sound/core/sgbuf.c if (chunk < maxpages) chunk 121 sound/core/sgbuf.c maxpages = chunk; chunk 368 sound/hda/hdac_stream.c int chunk; chunk 378 sound/hda/hdac_stream.c chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size); chunk 383 sound/hda/hdac_stream.c if (chunk > remain) chunk 384 sound/hda/hdac_stream.c chunk = remain; chunk 386 sound/hda/hdac_stream.c bdl[2] = cpu_to_le32(chunk); chunk 390 sound/hda/hdac_stream.c size -= chunk; chunk 394 sound/hda/hdac_stream.c ofs += chunk; chunk 1360 sound/pci/es1968.c struct esm_memory *chunk = kmalloc(sizeof(*chunk), GFP_KERNEL); chunk 1361 sound/pci/es1968.c if (chunk == NULL) { chunk 1365 sound/pci/es1968.c chunk->buf = buf->buf; chunk 1366 sound/pci/es1968.c chunk->buf.bytes -= size; chunk 1367 sound/pci/es1968.c chunk->buf.area += size; chunk 1368 sound/pci/es1968.c chunk->buf.addr += size; chunk 1369 sound/pci/es1968.c chunk->empty = 1; chunk 1371 sound/pci/es1968.c list_add(&chunk->list, &buf->list); chunk 1381 sound/pci/es1968.c struct esm_memory *chunk; chunk 1386 sound/pci/es1968.c chunk = list_entry(buf->list.prev, struct esm_memory, list); chunk 1387 sound/pci/es1968.c if (chunk->empty) { chunk 1388 sound/pci/es1968.c chunk->buf.bytes += buf->buf.bytes; chunk 1391 sound/pci/es1968.c buf = chunk; chunk 1395 sound/pci/es1968.c chunk = list_entry(buf->list.next, struct esm_memory, list); chunk 1396 sound/pci/es1968.c if (chunk->empty) { chunk 1397 sound/pci/es1968.c buf->buf.bytes += chunk->buf.bytes; chunk 1398 sound/pci/es1968.c list_del(&chunk->list); chunk 1399 sound/pci/es1968.c kfree(chunk); chunk 1413 sound/pci/es1968.c struct esm_memory *chunk = list_entry(p, struct esm_memory, list); chunk 1415 sound/pci/es1968.c kfree(chunk); chunk 1423 sound/pci/es1968.c struct esm_memory *chunk; chunk 1444 sound/pci/es1968.c chunk = kmalloc(sizeof(*chunk), GFP_KERNEL); chunk 1445 sound/pci/es1968.c if (chunk == NULL) { chunk 1450 sound/pci/es1968.c chunk->buf = chip->dma; chunk 1451 sound/pci/es1968.c chunk->buf.area += ESM_MEM_ALIGN; chunk 1452 sound/pci/es1968.c chunk->buf.addr += ESM_MEM_ALIGN; chunk 1453 sound/pci/es1968.c chunk->buf.bytes -= ESM_MEM_ALIGN; chunk 1454 sound/pci/es1968.c chunk->empty = 1; chunk 1455 sound/pci/es1968.c list_add(&chunk->list, &chip->buf_list); chunk 313 sound/pci/lola/lola_pcm.c int chunk; chunk 323 sound/pci/lola/lola_pcm.c chunk = 
snd_pcm_sgbuf_get_chunk_size(substream, ofs, size); chunk 324 sound/pci/lola/lola_pcm.c bdl[2] = cpu_to_le32(chunk); chunk 328 sound/pci/lola/lola_pcm.c size -= chunk; chunk 332 sound/pci/lola/lola_pcm.c ofs += chunk; chunk 53 sound/soc/codecs/sigmadsp.c struct sigma_fw_chunk chunk; chunk 59 sound/soc/codecs/sigmadsp.c struct sigma_fw_chunk chunk; chunk 67 sound/soc/codecs/sigmadsp.c struct sigma_fw_chunk chunk; chunk 198 sound/soc/codecs/sigmadsp.c const struct sigma_fw_chunk *chunk, unsigned int length) chunk 210 sound/soc/codecs/sigmadsp.c ctrl_chunk = (const struct sigma_fw_chunk_control *)chunk; chunk 236 sound/soc/codecs/sigmadsp.c ctrl->samplerates = le32_to_cpu(chunk->samplerates); chunk 249 sound/soc/codecs/sigmadsp.c const struct sigma_fw_chunk *chunk, unsigned int length) chunk 257 sound/soc/codecs/sigmadsp.c data_chunk = (struct sigma_fw_chunk_data *)chunk; chunk 267 sound/soc/codecs/sigmadsp.c data->samplerates = le32_to_cpu(chunk->samplerates); chunk 275 sound/soc/codecs/sigmadsp.c const struct sigma_fw_chunk *chunk, unsigned int length) chunk 282 sound/soc/codecs/sigmadsp.c rate_chunk = (const struct sigma_fw_chunk_samplerate *)chunk; chunk 309 sound/soc/codecs/sigmadsp.c struct sigma_fw_chunk *chunk; chunk 317 sound/soc/codecs/sigmadsp.c if (fw->size < sizeof(*chunk) + sizeof(struct sigma_firmware_header)) chunk 322 sound/soc/codecs/sigmadsp.c while (pos < fw->size - sizeof(*chunk)) { chunk 323 sound/soc/codecs/sigmadsp.c chunk = (struct sigma_fw_chunk *)(fw->data + pos); chunk 325 sound/soc/codecs/sigmadsp.c length = le32_to_cpu(chunk->length); chunk 327 sound/soc/codecs/sigmadsp.c if (length > fw->size - pos || length < sizeof(*chunk)) chunk 330 sound/soc/codecs/sigmadsp.c switch (le32_to_cpu(chunk->tag)) { chunk 332 sound/soc/codecs/sigmadsp.c ret = sigma_fw_load_data(sigmadsp, chunk, length); chunk 335 sound/soc/codecs/sigmadsp.c ret = sigma_fw_load_control(sigmadsp, chunk, length); chunk 338 sound/soc/codecs/sigmadsp.c ret = sigma_fw_load_samplerates(sigmadsp, chunk, length); chunk 342 sound/soc/codecs/sigmadsp.c chunk->tag); chunk 39 sound/soc/sof/intel/hda-stream.c int chunk; chunk 51 sound/soc/sof/intel/hda-stream.c chunk = snd_sgbuf_get_chunk_size(dmab, offset, size); chunk 56 sound/soc/sof/intel/hda-stream.c if (chunk > remain) chunk 57 sound/soc/sof/intel/hda-stream.c chunk = remain; chunk 59 sound/soc/sof/intel/hda-stream.c bdl->size = cpu_to_le32(chunk); chunk 61 sound/soc/sof/intel/hda-stream.c size -= chunk; chunk 65 sound/soc/sof/intel/hda-stream.c offset += chunk; chunk 68 sound/soc/sof/intel/hda-stream.c stream->frags, chunk); chunk 43 sound/usb/line6/midi.c unsigned char chunk[LINE6_FALLBACK_MAXPACKETSIZE]; chunk 48 sound/usb/line6/midi.c done = snd_rawmidi_transmit_peek(substream, chunk, req); chunk 53 sound/usb/line6/midi.c line6_midibuf_write(mb, chunk, done); chunk 58 sound/usb/line6/midi.c done = line6_midibuf_read(mb, chunk, chunk 64 sound/usb/line6/midi.c send_midi_async(line6, chunk, done); chunk 15 tools/testing/radix-tree/main.c void __gang_check(unsigned long middle, long down, long up, int chunk, int hop) chunk 30 tools/testing/radix-tree/main.c if (chunk > 0) { chunk 32 tools/testing/radix-tree/main.c chunk, hop); chunk 33 tools/testing/radix-tree/main.c item_full_scan(&tree, middle - down, down + up, chunk); chunk 118 tools/testing/radix-tree/test.c int chunk, int hop) chunk 120 tools/testing/radix-tree/test.c struct item *items[chunk]; chunk 125 tools/testing/radix-tree/test.c int nr_to_find = chunk; chunk 144 
tools/testing/radix-tree/test.c unsigned long nr, int chunk) chunk 146 tools/testing/radix-tree/test.c struct item *items[chunk]; chunk 155 tools/testing/radix-tree/test.c chunk))) { chunk 165 tools/testing/radix-tree/test.c if (chunk) chunk 168 tools/testing/radix-tree/test.c this_index, chunk); chunk 25 tools/testing/radix-tree/test.h int chunk, int hop); chunk 27 tools/testing/radix-tree/test.h unsigned long nr, int chunk); chunk 1148 tools/testing/selftests/kvm/lib/x86_64/processor.c const uint32_t *chunk; chunk 1157 tools/testing/selftests/kvm/lib/x86_64/processor.c chunk = (const uint32_t *)("GenuineIntel"); chunk 1158 tools/testing/selftests/kvm/lib/x86_64/processor.c return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
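Stepping back from the individual sites, most of the code indexed here reduces to one idiom: clamp the next unit of work to a bounded chunk (a page in the cache-flush loops, a scratch buffer in security/keys/dh.c, a protocol limit in the SCTP and TLS paths), process it, advance the cursors, and repeat until nothing remains. The self-contained userspace sketch below shows that loop shape; copy_in_chunks, min_size and SCRATCH_SIZE are invented names for illustration and correspond to no kernel symbol.

    #include <stddef.h>
    #include <string.h>

    #define SCRATCH_SIZE 64u        /* hypothetical bounce-buffer size */

    static size_t min_size(size_t a, size_t b)
    {
            return a < b ? a : b;
    }

    /* Copy len bytes from src to dst through a fixed-size scratch buffer:
     * clamp, stage, advance, loop -- the same shape as the dh.c, prng.c
     * and cacheflush sites listed above. */
    static void copy_in_chunks(char *dst, const char *src, size_t len)
    {
            char scratch[SCRATCH_SIZE];

            while (len > 0) {
                    size_t chunk = min_size(len, sizeof(scratch));

                    memcpy(scratch, src, chunk);    /* stage into the bounce buffer */
                    memcpy(dst, scratch, chunk);    /* drain it to the destination */

                    src += chunk;
                    dst += chunk;
                    len -= chunk;
            }
    }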