nsegs 64 block/blk-merge.c unsigned *nsegs)
nsegs 71 block/blk-merge.c *nsegs = 1;
nsegs 106 block/blk-merge.c struct bio *bio, struct bio_set *bs, unsigned *nsegs)
nsegs 108 block/blk-merge.c *nsegs = 0;
nsegs 122 block/blk-merge.c unsigned *nsegs)
nsegs 124 block/blk-merge.c *nsegs = 1;
nsegs 194 block/blk-merge.c const struct bio_vec *bv, unsigned *nsegs,
nsegs 203 block/blk-merge.c while (len && *nsegs < max_segs) {
nsegs 207 block/blk-merge.c (*nsegs)++;
nsegs 247 block/blk-merge.c unsigned nsegs = 0, sectors = 0;
nsegs 259 block/blk-merge.c if (nsegs < max_segs &&
nsegs 262 block/blk-merge.c nsegs++;
nsegs 264 block/blk-merge.c } else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
nsegs 273 block/blk-merge.c *segs = nsegs;
nsegs 276 block/blk-merge.c *segs = nsegs;
nsegs 403 block/blk-merge.c unsigned nsegs = 0, total = 0;
nsegs 426 block/blk-merge.c nsegs++;
nsegs 429 block/blk-merge.c return nsegs;
nsegs 468 block/blk-merge.c int nsegs = 0;
nsegs 483 block/blk-merge.c nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
nsegs 485 block/blk-merge.c nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
nsegs 495 block/blk-merge.c return nsegs;
nsegs 506 block/blk-merge.c int nsegs = 0;
nsegs 509 block/blk-merge.c nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, &sg);
nsegs 511 block/blk-merge.c nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, &sg);
nsegs 513 block/blk-merge.c nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
nsegs 534 block/blk-merge.c nsegs++;
nsegs 545 block/blk-merge.c WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
nsegs 547 block/blk-merge.c return nsegs;
nsegs 164 crypto/rsa-pkcs1pad.c int nsegs = next ? 2 : 1;
nsegs 166 crypto/rsa-pkcs1pad.c sg_init_table(sg, nsegs);
nsegs 170 crypto/rsa-pkcs1pad.c sg_chain(sg, nsegs, next);
nsegs 3990 drivers/media/dvb-frontends/dib8000.c int guard, rate_num, rate_denum = 1, bits_per_symbol, nsegs;
nsegs 4032 drivers/media/dvb-frontends/dib8000.c nsegs = c->layer[i].segment_count;
nsegs 4033 drivers/media/dvb-frontends/dib8000.c if (nsegs == 0 || nsegs > 13)
nsegs 4076 drivers/media/dvb-frontends/dib8000.c denom += bits_per_symbol * rate_num * fft_div * nsegs * 384;
nsegs 4197 drivers/media/dvb-frontends/dib8000.c unsigned nsegs = c->layer[i].segment_count;
nsegs 4199 drivers/media/dvb-frontends/dib8000.c if (nsegs == 0 || nsegs > 13)
nsegs 4751 drivers/net/ethernet/broadcom/bnxt/bnxt.c u32 nsegs, n, segs = 0, flags;
nsegs 4773 drivers/net/ethernet/broadcom/bnxt/bnxt.c nsegs = (MAX_SKB_FRAGS - 1) * n;
nsegs 4778 drivers/net/ethernet/broadcom/bnxt/bnxt.c nsegs = (MAX_SKB_FRAGS - n) / n;
nsegs 4785 drivers/net/ethernet/broadcom/bnxt/bnxt.c segs = ilog2(nsegs);
nsegs 479 drivers/net/ethernet/google/gve/gve_tx.c int nsegs;
nsegs 493 drivers/net/ethernet/google/gve/gve_tx.c nsegs = gve_tx_add_skb(tx, skb, &priv->pdev->dev);
nsegs 499 drivers/net/ethernet/google/gve/gve_tx.c tx->req += nsegs;
nsegs 216 drivers/nvme/host/tcp.c int nsegs;
nsegs 221 drivers/nvme/host/tcp.c nsegs = 1;
nsegs 228 drivers/nvme/host/tcp.c nsegs = bio_segments(bio);
nsegs 233 drivers/nvme/host/tcp.c iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
nsegs 3636 drivers/scsi/bfa/bfa_fcpim.c u16 nsegs, idx, per_seg_ios, num_io_req;
nsegs 3670 drivers/scsi/bfa/bfa_fcpim.c nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
nsegs 3673 drivers/scsi/bfa/bfa_fcpim.c bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
nsegs 3693 drivers/scsi/bfa/bfa_fcpim.c u16 idx, nsegs, num_io_req;
nsegs 3706 drivers/scsi/bfa/bfa_fcpim.c nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
nsegs 3708 drivers/scsi/bfa/bfa_fcpim.c bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
nsegs 458 drivers/scsi/bfa/bfa_svc.c u16 nsegs, idx, per_seg_fcxp;
nsegs 471 drivers/scsi/bfa/bfa_svc.c nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
nsegs 474 drivers/scsi/bfa/bfa_svc.c bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
nsegs 5150 drivers/scsi/bfa/bfa_svc.c u16 nsegs, idx, per_seg_sgpg, num_sgpg;
nsegs 5160 drivers/scsi/bfa/bfa_svc.c nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
nsegs 5163 drivers/scsi/bfa/bfa_svc.c bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
nsegs 5188 drivers/scsi/bfa/bfa_svc.c u16 i, idx, nsegs, per_seg_sgpg, num_sgpg;
nsegs 5203 drivers/scsi/bfa/bfa_svc.c nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
nsegs 5208 drivers/scsi/bfa/bfa_svc.c bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {
nsegs 5432 drivers/scsi/bfa/bfa_svc.c u16 nsegs, idx, per_seg_uf = 0;
nsegs 5434 drivers/scsi/bfa/bfa_svc.c nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
nsegs 5437 drivers/scsi/bfa/bfa_svc.c bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
nsegs 563 fs/binfmt_elf_fdpic.c len += sizeof(struct elf32_fdpic_loadseg) * exec_params->loadmap->nsegs;
nsegs 575 fs/binfmt_elf_fdpic.c interp_params->loadmap->nsegs;
nsegs 761 fs/binfmt_elf_fdpic.c loadmap->nsegs = nloads;
nsegs 786 fs/binfmt_elf_fdpic.c for (loop = loadmap->nsegs; loop > 0; loop--, seg++) {
nsegs 811 fs/binfmt_elf_fdpic.c for (loop = loadmap->nsegs; loop > 0; loop--, seg++) {
nsegs 832 fs/binfmt_elf_fdpic.c for (loop = loadmap->nsegs; loop > 0; loop--, seg++) {
nsegs 866 fs/binfmt_elf_fdpic.c nloads = loadmap->nsegs;
nsegs 879 fs/binfmt_elf_fdpic.c loadmap->nsegs--;
nsegs 896 fs/binfmt_elf_fdpic.c for (loop = 0; loop < loadmap->nsegs; loop++, seg++)
nsegs 873 fs/nilfs2/ioctl.c size_t len, nsegs;
nsegs 888 fs/nilfs2/ioctl.c nsegs = argv[4].v_nmembs;
nsegs 891 fs/nilfs2/ioctl.c if (nsegs > UINT_MAX / sizeof(__u64))
nsegs 900 fs/nilfs2/ioctl.c nsegs * sizeof(__u64));
nsegs 912 fs/nilfs2/ioctl.c if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
nsegs 156 fs/nilfs2/sufile.c int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
nsegs 168 fs/nilfs2/sufile.c if (unlikely(nsegs == 0))
nsegs 172 fs/nilfs2/sufile.c for (seg = segnumv; seg < segnumv + nsegs; seg++) {
nsegs 198 fs/nilfs2/sufile.c if (++seg >= segnumv + nsegs)
nsegs 270 fs/nilfs2/sufile.c __u64 nsegs;
nsegs 274 fs/nilfs2/sufile.c nsegs = nilfs_sufile_get_nsegments(sufile);
nsegs 276 fs/nilfs2/sufile.c if (start <= end && end < nsegs) {
nsegs 649 fs/nilfs2/sufile.c unsigned long nsegs, ncleaned;
nsegs 656 fs/nilfs2/sufile.c nsegs = nilfs_sufile_get_nsegments(sufile);
nsegs 659 fs/nilfs2/sufile.c if (start > end || start >= nsegs)
nsegs 751 fs/nilfs2/sufile.c unsigned long nsegs, nrsvsegs;
nsegs 756 fs/nilfs2/sufile.c nsegs = nilfs_sufile_get_nsegments(sufile);
nsegs 757 fs/nilfs2/sufile.c if (nsegs == newnsegs)
nsegs 762 fs/nilfs2/sufile.c if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
nsegs 769 fs/nilfs2/sufile.c if (newnsegs > nsegs) {
nsegs 770 fs/nilfs2/sufile.c sui->ncleansegs += newnsegs - nsegs;
nsegs 772 fs/nilfs2/sufile.c ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
nsegs 776 fs/nilfs2/sufile.c sui->ncleansegs -= nsegs - newnsegs;
nsegs 821 fs/nilfs2/sufile.c unsigned long nsegs, segusages_per_block;
nsegs 828 fs/nilfs2/sufile.c nsegs = min_t(unsigned long,
nsegs 831 fs/nilfs2/sufile.c for (i = 0; i < nsegs; i += n, segnum += n) {
nsegs 835 fs/nilfs2/sufile.c nsegs - i);
nsegs 863 fs/nilfs2/sufile.c ret = nsegs;
nsegs 85 fs/nilfs2/sufile.h size_t nsegs, size_t *ndone)
nsegs 87 fs/nilfs2/sufile.h return nilfs_sufile_updatev(sufile, segnumv, nsegs, 0, ndone,
nsegs 102 fs/nilfs2/sufile.h __u64 *segnumv, size_t nsegs,
nsegs 105 fs/nilfs2/sufile.h return nilfs_sufile_updatev(sufile, segnumv, nsegs, 0, ndone,
nsegs 364 fs/nilfs2/the_nilfs.c unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs)
nsegs 367 fs/nilfs2/the_nilfs.c DIV_ROUND_UP(nsegs * nilfs->ns_r_segments_percentage,
nsegs 371 fs/nilfs2/the_nilfs.c void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs)
nsegs 373 fs/nilfs2/the_nilfs.c nilfs->ns_nsegments = nsegs;
nsegs 374 fs/nilfs2/the_nilfs.c nilfs->ns_nrsvsegs = nilfs_nrsvsegs(nilfs, nsegs);
nsegs 658 fs/nilfs2/the_nilfs.c size_t nsegs)
nsegs 668 fs/nilfs2/the_nilfs.c for (sn = segnump; sn < segnump + nsegs; sn++) {
nsegs 279 fs/nilfs2/the_nilfs.h unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs);
nsegs 280 fs/nilfs2/the_nilfs.h void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs);
nsegs 93 include/trace/events/rpcrdma.h int nsegs
nsegs 96 include/trace/events/rpcrdma.h TP_ARGS(task, pos, mr, nsegs),
nsegs 106 include/trace/events/rpcrdma.h __field(int, nsegs)
nsegs 117 include/trace/events/rpcrdma.h __entry->nsegs = nsegs;
nsegs 124 include/trace/events/rpcrdma.h __entry->nents < __entry->nsegs ? "more" : "last"
nsegs 134 include/trace/events/rpcrdma.h int nsegs \
nsegs 136 include/trace/events/rpcrdma.h TP_ARGS(task, pos, mr, nsegs))
nsegs 142 include/trace/events/rpcrdma.h int nsegs
nsegs 145 include/trace/events/rpcrdma.h TP_ARGS(task, mr, nsegs),
nsegs 154 include/trace/events/rpcrdma.h __field(int, nsegs)
nsegs 164 include/trace/events/rpcrdma.h __entry->nsegs = nsegs;
nsegs 171 include/trace/events/rpcrdma.h __entry->nents < __entry->nsegs ? "more" : "last"
nsegs 180 include/trace/events/rpcrdma.h int nsegs \
nsegs 182 include/trace/events/rpcrdma.h TP_ARGS(task, mr, nsegs))
nsegs 29 include/uapi/linux/elf-fdpic.h Elf32_Half nsegs; /* number of segments */
nsegs 324 net/sunrpc/xprtrdma/frwr_ops.c int nsegs, bool writing, __be32 xid,
nsegs 333 net/sunrpc/xprtrdma/frwr_ops.c if (nsegs > ia->ri_max_frwr_depth)
nsegs 334 net/sunrpc/xprtrdma/frwr_ops.c nsegs = ia->ri_max_frwr_depth;
nsegs 335 net/sunrpc/xprtrdma/frwr_ops.c for (i = 0; i < nsegs;) {
nsegs 349 net/sunrpc/xprtrdma/frwr_ops.c if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
nsegs 348 net/sunrpc/xprtrdma/rpc_rdma.c int nsegs, bool writing,
nsegs 361 net/sunrpc/xprtrdma/rpc_rdma.c return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr);
nsegs 394 net/sunrpc/xprtrdma/rpc_rdma.c int nsegs;
nsegs 403 net/sunrpc/xprtrdma/rpc_rdma.c nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
nsegs 405 net/sunrpc/xprtrdma/rpc_rdma.c if (nsegs < 0)
nsegs 406 net/sunrpc/xprtrdma/rpc_rdma.c return nsegs;
nsegs 409 net/sunrpc/xprtrdma/rpc_rdma.c seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, false, &mr);
nsegs 416 net/sunrpc/xprtrdma/rpc_rdma.c trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
nsegs 418 net/sunrpc/xprtrdma/rpc_rdma.c nsegs -= mr->mr_nents;
nsegs 419 net/sunrpc/xprtrdma/rpc_rdma.c } while (nsegs);
nsegs 448 net/sunrpc/xprtrdma/rpc_rdma.c int nsegs, nchunks;
nsegs 455 net/sunrpc/xprtrdma/rpc_rdma.c nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
nsegs 458 net/sunrpc/xprtrdma/rpc_rdma.c if (nsegs < 0)
nsegs 459 net/sunrpc/xprtrdma/rpc_rdma.c return nsegs;
nsegs 470 net/sunrpc/xprtrdma/rpc_rdma.c seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
nsegs 477 net/sunrpc/xprtrdma/rpc_rdma.c trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs);
nsegs 481 net/sunrpc/xprtrdma/rpc_rdma.c nsegs -= mr->mr_nents;
nsegs 482 net/sunrpc/xprtrdma/rpc_rdma.c } while (nsegs);
nsegs 511 net/sunrpc/xprtrdma/rpc_rdma.c int nsegs, nchunks;
nsegs 518 net/sunrpc/xprtrdma/rpc_rdma.c nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
nsegs 519 net/sunrpc/xprtrdma/rpc_rdma.c if (nsegs < 0)
nsegs 520 net/sunrpc/xprtrdma/rpc_rdma.c return nsegs;
nsegs 531 net/sunrpc/xprtrdma/rpc_rdma.c seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
nsegs 538 net/sunrpc/xprtrdma/rpc_rdma.c trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs);
nsegs 542 net/sunrpc/xprtrdma/rpc_rdma.c nsegs -= mr->mr_nents;
nsegs 543 net/sunrpc/xprtrdma/rpc_rdma.c } while (nsegs);
nsegs 332 net/sunrpc/xprtrdma/svc_rdma_sendto.c unsigned int nsegs;
nsegs 342 net/sunrpc/xprtrdma/svc_rdma_sendto.c nsegs = be32_to_cpup(p++);
nsegs 343 net/sunrpc/xprtrdma/svc_rdma_sendto.c p += nsegs * rpcrdma_segment_maxsz;
nsegs 348 net/sunrpc/xprtrdma/svc_rdma_sendto.c nsegs = be32_to_cpup(p++);
nsegs 349 net/sunrpc/xprtrdma/svc_rdma_sendto.c p += nsegs * rpcrdma_segment_maxsz;
nsegs 364 net/sunrpc/xprtrdma/svc_rdma_sendto.c unsigned int i, nsegs;
nsegs 371 net/sunrpc/xprtrdma/svc_rdma_sendto.c nsegs = be32_to_cpup(src);
nsegs 374 net/sunrpc/xprtrdma/svc_rdma_sendto.c for (i = nsegs; i; i--) {
nsegs 396 net/sunrpc/xprtrdma/svc_rdma_sendto.c return nsegs;
nsegs 410 net/sunrpc/xprtrdma/svc_rdma_sendto.c unsigned int nsegs;
nsegs 418 net/sunrpc/xprtrdma/svc_rdma_sendto.c nsegs = xdr_encode_write_chunk(p, q, consumed);
nsegs 419 net/sunrpc/xprtrdma/svc_rdma_sendto.c q += 2 + nsegs * rpcrdma_segment_maxsz;
nsegs 420 net/sunrpc/xprtrdma/svc_rdma_sendto.c p += 2 + nsegs * rpcrdma_segment_maxsz;
nsegs 556 net/sunrpc/xprtrdma/xprt_rdma.h int nsegs, bool writing, __be32 xid,