nr_segs            65 arch/powerpc/mm/dma-noncoherent.c 	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
nr_segs            84 arch/powerpc/mm/dma-noncoherent.c 	} while (seg_nr < nr_segs);
nr_segs          2214 block/bfq-iosched.c 		unsigned int nr_segs)
nr_segs          2237 block/bfq-iosched.c 	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
nr_segs          1148 block/bio.c    	if (data->nr_segs > UIO_MAXIOV)
nr_segs          1151 block/bio.c    	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
nr_segs          1154 block/bio.c    	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
nr_segs           600 block/blk-core.c 		unsigned int nr_segs)
nr_segs           604 block/blk-core.c 	if (!ll_back_merge_fn(req, bio, nr_segs))
nr_segs           622 block/blk-core.c 		unsigned int nr_segs)
nr_segs           626 block/blk-core.c 	if (!ll_front_merge_fn(req, bio, nr_segs))
nr_segs           693 block/blk-core.c 		unsigned int nr_segs, struct request **same_queue_rq)
nr_segs           722 block/blk-core.c 			merged = bio_attempt_back_merge(rq, bio, nr_segs);
nr_segs           725 block/blk-core.c 			merged = bio_attempt_front_merge(rq, bio, nr_segs);
nr_segs            23 block/blk-map.c 	unsigned int nr_segs = 0;
nr_segs            28 block/blk-map.c 		nr_segs++;
nr_segs            31 block/blk-map.c 		blk_rq_bio_prep(rq, *bio, nr_segs);
nr_segs            33 block/blk-map.c 		if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
nr_segs           294 block/blk-merge.c 		unsigned int *nr_segs)
nr_segs           301 block/blk-merge.c 		split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
nr_segs           305 block/blk-merge.c 				nr_segs);
nr_segs           309 block/blk-merge.c 				nr_segs);
nr_segs           312 block/blk-merge.c 		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
nr_segs           350 block/blk-merge.c 	unsigned int nr_segs;
nr_segs           352 block/blk-merge.c 	__blk_queue_split(q, bio, &nr_segs);
nr_segs           572 block/blk-merge.c int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
nr_segs           585 block/blk-merge.c 	return ll_new_hw_segment(req, bio, nr_segs);
nr_segs           588 block/blk-merge.c int ll_front_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
nr_segs           601 block/blk-merge.c 	return ll_new_hw_segment(req, bio, nr_segs);
nr_segs           227 block/blk-mq-sched.c 		unsigned int nr_segs, struct request **merged_request)
nr_segs           235 block/blk-mq-sched.c 		if (!bio_attempt_back_merge(rq, bio, nr_segs))
nr_segs           244 block/blk-mq-sched.c 		if (!bio_attempt_front_merge(rq, bio, nr_segs))
nr_segs           263 block/blk-mq-sched.c 			   struct bio *bio, unsigned int nr_segs)
nr_segs           281 block/blk-mq-sched.c 						nr_segs);
nr_segs           286 block/blk-mq-sched.c 						nr_segs);
nr_segs           310 block/blk-mq-sched.c 				 unsigned int nr_segs)
nr_segs           316 block/blk-mq-sched.c 	if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
nr_segs           325 block/blk-mq-sched.c 		unsigned int nr_segs)
nr_segs           334 block/blk-mq-sched.c 		return e->type->ops.bio_merge(hctx, bio, nr_segs);
nr_segs           341 block/blk-mq-sched.c 		ret = blk_mq_attempt_merge(q, hctx, ctx, bio, nr_segs);
nr_segs            15 block/blk-mq-sched.h 		unsigned int nr_segs, struct request **merged_request);
nr_segs            17 block/blk-mq-sched.h 		unsigned int nr_segs);
nr_segs            36 block/blk-mq-sched.h 		unsigned int nr_segs)
nr_segs            41 block/blk-mq-sched.h 	return __blk_mq_sched_bio_merge(q, bio, nr_segs);
nr_segs          1784 block/blk-mq.c 		unsigned int nr_segs)
nr_segs          1791 block/blk-mq.c 	blk_rq_bio_prep(rq, bio, nr_segs);
nr_segs          1961 block/blk-mq.c 	unsigned int nr_segs;
nr_segs          1965 block/blk-mq.c 	__blk_queue_split(q, &bio, &nr_segs);
nr_segs          1971 block/blk-mq.c 	    blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
nr_segs          1974 block/blk-mq.c 	if (blk_mq_sched_bio_merge(q, bio, nr_segs))
nr_segs          1994 block/blk-mq.c 	blk_mq_bio_to_request(rq, bio, nr_segs);
nr_segs           111 block/blk.h    		unsigned int nr_segs)
nr_segs           113 block/blk.h    	rq->nr_phys_segments = nr_segs;
nr_segs           180 block/blk.h    		unsigned int nr_segs);
nr_segs           182 block/blk.h    		unsigned int nr_segs);
nr_segs           186 block/blk.h    		unsigned int nr_segs, struct request **same_queue_rq);
nr_segs           230 block/blk.h    		unsigned int *nr_segs);
nr_segs           232 block/blk.h    		unsigned int nr_segs);
nr_segs           234 block/blk.h    		unsigned int nr_segs);
nr_segs           566 block/kyber-iosched.c 		unsigned int nr_segs)
nr_segs           576 block/kyber-iosched.c 	merged = blk_mq_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);
nr_segs           463 block/mq-deadline.c 		unsigned int nr_segs)
nr_segs           471 block/mq-deadline.c 	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
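
Taken together, the block-layer hits above show nr_segs being computed once and then threaded through the whole submission path: __blk_queue_split() (called at block/blk-mq.c:1965 above) splits the bio and reports its physical segment count, the plug and elevator merge helpers reuse that count instead of re-walking the bio, and blk_mq_bio_to_request()/blk_rq_bio_prep() (block/blk-mq.c:1784, block/blk.h:111) finally store it in rq->nr_phys_segments. The sketch below paraphrases that flow for orientation only; it is not verbatim kernel code, request allocation is elided, and it would only build inside block/ next to blk-mq.c because the helpers it calls are internal.

    /* assumes the internal headers "blk.h" and "blk-mq-sched.h" from block/ */
    static blk_qc_t nr_segs_flow_sketch(struct request_queue *q, struct bio *bio,
                                        struct request *rq)
    {
            struct request *same_queue_rq = NULL;
            unsigned int nr_segs;

            __blk_queue_split(q, &bio, &nr_segs);           /* split bio, count physical segments */

            if (blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
                    return BLK_QC_T_NONE;                   /* merged into a plugged request */
            if (blk_mq_sched_bio_merge(q, bio, nr_segs))
                    return BLK_QC_T_NONE;                   /* merged by the I/O scheduler */

            /* request allocation elided; 'rq' stands in for the newly allocated request */
            blk_mq_bio_to_request(rq, bio, nr_segs);        /* via blk_rq_bio_prep(): rq->nr_phys_segments = nr_segs */
            return BLK_QC_T_NONE;
    }
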
nr_segs           759 drivers/block/xen-blkback/blkback.c 	invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
nr_segs           949 drivers/block/xen-blkback/blkback.c 			   pending_req->nr_segs,
nr_segs           965 drivers/block/xen-blkback/blkback.c 	nseg = pending_req->nr_segs;
nr_segs          1295 drivers/block/xen-blkback/blkback.c 	pending_req->nr_segs   = nseg;
nr_segs          1418 drivers/block/xen-blkback/blkback.c 	                pending_req->nr_segs);
nr_segs           345 drivers/block/xen-blkback/common.h 	int			nr_segs;
nr_segs            73 drivers/hwtracing/intel_th/msu.c 	unsigned int		nr_segs;
nr_segs           326 drivers/hwtracing/intel_th/msu.c 	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
nr_segs           421 drivers/hwtracing/intel_th/msu.c 	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
nr_segs           660 drivers/hwtracing/intel_th/msu.c 		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
nr_segs           990 drivers/hwtracing/intel_th/msu.c 				  unsigned int nr_segs)
nr_segs           996 drivers/hwtracing/intel_th/msu.c 	ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);
nr_segs          1000 drivers/hwtracing/intel_th/msu.c 	for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
nr_segs          1010 drivers/hwtracing/intel_th/msu.c 	return nr_segs;
nr_segs          1023 drivers/hwtracing/intel_th/msu.c static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs)
nr_segs          1028 drivers/hwtracing/intel_th/msu.c 	for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
nr_segs          1040 drivers/hwtracing/intel_th/msu.c 	for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
nr_segs          1048 drivers/hwtracing/intel_th/msu.c msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs) {}
nr_segs          1098 drivers/hwtracing/intel_th/msu.c 	win->nr_segs = ret;
nr_segs          1123 drivers/hwtracing/intel_th/msu.c 	for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
nr_segs          1190 drivers/hwtracing/intel_th/msu.c 		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
nr_segs          1201 drivers/hwtracing/intel_th/msu.c 			if (blk == win->nr_segs - 1) {
nr_segs          1385 drivers/hwtracing/intel_th/msu.c 	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
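
In the intel_th/msu.c hits above, nr_segs is the number of scatterlist entries backing a capture window: the allocation path at line 990 sizes the sg_table with it, and every later walk passes win->nr_segs to for_each_sg(). The generic shape, reduced to the scatterlist API only (msu.c itself allocates DMA-coherent blocks; here a caller-provided page array is assumed and the function name is made up for illustration):

    #include <linux/scatterlist.h>

    static int table_from_pages(struct sg_table *sgt, struct page **pages,
                                unsigned int nr_segs)
    {
            struct scatterlist *sg;
            unsigned int i;
            int ret;

            /* one scatterlist entry per segment */
            ret = sg_alloc_table(sgt, nr_segs, GFP_KERNEL);
            if (ret)
                    return ret;

            /* one PAGE_SIZE entry per segment, mirroring the loop at msu.c:1000 above */
            for_each_sg(sgt->sgl, sg, nr_segs, i)
                    sg_set_page(sg, pages[i], PAGE_SIZE, 0);

            return 0;
    }
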
nr_segs           308 drivers/infiniband/hw/hfi1/file_ops.c 	unsigned long dim = from->nr_segs;
nr_segs          2248 drivers/infiniband/hw/qib/qib_file_ops.c 	if (!iter_is_iovec(from) || !from->nr_segs || !pq)
nr_segs          2251 drivers/infiniband/hw/qib/qib_file_ops.c 	return qib_user_sdma_writev(rcd, pq, from->iov, from->nr_segs);
nr_segs          1471 drivers/net/tun.c 	if (it->nr_segs > MAX_SKB_FRAGS + 1)
nr_segs          1489 drivers/net/tun.c 	for (i = 1; i < it->nr_segs; i++) {
nr_segs            91 drivers/nvme/target/io-cmd-file.c 		unsigned long nr_segs, size_t count, int ki_flags)
nr_segs           108 drivers/nvme/target/io-cmd-file.c 	iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);
nr_segs          1058 drivers/scsi/xen-scsifront.c 	unsigned int sg_grant, nr_segs;
nr_segs          1062 drivers/scsi/xen-scsifront.c 	nr_segs = min_t(unsigned int, sg_grant, SG_ALL);
nr_segs          1063 drivers/scsi/xen-scsifront.c 	nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE);
nr_segs          1064 drivers/scsi/xen-scsifront.c 	nr_segs = min_t(unsigned int, nr_segs,
nr_segs          1069 drivers/scsi/xen-scsifront.c 		dev_info(&dev->dev, "using up to %d SG entries\n", nr_segs);
nr_segs          1070 drivers/scsi/xen-scsifront.c 	else if (info->pause && nr_segs < host->sg_tablesize)
nr_segs          1073 drivers/scsi/xen-scsifront.c 			 host->sg_tablesize, nr_segs);
nr_segs          1075 drivers/scsi/xen-scsifront.c 	host->sg_tablesize = nr_segs;
nr_segs          1076 drivers/scsi/xen-scsifront.c 	host->max_sectors = (nr_segs - 1) * PAGE_SIZE / 512;
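
For scale, the last xen-scsifront line above turns the negotiated segment count into a per-command size cap: with 4 KiB pages, nr_segs = 128 would give host->max_sectors = (128 - 1) * 4096 / 512 = 1016 sectors, i.e. 508 KiB per request (128 is only an illustrative value, not a Xen default).
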
nr_segs           341 fs/afs/rxrpc.c 		nr = msg->msg_iter.nr_segs;
nr_segs          8723 fs/btrfs/inode.c 	for (seg = 0; seg < iter->nr_segs; seg++) {
nr_segs          8724 fs/btrfs/inode.c 		for (i = seg + 1; i < iter->nr_segs; i++) {
nr_segs           747 fs/cifs/smb1ops.c 		struct kvec *iov, unsigned long nr_segs)
nr_segs           751 fs/cifs/smb1ops.c 	return CIFSSMBWrite2(xid, parms, written, iov, nr_segs);
nr_segs          1782 fs/cifs/smb2ops.c 		struct kvec *iov, unsigned long nr_segs)
nr_segs          1787 fs/cifs/smb2ops.c 	return SMB2_write(xid, parms, written, iov, nr_segs);
nr_segs           644 fs/fuse/dev.c  	unsigned long nr_segs;
nr_segs           700 fs/fuse/dev.c  			BUG_ON(!cs->nr_segs);
nr_segs           706 fs/fuse/dev.c  			cs->nr_segs--;
nr_segs           708 fs/fuse/dev.c  			if (cs->nr_segs == cs->pipe->buffers)
nr_segs           724 fs/fuse/dev.c  			cs->nr_segs++;
nr_segs           799 fs/fuse/dev.c  	BUG_ON(!cs->nr_segs);
nr_segs           803 fs/fuse/dev.c  	cs->nr_segs--;
nr_segs           884 fs/fuse/dev.c  	if (cs->nr_segs == cs->pipe->buffers)
nr_segs           900 fs/fuse/dev.c  	cs->nr_segs++;
nr_segs          1358 fs/fuse/dev.c  	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
nr_segs          1363 fs/fuse/dev.c  	for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
nr_segs          1377 fs/fuse/dev.c  	for (; page_nr < cs.nr_segs; page_nr++)
nr_segs          2002 fs/fuse/dev.c  	cs.nr_segs = nbuf;
nr_segs          1230 fs/io_uring.c  			iter->nr_segs -= seg_skip;
nr_segs          2297 fs/ocfs2/file.c 		(unsigned int)from->nr_segs);	/* GRRRRR */
nr_segs          2445 fs/ocfs2/file.c 			to->nr_segs);	/* GRRRRR */
nr_segs           765 fs/read_write.c 			      unsigned long nr_segs, unsigned long fast_segs,
nr_segs           778 fs/read_write.c 	if (nr_segs == 0) {
nr_segs           787 fs/read_write.c 	if (nr_segs > UIO_MAXIOV) {
nr_segs           791 fs/read_write.c 	if (nr_segs > fast_segs) {
nr_segs           792 fs/read_write.c 		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
nr_segs           798 fs/read_write.c 	if (copy_from_user(iov, uvector, nr_segs*sizeof(*uvector))) {
nr_segs           813 fs/read_write.c 	for (seg = 0; seg < nr_segs; seg++) {
nr_segs           841 fs/read_write.c 		const struct compat_iovec __user *uvector, unsigned long nr_segs,
nr_segs           855 fs/read_write.c 	if (nr_segs == 0)
nr_segs           859 fs/read_write.c 	if (nr_segs > UIO_MAXIOV)
nr_segs           861 fs/read_write.c 	if (nr_segs > fast_segs) {
nr_segs           863 fs/read_write.c 		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
nr_segs           870 fs/read_write.c 	if (!access_ok(uvector, nr_segs*sizeof(*uvector)))
nr_segs           883 fs/read_write.c 	for (seg = 0; seg < nr_segs; seg++) {
nr_segs          1362 fs/splice.c    		unsigned long, nr_segs, unsigned int, flags)
nr_segs          1376 fs/splice.c    	error = import_iovec(type, uiov, nr_segs,
nr_segs          1388 fs/splice.c    		    unsigned int, nr_segs, unsigned int, flags)
nr_segs          1402 fs/splice.c    	error = compat_import_iovec(type, iov32, nr_segs,
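
In the readv/preadv and vmsplice paths above (fs/read_write.c:765-883, fs/splice.c:1362-1402), nr_segs arrives straight from the syscall and is validated against UIO_MAXIOV before the user iovec array is copied in; import_iovec() (declared at include/linux/uio.h:271, defined at lib/iov_iter.c:1640 below) wraps that validation and hands back a ready iov_iter. A minimal sketch of the usual calling pattern; the wrapper name import_user_vector() is made up here for illustration:

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/uio.h>

    static ssize_t import_user_vector(const struct iovec __user *uiov,
                                      unsigned long nr_segs, struct iov_iter *iter)
    {
            struct iovec iovstack[UIO_FASTIOV];     /* fast path: no allocation for small counts */
            struct iovec *iov = iovstack;
            ssize_t ret;

            /* rejects nr_segs > UIO_MAXIOV and bad user pointers, then fills *iter */
            ret = import_iovec(READ, uiov, nr_segs, ARRAY_SIZE(iovstack), &iov, iter);
            if (ret < 0)
                    return ret;

            /* ... consume 'iter' here, e.g. via call_read_iter() ... */

            kfree(iov);     /* import_iovec() left this NULL if the stack array sufficed */
            return ret;
    }
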
nr_segs           485 include/linux/compat.h 		unsigned long nr_segs,
nr_segs           640 include/linux/compat.h 				    unsigned int nr_segs, unsigned int flags);
nr_segs          1912 include/linux/fs.h 			      unsigned long nr_segs, unsigned long fast_segs,
nr_segs           504 include/linux/syscalls.h 			     unsigned long nr_segs, unsigned int flags);
nr_segs            46 include/linux/uio.h 		unsigned long nr_segs;
nr_segs            96 include/linux/uio.h static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
nr_segs           101 include/linux/uio.h 	for (seg = 0; seg < nr_segs; seg++)
nr_segs           217 include/linux/uio.h 			unsigned long nr_segs, size_t count);
nr_segs           219 include/linux/uio.h 			unsigned long nr_segs, size_t count);
nr_segs           221 include/linux/uio.h 			unsigned long nr_segs, size_t count);
nr_segs           271 include/linux/uio.h 		 unsigned nr_segs, unsigned fast_segs,
nr_segs           277 include/linux/uio.h 		 unsigned nr_segs, unsigned fast_segs,
nr_segs           785 include/trace/events/afs.h 		    __entry->nr = msg->msg_iter.nr_segs;
nr_segs           108 lib/iov_iter.c 			i->nr_segs -= i->bvec - bvec;		\
nr_segs           118 lib/iov_iter.c 			i->nr_segs -= kvec - i->kvec;		\
nr_segs           130 lib/iov_iter.c 			i->nr_segs -= iov - i->iov;		\
nr_segs           234 lib/iov_iter.c 	i->nr_segs -= iov - i->iov;
nr_segs           318 lib/iov_iter.c 	i->nr_segs -= iov - i->iov;
nr_segs           437 lib/iov_iter.c 			const struct iovec *iov, unsigned long nr_segs,
nr_segs           451 lib/iov_iter.c 	i->nr_segs = nr_segs;
nr_segs          1089 lib/iov_iter.c 			i->nr_segs++;
nr_segs          1101 lib/iov_iter.c 			i->nr_segs++;
nr_segs          1120 lib/iov_iter.c 	if (i->nr_segs == 1)
nr_segs          1132 lib/iov_iter.c 			const struct kvec *kvec, unsigned long nr_segs,
nr_segs          1138 lib/iov_iter.c 	i->nr_segs = nr_segs;
nr_segs          1145 lib/iov_iter.c 			const struct bio_vec *bvec, unsigned long nr_segs,
nr_segs          1151 lib/iov_iter.c 	i->nr_segs = nr_segs;
nr_segs          1607 lib/iov_iter.c 				    new->nr_segs * sizeof(struct bio_vec),
nr_segs          1612 lib/iov_iter.c 				   new->nr_segs * sizeof(struct iovec),
nr_segs          1640 lib/iov_iter.c 		 unsigned nr_segs, unsigned fast_segs,
nr_segs          1645 lib/iov_iter.c 	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
nr_segs          1653 lib/iov_iter.c 	iov_iter_init(i, type, p, nr_segs, n);
nr_segs          1664 lib/iov_iter.c 		unsigned nr_segs, unsigned fast_segs,
nr_segs          1669 lib/iov_iter.c 	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
nr_segs          1677 lib/iov_iter.c 	iov_iter_init(i, type, p, nr_segs, n);
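
The uio.h and iov_iter.c hits above are the core definition: nr_segs in struct iov_iter (include/linux/uio.h:46) is the number of iovec/kvec/bio_vec elements backing the iterator, set by iov_iter_init()/iov_iter_kvec()/iov_iter_bvec() and decremented as whole segments are consumed (lib/iov_iter.c:108-130). A minimal sketch of a two-segment kernel-memory iterator, assuming caller-provided buffers ('hdr' and 'payload' are made-up names):

    #include <linux/kernel.h>
    #include <linux/uio.h>

    static size_t fill_two_segment_iter(void *hdr, size_t hdr_len,
                                        void *payload, size_t payload_len,
                                        const void *src, size_t src_len)
    {
            struct kvec vec[2] = {
                    { .iov_base = hdr,     .iov_len = hdr_len     },
                    { .iov_base = payload, .iov_len = payload_len },
            };
            struct iov_iter iter;

            /* nr_segs = 2: the iterator walks both kvec entries in order */
            iov_iter_kvec(&iter, READ, vec, ARRAY_SIZE(vec), hdr_len + payload_len);

            /* copy_to_iter() crosses the segment boundary transparently and
             * returns the number of bytes actually copied */
            return copy_to_iter(src, src_len, &iter);
    }
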
nr_segs           151 mm/swap.c      int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
nr_segs           156 mm/swap.c      	for (seg = 0; seg < nr_segs; seg++) {
nr_segs           339 net/ipv4/tcp_input.c 	u32 nr_segs;
nr_segs           351 net/ipv4/tcp_input.c 	nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd);
nr_segs           352 net/ipv4/tcp_input.c 	nr_segs = max_t(u32, nr_segs, tp->reordering + 1);
nr_segs           359 net/ipv4/tcp_input.c 	sndmem *= nr_segs * per_mss;
nr_segs          3129 sound/core/pcm_native.c 	if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)
nr_segs          3134 sound/core/pcm_native.c 	bufs = kmalloc_array(to->nr_segs, sizeof(void *), GFP_KERNEL);
nr_segs          3137 sound/core/pcm_native.c 	for (i = 0; i < to->nr_segs; ++i)
nr_segs          3165 sound/core/pcm_native.c 	if (from->nr_segs > 128 || from->nr_segs != runtime->channels ||
nr_segs          3169 sound/core/pcm_native.c 	bufs = kmalloc_array(from->nr_segs, sizeof(void *), GFP_KERNEL);
nr_segs          3172 sound/core/pcm_native.c 	for (i = 0; i < from->nr_segs; ++i)