sgs                29 arch/powerpc/platforms/powernv/opal-sensor-groups.c } *sgs;
sgs               171 arch/powerpc/platforms/powernv/opal-sensor-groups.c 	sgs = kcalloc(of_get_child_count(sg), sizeof(*sgs), GFP_KERNEL);
sgs               172 arch/powerpc/platforms/powernv/opal-sensor-groups.c 	if (!sgs)
sgs               193 arch/powerpc/platforms/powernv/opal-sensor-groups.c 		sgs[i].sgattrs = kcalloc(nr_attrs, sizeof(*sgs[i].sgattrs),
sgs               195 arch/powerpc/platforms/powernv/opal-sensor-groups.c 		if (!sgs[i].sgattrs)
sgs               198 arch/powerpc/platforms/powernv/opal-sensor-groups.c 		sgs[i].sg.attrs = kcalloc(nr_attrs + 1,
sgs               199 arch/powerpc/platforms/powernv/opal-sensor-groups.c 					  sizeof(*sgs[i].sg.attrs),
sgs               202 arch/powerpc/platforms/powernv/opal-sensor-groups.c 		if (!sgs[i].sg.attrs) {
sgs               203 arch/powerpc/platforms/powernv/opal-sensor-groups.c 			kfree(sgs[i].sgattrs);
sgs               213 arch/powerpc/platforms/powernv/opal-sensor-groups.c 			sprintf(sgs[i].name, "%pOFn%d", node, chipid);
sgs               215 arch/powerpc/platforms/powernv/opal-sensor-groups.c 			sprintf(sgs[i].name, "%pOFn", node);
sgs               217 arch/powerpc/platforms/powernv/opal-sensor-groups.c 		sgs[i].sg.name = sgs[i].name;
sgs               218 arch/powerpc/platforms/powernv/opal-sensor-groups.c 		if (add_attr_group(ops, len, &sgs[i], sgid)) {
sgs               220 arch/powerpc/platforms/powernv/opal-sensor-groups.c 				sgs[i].sg.name);
sgs               230 arch/powerpc/platforms/powernv/opal-sensor-groups.c 		kfree(sgs[i].sgattrs);
sgs               231 arch/powerpc/platforms/powernv/opal-sensor-groups.c 		kfree(sgs[i].sg.attrs);
sgs               235 arch/powerpc/platforms/powernv/opal-sensor-groups.c 	kfree(sgs);
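In the opal-sensor-groups fragments above, sgs is an array with one entry per device-tree child node, allocated with kcalloc(); each entry then gets its own attribute array plus a NULL-terminated attribute-pointer array, and everything allocated so far is unwound on failure. A minimal stand-alone sketch of that allocate-then-unwind pattern, using hypothetical names (demo_group, alloc_groups) rather than the driver's real structures:

/* Sketch only: allocate N group slots, then a per-group pointer array;
 * on any failure, free what already succeeded in reverse order.
 */
#include <stdlib.h>

struct demo_group {
        char **attrs;           /* NULL-terminated pointer array, like sg.attrs */
};

static struct demo_group *alloc_groups(size_t ngroups, size_t nattrs)
{
        struct demo_group *grp = calloc(ngroups, sizeof(*grp));
        size_t i;

        if (!grp)
                return NULL;

        for (i = 0; i < ngroups; i++) {
                grp[i].attrs = calloc(nattrs + 1, sizeof(*grp[i].attrs));
                if (!grp[i].attrs)
                        goto unwind;
        }
        return grp;

unwind:
        while (i--)             /* only the entries that did succeed */
                free(grp[i].attrs);
        free(grp);
        return NULL;
}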
sgs               107 drivers/block/virtio_blk.c 	struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
sgs               111 drivers/block/virtio_blk.c 	sgs[num_out++] = &hdr;
sgs               113 drivers/block/virtio_blk.c 	sgs[num_out++] = &cmd;
sgs               117 drivers/block/virtio_blk.c 			sgs[num_out++] = data_sg;
sgs               119 drivers/block/virtio_blk.c 			sgs[num_out + num_in++] = data_sg;
sgs               123 drivers/block/virtio_blk.c 	sgs[num_out + num_in++] = &sense;
sgs               125 drivers/block/virtio_blk.c 	sgs[num_out + num_in++] = &inhdr;
sgs               127 drivers/block/virtio_blk.c 	sgs[num_out + num_in++] = &status;
sgs               129 drivers/block/virtio_blk.c 	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
sgs               174 drivers/block/virtio_blk.c 	struct scatterlist hdr, status, *sgs[3];
sgs               178 drivers/block/virtio_blk.c 	sgs[num_out++] = &hdr;
sgs               182 drivers/block/virtio_blk.c 			sgs[num_out++] = data_sg;
sgs               184 drivers/block/virtio_blk.c 			sgs[num_out + num_in++] = data_sg;
sgs               188 drivers/block/virtio_blk.c 	sgs[num_out + num_in++] = &status;
sgs               190 drivers/block/virtio_blk.c 	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
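The virtio-blk fragments show the calling convention every virtqueue_add_sgs() user in this listing follows: device-readable ("out") scatterlists are placed in the sgs[] array first, device-writable ("in") ones after them, and the two counts are passed separately. A minimal kernel-style sketch of that ordering, assuming a hypothetical request layout (one readable header, one writable status byte); queue_simple_req and its parameters are illustrative, not part of the real driver:

/* Sketch only: one out entry followed by one in entry. */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>

static int queue_simple_req(struct virtqueue *vq, void *hdr, size_t hdr_len,
                            u8 *status, void *cookie)
{
        struct scatterlist hdr_sg, status_sg, *sgs[2];
        unsigned int num_out = 0, num_in = 0;

        sg_init_one(&hdr_sg, hdr, hdr_len);
        sgs[num_out++] = &hdr_sg;               /* device reads this */

        sg_init_one(&status_sg, status, sizeof(*status));
        sgs[num_out + num_in++] = &status_sg;   /* device writes this */

        return virtqueue_add_sgs(vq, sgs, num_out, num_in, cookie, GFP_ATOMIC);
}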
sgs               119 drivers/crypto/virtio/virtio_crypto_algs.c 	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
sgs               155 drivers/crypto/virtio/virtio_crypto_algs.c 	sgs[num_out++] = &outhdr;
sgs               159 drivers/crypto/virtio/virtio_crypto_algs.c 	sgs[num_out++] = &key_sg;
sgs               163 drivers/crypto/virtio/virtio_crypto_algs.c 	sgs[num_out + num_in++] = &inhdr;
sgs               165 drivers/crypto/virtio/virtio_crypto_algs.c 	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
sgs               207 drivers/crypto/virtio/virtio_crypto_algs.c 	struct scatterlist outhdr, status_sg, *sgs[2];
sgs               232 drivers/crypto/virtio/virtio_crypto_algs.c 	sgs[num_out++] = &outhdr;
sgs               237 drivers/crypto/virtio/virtio_crypto_algs.c 	sgs[num_out + num_in++] = &status_sg;
sgs               239 drivers/crypto/virtio/virtio_crypto_algs.c 	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
sgs               355 drivers/crypto/virtio/virtio_crypto_algs.c 	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
sgs               375 drivers/crypto/virtio/virtio_crypto_algs.c 	sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
sgs               377 drivers/crypto/virtio/virtio_crypto_algs.c 	if (!sgs)
sgs               383 drivers/crypto/virtio/virtio_crypto_algs.c 		kfree(sgs);
sgs               429 drivers/crypto/virtio/virtio_crypto_algs.c 	sgs[num_out++] = &outhdr;
sgs               450 drivers/crypto/virtio/virtio_crypto_algs.c 	sgs[num_out++] = &iv_sg;
sgs               455 drivers/crypto/virtio/virtio_crypto_algs.c 		sgs[num_out++] = sg;
sgs               459 drivers/crypto/virtio/virtio_crypto_algs.c 		sgs[num_out + num_in++] = sg;
sgs               463 drivers/crypto/virtio/virtio_crypto_algs.c 	sgs[num_out + num_in++] = &status_sg;
sgs               465 drivers/crypto/virtio/virtio_crypto_algs.c 	vc_req->sgs = sgs;
sgs               468 drivers/crypto/virtio/virtio_crypto_algs.c 	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
sgs               481 drivers/crypto/virtio/virtio_crypto_algs.c 	kfree(sgs);
sgs                94 drivers/crypto/virtio/virtio_crypto_common.h 	struct scatterlist **sgs;
sgs                21 drivers/crypto/virtio/virtio_crypto_core.c 		kfree(vc_req->sgs);
sgs               426 drivers/crypto/virtio/virtio_crypto_core.c 			kfree(vc_req->sgs);
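Unlike virtio-blk's fixed sgs[6] on the stack, the virtio-crypto data path above sizes the pointer array at runtime with kcalloc_node(), stores it in the request (vc_req->sgs), and frees it from the completion path. A hedged sketch of that dynamic variant with hypothetical names (my_req, queue_dynamic_req); only the allocate/fill/submit/free-on-error shape is taken from the fragments above:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>

struct my_req {                         /* hypothetical request owning its sgs */
        struct scatterlist **sgs;
};

static int queue_dynamic_req(struct virtqueue *vq, struct my_req *req,
                             struct scatterlist **src, unsigned int n_out,
                             struct scatterlist **dst, unsigned int n_in)
{
        unsigned int i, num = 0;
        int err;

        req->sgs = kcalloc(n_out + n_in, sizeof(*req->sgs), GFP_KERNEL);
        if (!req->sgs)
                return -ENOMEM;

        for (i = 0; i < n_out; i++)
                req->sgs[num++] = src[i];       /* device-readable first */
        for (i = 0; i < n_in; i++)
                req->sgs[num++] = dst[i];       /* device-writable after */

        err = virtqueue_add_sgs(vq, req->sgs, n_out, n_in, req, GFP_KERNEL);
        if (err) {
                kfree(req->sgs);                /* completion won't run; free now */
                req->sgs = NULL;
        }
        return err;
}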
sgs               261 drivers/gpu/drm/virtio/virtgpu_vq.c 	struct scatterlist *sgs[3], vcmd, vout, vresp;
sgs               269 drivers/gpu/drm/virtio/virtgpu_vq.c 	sgs[outcnt + incnt] = &vcmd;
sgs               274 drivers/gpu/drm/virtio/virtgpu_vq.c 		sgs[outcnt + incnt] = &vout;
sgs               280 drivers/gpu/drm/virtio/virtgpu_vq.c 		sgs[outcnt + incnt] = &vresp;
sgs               285 drivers/gpu/drm/virtio/virtgpu_vq.c 	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
sgs               350 drivers/gpu/drm/virtio/virtgpu_vq.c 	struct scatterlist *sgs[1], ccmd;
sgs               358 drivers/gpu/drm/virtio/virtgpu_vq.c 	sgs[0] = &ccmd;
sgs               363 drivers/gpu/drm/virtio/virtgpu_vq.c 	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
sgs              1635 drivers/net/virtio_net.c 	struct scatterlist *sgs[4], hdr, stat;
sgs              1646 drivers/net/virtio_net.c 	sgs[out_num++] = &hdr;
sgs              1649 drivers/net/virtio_net.c 		sgs[out_num++] = out;
sgs              1653 drivers/net/virtio_net.c 	sgs[out_num] = &stat;
sgs              1655 drivers/net/virtio_net.c 	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
sgs              1656 drivers/net/virtio_net.c 	virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
sgs                43 drivers/nvdimm/nd_virtio.c 	struct scatterlist *sgs[2], sg, ret;
sgs                58 drivers/nvdimm/nd_virtio.c 	sgs[0] = &sg;
sgs                60 drivers/nvdimm/nd_virtio.c 	sgs[1] = &ret;
sgs                69 drivers/nvdimm/nd_virtio.c 	while ((err = virtqueue_add_sgs(vpmem->req_vq, sgs, 1, 1, req_data,
sgs              1693 drivers/nvme/host/core.c 	if (ctrl->nr_streams && ns->sws && ns->sgs)
sgs              1694 drivers/nvme/host/core.c 		size *= ns->sws * ns->sgs;
sgs              3467 drivers/nvme/host/core.c 	ns->sgs = le16_to_cpu(s.sgs);
sgs              3473 drivers/nvme/host/core.c 		if (ns->sgs)
sgs              3474 drivers/nvme/host/core.c 			blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs);
sgs               369 drivers/nvme/host/nvme.h 	u16 sgs;
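In the NVMe fragments, sgs is the stream granularity size reported via the Streams directive, expressed in units of the stream write size sws, so the advertised optimal I/O size becomes logical block size * sws * sgs (see the blk_queue_io_opt() call above). A worked example of that arithmetic with invented values, not data from a real controller:

#include <stdio.h>

int main(void)
{
        /* assumed example values: 4 KiB blocks, sws = 16 blocks, sgs = 64 */
        unsigned int bs = 4096, sws = 16, sgs = 64;

        printf("io_opt = %u bytes\n", bs * sws * sgs);  /* 4194304, i.e. 4 MiB */
        return 0;
}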
sgs               410 drivers/scsi/virtio_scsi.c 	struct scatterlist *sgs[6], req, resp;
sgs               425 drivers/scsi/virtio_scsi.c 	sgs[out_num++] = &req;
sgs               431 drivers/scsi/virtio_scsi.c 			sgs[out_num++] = scsi_prot_sglist(sc);
sgs               432 drivers/scsi/virtio_scsi.c 		sgs[out_num++] = out->sgl;
sgs               437 drivers/scsi/virtio_scsi.c 	sgs[out_num + in_num++] = &resp;
sgs               443 drivers/scsi/virtio_scsi.c 			sgs[out_num + in_num++] = scsi_prot_sglist(sc);
sgs               444 drivers/scsi/virtio_scsi.c 		sgs[out_num + in_num++] = in->sgl;
sgs               447 drivers/scsi/virtio_scsi.c 	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
sgs               172 drivers/spi/spi-mxs.c 	const int sgs = DIV_ROUND_UP(len, desc_len);
sgs               185 drivers/spi/spi-mxs.c 	dma_xfer = kcalloc(sgs, sizeof(*dma_xfer), GFP_KERNEL);
sgs               201 drivers/spi/spi-mxs.c 	for (sg_count = 0; sg_count < sgs; sg_count++) {
sgs               209 drivers/spi/spi-mxs.c 		if ((sg_count + 1 == sgs) && (flags & TXRX_DEASSERT_CS))
sgs               820 drivers/spi/spi.c 	int sgs;
sgs               829 drivers/spi/spi.c 		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
sgs               832 drivers/spi/spi.c 		sgs = DIV_ROUND_UP(len, desc_len);
sgs               837 drivers/spi/spi.c 	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
sgs               842 drivers/spi/spi.c 	for (i = 0; i < sgs; i++) {
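In the SPI core fragments, sgs is simply an entry count: the transfer length is rounded up to the number of scatterlist segments needed, with the offset into the first page added for vmalloc'ed buffers, and the sg_table is then sized from it. A small userspace sketch of the same rounding; desc_len = 4096 is an assumption standing in for PAGE_SIZE:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long desc_len = 4096;
        unsigned long len = 8192, offset_in_first_page = 512;

        /* contiguous buffer: only the length matters */
        printf("sgs = %lu\n", DIV_ROUND_UP(len, desc_len));                         /* 2 */

        /* vmalloc-style buffer: the start offset costs an extra segment here */
        printf("sgs = %lu\n", DIV_ROUND_UP(len + offset_in_first_page, desc_len));  /* 3 */
        return 0;
}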
sgs               416 drivers/virtio/virtio_ring.c 				      struct scatterlist *sgs[],
sgs               482 drivers/virtio/virtio_ring.c 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
sgs               495 drivers/virtio/virtio_ring.c 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
sgs               980 drivers/virtio/virtio_ring.c 				       struct scatterlist *sgs[],
sgs              1008 drivers/virtio/virtio_ring.c 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
sgs              1092 drivers/virtio/virtio_ring.c 				       struct scatterlist *sgs[],
sgs              1122 drivers/virtio/virtio_ring.c 		return virtqueue_add_indirect_packed(vq, sgs, total_sg,
sgs              1147 drivers/virtio/virtio_ring.c 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
sgs              1694 drivers/virtio/virtio_ring.c 				struct scatterlist *sgs[],
sgs              1704 drivers/virtio/virtio_ring.c 	return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
sgs              1706 drivers/virtio/virtio_ring.c 				 virtqueue_add_split(_vq, sgs, total_sg,
sgs              1725 drivers/virtio/virtio_ring.c 		      struct scatterlist *sgs[],
sgs              1737 drivers/virtio/virtio_ring.c 		for (sg = sgs[i]; sg; sg = sg_next(sg))
sgs              1740 drivers/virtio/virtio_ring.c 	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
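The virtio_ring fragments show the other side of the API: each sgs[i] handed to virtqueue_add_sgs() may itself be a chained scatterlist, so the total descriptor count is found by walking every chain with sg_next() before dispatching to the packed- or split-ring code. A sketch of just that counting step; count_total_sg is a hypothetical helper mirroring the loop above, not a real kernel function:

#include <linux/scatterlist.h>

static unsigned int count_total_sg(struct scatterlist *sgs[],
                                   unsigned int out_sgs, unsigned int in_sgs)
{
        struct scatterlist *sg;
        unsigned int i, total_sg = 0;

        for (i = 0; i < out_sgs + in_sgs; i++)
                for (sg = sgs[i]; sg; sg = sg_next(sg))
                        total_sg++;             /* one ring descriptor per entry */

        return total_sg;
}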
sgs               323 fs/fuse/virtio_fs.c 	struct scatterlist *sgs[] = {&sg};
sgs               349 fs/fuse/virtio_fs.c 		ret = virtqueue_add_sgs(vq, sgs, 1, 0, forget, GFP_ATOMIC);
sgs               714 fs/fuse/virtio_fs.c 	struct scatterlist *sgs[] = {&sg};
sgs               756 fs/fuse/virtio_fs.c 	ret = virtqueue_add_sgs(vq, sgs, 1, 0, forget, GFP_ATOMIC);
sgs               881 fs/fuse/virtio_fs.c 	struct scatterlist **sgs = stack_sgs;
sgs               897 fs/fuse/virtio_fs.c 		sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), GFP_ATOMIC);
sgs               899 fs/fuse/virtio_fs.c 		if (!sgs || !sg) {
sgs               930 fs/fuse/virtio_fs.c 		sgs[i] = &sg[i];
sgs               941 fs/fuse/virtio_fs.c 	ret = virtqueue_add_sgs(vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC);
sgs               970 fs/fuse/virtio_fs.c 	if (sgs != stack_sgs) {
sgs               971 fs/fuse/virtio_fs.c 		kfree(sgs);
sgs              1206 include/linux/nvme.h 	__le16	sgs;
sgs                55 include/linux/virtio.h 		      struct scatterlist *sgs[],
sgs              7954 kernel/sched/fair.c group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
sgs              7956 kernel/sched/fair.c 	if (sgs->sum_nr_running < sgs->group_weight)
sgs              7959 kernel/sched/fair.c 	if ((sgs->group_capacity * 100) >
sgs              7960 kernel/sched/fair.c 			(sgs->group_util * env->sd->imbalance_pct))
sgs              7975 kernel/sched/fair.c group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
sgs              7977 kernel/sched/fair.c 	if (sgs->sum_nr_running <= sgs->group_weight)
sgs              7980 kernel/sched/fair.c 	if ((sgs->group_capacity * 100) <
sgs              7981 kernel/sched/fair.c 			(sgs->group_util * env->sd->imbalance_pct))
sgs              8009 kernel/sched/fair.c 			  struct sg_lb_stats *sgs)
sgs              8011 kernel/sched/fair.c 	if (sgs->group_no_capacity)
sgs              8017 kernel/sched/fair.c 	if (sgs->group_misfit_task_load)
sgs              8054 kernel/sched/fair.c 				      struct sg_lb_stats *sgs,
sgs              8059 kernel/sched/fair.c 	memset(sgs, 0, sizeof(*sgs));
sgs              8067 kernel/sched/fair.c 		sgs->group_load += cpu_runnable_load(rq);
sgs              8068 kernel/sched/fair.c 		sgs->group_util += cpu_util(i);
sgs              8069 kernel/sched/fair.c 		sgs->sum_nr_running += rq->cfs.h_nr_running;
sgs              8079 kernel/sched/fair.c 		sgs->nr_numa_running += rq->nr_numa_running;
sgs              8080 kernel/sched/fair.c 		sgs->nr_preferred_running += rq->nr_preferred_running;
sgs              8086 kernel/sched/fair.c 			sgs->idle_cpus++;
sgs              8089 kernel/sched/fair.c 		    sgs->group_misfit_task_load < rq->misfit_task_load) {
sgs              8090 kernel/sched/fair.c 			sgs->group_misfit_task_load = rq->misfit_task_load;
sgs              8096 kernel/sched/fair.c 	sgs->group_capacity = group->sgc->capacity;
sgs              8097 kernel/sched/fair.c 	sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
sgs              8099 kernel/sched/fair.c 	if (sgs->sum_nr_running)
sgs              8100 kernel/sched/fair.c 		sgs->load_per_task = sgs->group_load / sgs->sum_nr_running;
sgs              8102 kernel/sched/fair.c 	sgs->group_weight = group->group_weight;
sgs              8104 kernel/sched/fair.c 	sgs->group_no_capacity = group_is_overloaded(env, sgs);
sgs              8105 kernel/sched/fair.c 	sgs->group_type = group_classify(group, sgs);
sgs              8124 kernel/sched/fair.c 				   struct sg_lb_stats *sgs)
sgs              8134 kernel/sched/fair.c 	if (sgs->group_type == group_misfit_task &&
sgs              8139 kernel/sched/fair.c 	if (sgs->group_type > busiest->group_type)
sgs              8142 kernel/sched/fair.c 	if (sgs->group_type < busiest->group_type)
sgs              8145 kernel/sched/fair.c 	if (sgs->avg_load <= busiest->avg_load)
sgs              8157 kernel/sched/fair.c 	if (sgs->sum_nr_running <= sgs->group_weight &&
sgs              8164 kernel/sched/fair.c 	if (sgs->group_type == group_misfit_task &&
sgs              8165 kernel/sched/fair.c 	    sgs->group_misfit_task_load < busiest->group_misfit_task_load)
sgs              8181 kernel/sched/fair.c 	if (sgs->sum_nr_running &&
sgs              8196 kernel/sched/fair.c static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
sgs              8198 kernel/sched/fair.c 	if (sgs->sum_nr_running > sgs->nr_numa_running)
sgs              8200 kernel/sched/fair.c 	if (sgs->sum_nr_running > sgs->nr_preferred_running)
sgs              8214 kernel/sched/fair.c static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
sgs              8245 kernel/sched/fair.c 		struct sg_lb_stats *sgs = &tmp_sgs;
sgs              8251 kernel/sched/fair.c 			sgs = local;
sgs              8258 kernel/sched/fair.c 		update_sg_lb_stats(env, sg, sgs, &sg_status);
sgs              8275 kernel/sched/fair.c 		    (sgs->sum_nr_running > local->sum_nr_running + 1)) {
sgs              8276 kernel/sched/fair.c 			sgs->group_no_capacity = 1;
sgs              8277 kernel/sched/fair.c 			sgs->group_type = group_classify(sg, sgs);
sgs              8280 kernel/sched/fair.c 		if (update_sd_pick_busiest(env, sds, sg, sgs)) {
sgs              8282 kernel/sched/fair.c 			sds->busiest_stat = *sgs;
sgs              8287 kernel/sched/fair.c 		sds->total_running += sgs->sum_nr_running;
sgs              8288 kernel/sched/fair.c 		sds->total_load += sgs->group_load;
sgs              8289 kernel/sched/fair.c 		sds->total_capacity += sgs->group_capacity;
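In kernel/sched/fair.c, sgs is a struct sg_lb_stats, and the two checks above compare group_capacity * 100 against group_util * imbalance_pct to decide whether a scheduling group still has spare capacity or is overloaded. A hypothetical stand-alone restatement of that arithmetic with plain integers (has_capacity/is_overloaded are illustrative names); the imbalance_pct value is an assumption, commonly around 117 or 125 depending on the domain level:

#include <stdbool.h>
#include <stdio.h>

static bool has_capacity(unsigned int nr_running, unsigned int weight,
                         unsigned long capacity, unsigned long util,
                         unsigned int imbalance_pct)
{
        if (nr_running < weight)
                return true;                    /* fewer tasks than CPUs */
        return capacity * 100 > util * imbalance_pct;
}

static bool is_overloaded(unsigned int nr_running, unsigned int weight,
                          unsigned long capacity, unsigned long util,
                          unsigned int imbalance_pct)
{
        if (nr_running <= weight)
                return false;
        return capacity * 100 < util * imbalance_pct;
}

int main(void)
{
        /* assumed: 4-CPU group, 5 runnable tasks, utilization near capacity */
        printf("%d %d\n",
               has_capacity(5, 4, 4096, 3800, 117),
               is_overloaded(5, 4, 4096, 3800, 117));   /* prints "0 1" */
        return 0;
}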
sgs               257 net/9p/trans_virtio.c 	struct scatterlist *sgs[2];
sgs               270 net/9p/trans_virtio.c 		sgs[out_sgs++] = chan->sg;
sgs               275 net/9p/trans_virtio.c 		sgs[out_sgs + in_sgs++] = chan->sg + out;
sgs               277 net/9p/trans_virtio.c 	err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req,
sgs               396 net/9p/trans_virtio.c 	struct scatterlist *sgs[4];
sgs               448 net/9p/trans_virtio.c 		sgs[out_sgs++] = chan->sg;
sgs               451 net/9p/trans_virtio.c 		sgs[out_sgs++] = chan->sg + out;
sgs               466 net/9p/trans_virtio.c 		sgs[out_sgs + in_sgs++] = chan->sg + out;
sgs               469 net/9p/trans_virtio.c 		sgs[out_sgs + in_sgs++] = chan->sg + out + in;
sgs               474 net/9p/trans_virtio.c 	BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs));
sgs               475 net/9p/trans_virtio.c 	err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req,
sgs               148 net/vmw_vsock/virtio_transport.c 		struct scatterlist hdr, buf, *sgs[2];
sgs               168 net/vmw_vsock/virtio_transport.c 		sgs[out_sg++] = &hdr;
sgs               171 net/vmw_vsock/virtio_transport.c 			sgs[out_sg++] = &buf;
sgs               174 net/vmw_vsock/virtio_transport.c 		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
sgs               293 net/vmw_vsock/virtio_transport.c 	struct scatterlist hdr, buf, *sgs[2];
sgs               314 net/vmw_vsock/virtio_transport.c 		sgs[0] = &hdr;
sgs               317 net/vmw_vsock/virtio_transport.c 		sgs[1] = &buf;
sgs               318 net/vmw_vsock/virtio_transport.c 		ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
sgs                30 tools/virtio/linux/virtio.h 		      struct scatterlist *sgs[],
sgs               442 tools/virtio/vringh_test.c 	struct scatterlist guest_sg[RINGSIZE], *sgs[2];
sgs               505 tools/virtio/vringh_test.c 	sgs[0] = &guest_sg[0];
sgs               506 tools/virtio/vringh_test.c 	sgs[1] = &guest_sg[1];
sgs               510 tools/virtio/vringh_test.c 	err = virtqueue_add_sgs(vq, sgs, 1, 1, &err, GFP_KERNEL);