Searched refs:sgs (Results 1 - 20 of 20) sorted by relevance

/linux-4.4.14/net/9p/
trans_virtio.c
267 struct scatterlist *sgs[2]; p9_virtio_request() local
280 sgs[out_sgs++] = chan->sg; p9_virtio_request()
285 sgs[out_sgs + in_sgs++] = chan->sg + out; p9_virtio_request()
287 err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req->tc, p9_virtio_request()
405 struct scatterlist *sgs[4]; p9_virtio_zc_request() local
445 sgs[out_sgs++] = chan->sg; p9_virtio_zc_request()
448 sgs[out_sgs++] = chan->sg + out; p9_virtio_zc_request()
463 sgs[out_sgs + in_sgs++] = chan->sg + out; p9_virtio_zc_request()
466 sgs[out_sgs + in_sgs++] = chan->sg + out + in; p9_virtio_zc_request()
471 BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs)); p9_virtio_zc_request()
472 err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req->tc, p9_virtio_zc_request()
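The hits above all follow the same caller-side convention: the driver gathers its scatterlists into a small on-stack sgs[] array, placing every device-readable (out) entry before every device-writable (in) entry, and passes the two counts to virtqueue_add_sgs(). A minimal sketch of that pattern, not taken from any file listed here; the function name and the req_buf/resp_buf/token parameters are illustrative:

    #include <linux/virtio.h>
    #include <linux/scatterlist.h>

    /* Sketch: queue one request buffer (out) and one response buffer (in),
     * mirroring the sgs[] bookkeeping seen in p9_virtio_request() above.
     */
    static int example_send_request(struct virtqueue *vq,
                                    void *req_buf, size_t req_len,
                                    void *resp_buf, size_t resp_len,
                                    void *token)
    {
            struct scatterlist req, resp;
            struct scatterlist *sgs[2];
            unsigned int out_sgs = 0, in_sgs = 0;

            sg_init_one(&req, req_buf, req_len);
            sgs[out_sgs++] = &req;                  /* device reads this */

            sg_init_one(&resp, resp_buf, resp_len);
            sgs[out_sgs + in_sgs++] = &resp;        /* device writes this */

            /* All out entries must precede all in entries in sgs[]. */
            return virtqueue_add_sgs(vq, sgs, out_sgs, in_sgs, token, GFP_ATOMIC);
    }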
/linux-4.4.14/tools/virtio/linux/
virtio.h
25 struct scatterlist *sgs[],
/linux-4.4.14/drivers/block/
virtio_blk.c
79 struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6]; __virtblk_add_req() local
84 sgs[num_out++] = &hdr; __virtblk_add_req()
94 sgs[num_out++] = &cmd; __virtblk_add_req()
99 sgs[num_out++] = data_sg; __virtblk_add_req()
101 sgs[num_out + num_in++] = data_sg; __virtblk_add_req()
106 sgs[num_out + num_in++] = &sense; __virtblk_add_req()
108 sgs[num_out + num_in++] = &inhdr; __virtblk_add_req()
112 sgs[num_out + num_in++] = &status; __virtblk_add_req()
114 return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC); __virtblk_add_req()
cciss_scsi.c
335 printk("sgs..........Errorinfo:\n");
907 c->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */ cciss_map_one()
/linux-4.4.14/drivers/gpu/drm/virtio/
virtgpu_vq.c
300 struct scatterlist *sgs[3], vcmd, vout, vresp; virtio_gpu_queue_ctrl_buffer_locked() local
308 sgs[outcnt+incnt] = &vcmd; virtio_gpu_queue_ctrl_buffer_locked()
313 sgs[outcnt + incnt] = &vout; virtio_gpu_queue_ctrl_buffer_locked()
319 sgs[outcnt + incnt] = &vresp; virtio_gpu_queue_ctrl_buffer_locked()
324 ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC); virtio_gpu_queue_ctrl_buffer_locked()
386 struct scatterlist *sgs[1], ccmd; virtio_gpu_queue_cursor() local
394 sgs[0] = &ccmd; virtio_gpu_queue_cursor()
399 ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC); virtio_gpu_queue_cursor()
/linux-4.4.14/drivers/spi/
spi-mxs.c
180 const int sgs = DIV_ROUND_UP(len, desc_len); mxs_spi_txrx_dma() local
193 dma_xfer = kcalloc(sgs, sizeof(*dma_xfer), GFP_KERNEL); mxs_spi_txrx_dma()
209 for (sg_count = 0; sg_count < sgs; sg_count++) { mxs_spi_txrx_dma()
217 if ((sg_count + 1 == sgs) && (flags & TXRX_DEASSERT_CS)) mxs_spi_txrx_dma()
spi.c
689 int sgs; spi_map_buf() local
697 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); spi_map_buf()
700 sgs = DIV_ROUND_UP(len, desc_len); spi_map_buf()
703 ret = sg_alloc_table(sgt, sgs, GFP_KERNEL); spi_map_buf()
707 for (i = 0; i < sgs; i++) { spi_map_buf()
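Both spi-mxs.c and spi.c size their scatterlist tables the same way: divide the transfer length by the largest chunk a single descriptor may carry, rounding up, and, for vmalloc'ed buffers that are mapped page by page, add the buffer's offset into its first page before dividing. A minimal sketch of that sizing arithmetic; the function name and the max_seg_size parameter are illustrative, and error handling is omitted:

    #include <linux/kernel.h>       /* DIV_ROUND_UP() */
    #include <linux/mm.h>           /* is_vmalloc_addr(), offset_in_page() */
    #include <linux/scatterlist.h>

    static int example_alloc_sg_for_buf(struct sg_table *sgt, const void *buf,
                                        size_t len, size_t max_seg_size)
    {
            int sgs;

            if (is_vmalloc_addr(buf))
                    /* vmalloc memory need not be physically contiguous:
                     * count one entry per page touched, including the
                     * partial first page. */
                    sgs = DIV_ROUND_UP(len + offset_in_page(buf), PAGE_SIZE);
            else
                    sgs = DIV_ROUND_UP(len, max_seg_size);

            return sg_alloc_table(sgt, sgs, GFP_KERNEL);
    }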
/linux-4.4.14/kernel/sched/
fair.c
6244 group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs) group_has_capacity() argument
6246 if (sgs->sum_nr_running < sgs->group_weight) group_has_capacity()
6249 if ((sgs->group_capacity * 100) > group_has_capacity()
6250 (sgs->group_util * env->sd->imbalance_pct)) group_has_capacity()
6265 group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs) group_is_overloaded() argument
6267 if (sgs->sum_nr_running <= sgs->group_weight) group_is_overloaded()
6270 if ((sgs->group_capacity * 100) < group_is_overloaded()
6271 (sgs->group_util * env->sd->imbalance_pct)) group_is_overloaded()
6279 struct sg_lb_stats *sgs) group_classify()
6281 if (sgs->group_no_capacity) group_classify()
6296 * @sgs: variable to hold the statistics for this group.
6301 int local_group, struct sg_lb_stats *sgs, update_sg_lb_stats()
6307 memset(sgs, 0, sizeof(*sgs)); update_sg_lb_stats()
6318 sgs->group_load += load; for_each_cpu_and()
6319 sgs->group_util += cpu_util(i); for_each_cpu_and()
6320 sgs->sum_nr_running += rq->cfs.h_nr_running; for_each_cpu_and()
6326 sgs->nr_numa_running += rq->nr_numa_running; for_each_cpu_and()
6327 sgs->nr_preferred_running += rq->nr_preferred_running; for_each_cpu_and()
6329 sgs->sum_weighted_load += weighted_cpuload(i); for_each_cpu_and()
6331 sgs->idle_cpus++; for_each_cpu_and()
6335 sgs->group_capacity = group->sgc->capacity;
6336 sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
6338 if (sgs->sum_nr_running)
6339 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
6341 sgs->group_weight = group->group_weight;
6343 sgs->group_no_capacity = group_is_overloaded(env, sgs);
6344 sgs->group_type = group_classify(group, sgs);
6352 * @sgs: sched_group statistics
6363 struct sg_lb_stats *sgs) update_sd_pick_busiest()
6367 if (sgs->group_type > busiest->group_type) update_sd_pick_busiest()
6370 if (sgs->group_type < busiest->group_type) update_sd_pick_busiest()
6373 if (sgs->avg_load <= busiest->avg_load) update_sd_pick_busiest()
6385 if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) { update_sd_pick_busiest()
6397 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) fbq_classify_group() argument
6399 if (sgs->sum_nr_running > sgs->nr_numa_running) fbq_classify_group()
6401 if (sgs->sum_nr_running > sgs->nr_preferred_running) fbq_classify_group()
6415 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) fbq_classify_group() argument
6445 struct sg_lb_stats *sgs = &tmp_sgs; update_sd_lb_stats() local
6451 sgs = &sds->local_stat; update_sd_lb_stats()
6458 update_sg_lb_stats(env, sg, load_idx, local_group, sgs, update_sd_lb_stats()
6476 (sgs->sum_nr_running > 1)) { update_sd_lb_stats()
6477 sgs->group_no_capacity = 1; update_sd_lb_stats()
6478 sgs->group_type = group_classify(sg, sgs); update_sd_lb_stats()
6481 if (update_sd_pick_busiest(env, sds, sg, sgs)) { update_sd_lb_stats()
6483 sds->busiest_stat = *sgs; update_sd_lb_stats()
6488 sds->total_load += sgs->group_load; update_sd_lb_stats()
6489 sds->total_capacity += sgs->group_capacity; update_sd_lb_stats()
6278 group_classify(struct sched_group *group, struct sg_lb_stats *sgs) group_classify() argument
6299 update_sg_lb_stats(struct lb_env *env, struct sched_group *group, int load_idx, int local_group, struct sg_lb_stats *sgs, bool *overload) update_sg_lb_stats() argument
6360 update_sd_pick_busiest(struct lb_env *env, struct sd_lb_stats *sds, struct sched_group *sg, struct sg_lb_stats *sgs) update_sd_pick_busiest() argument
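The two predicates at the top of this block are near mirror images: group_has_capacity() reports spare room while the group runs fewer tasks than it has CPUs or while utilization stays clearly below capacity, and group_is_overloaded() fires once both conditions have flipped. A compacted sketch of the overload test, keeping only the sg_lb_stats fields it reads; assuming the common default of imbalance_pct = 125, the utilization comparison amounts to "more than 80% of the group's capacity is in use":

    #include <linux/types.h>

    /* Reduced stand-in for struct sg_lb_stats, keeping only the fields
     * the overload check reads. */
    struct example_sg_stats {
            unsigned int  sum_nr_running;   /* runnable tasks in the group */
            unsigned int  group_weight;     /* CPUs in the group */
            unsigned long group_capacity;   /* scaled compute capacity */
            unsigned long group_util;       /* scaled utilization */
    };

    static bool example_group_is_overloaded(const struct example_sg_stats *sgs,
                                            unsigned int imbalance_pct)
    {
            if (sgs->sum_nr_running <= sgs->group_weight)
                    return false;
            /* With imbalance_pct == 125: overloaded once group_util
             * exceeds 100/125 = 80% of group_capacity. */
            return sgs->group_capacity * 100 < sgs->group_util * imbalance_pct;
    }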
/linux-4.4.14/drivers/scsi/
virtio_scsi.c
430 struct scatterlist *sgs[6], req, resp; virtscsi_add_cmd() local
445 sgs[out_num++] = &req; virtscsi_add_cmd()
451 sgs[out_num++] = scsi_prot_sglist(sc); virtscsi_add_cmd()
452 sgs[out_num++] = out->sgl; virtscsi_add_cmd()
457 sgs[out_num + in_num++] = &resp; virtscsi_add_cmd()
463 sgs[out_num + in_num++] = scsi_prot_sglist(sc); virtscsi_add_cmd()
464 sgs[out_num + in_num++] = in->sgl; virtscsi_add_cmd()
467 return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC); virtscsi_add_cmd()
hpsa.c
2623 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */ hpsa_map_one()
4245 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
/linux-4.4.14/drivers/virtio/
virtio_ring.c
130 struct scatterlist *sgs[], virtqueue_add()
213 for (sg = sgs[n]; sg; sg = sg_next(sg)) { virtqueue_add()
222 for (sg = sgs[n]; sg; sg = sg_next(sg)) { virtqueue_add()
268 * @sgs: array of terminated scatterlists.
280 struct scatterlist *sgs[], virtqueue_add_sgs()
291 for (sg = sgs[i]; sg; sg = sg_next(sg)) virtqueue_add_sgs()
294 return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp); virtqueue_add_sgs()
129 virtqueue_add(struct virtqueue *_vq, struct scatterlist *sgs[], unsigned int total_sg, unsigned int out_sgs, unsigned int in_sgs, void *data, gfp_t gfp) virtqueue_add() argument
279 virtqueue_add_sgs(struct virtqueue *_vq, struct scatterlist *sgs[], unsigned int out_sgs, unsigned int in_sgs, void *data, gfp_t gfp) virtqueue_add_sgs() argument
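virtqueue_add_sgs() itself is a thin wrapper: it walks every scatterlist in sgs[] to count the total number of entries, then hands that total, together with the out/in split, to virtqueue_add(), which builds the descriptors. A sketch of just the counting step, assuming the scatterlists are terminated as the kerneldoc at line 268 requires; the function name is illustrative:

    #include <linux/scatterlist.h>

    static unsigned int example_count_total_sg(struct scatterlist *sgs[],
                                               unsigned int out_sgs,
                                               unsigned int in_sgs)
    {
            struct scatterlist *sg;
            unsigned int i, total_sg = 0;

            for (i = 0; i < out_sgs + in_sgs; i++)
                    for (sg = sgs[i]; sg; sg = sg_next(sg))
                            total_sg++;     /* sg_next() stops at the end mark */

            return total_sg;
    }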
/linux-4.4.14/tools/virtio/
vringh_test.c
440 struct scatterlist guest_sg[RINGSIZE], *sgs[2]; main() local
503 sgs[0] = &guest_sg[0]; main()
504 sgs[1] = &guest_sg[1]; main()
508 err = virtqueue_add_sgs(vq, sgs, 1, 1, &err, GFP_KERNEL); main()
/linux-4.4.14/drivers/crypto/nx/
nx-sha256.c
134 * by sg list limits and number of sgs we already used nx_sha256_update()
nx-sha512.c
133 * by sg list limits and number of sgs we already used nx_sha512_update()
/linux-4.4.14/include/linux/
virtio.h
48 struct scatterlist *sgs[],
/linux-4.4.14/drivers/net/
virtio_net.c
984 struct scatterlist *sgs[4], hdr, stat; virtnet_send_command() local
995 sgs[out_num++] = &hdr; virtnet_send_command()
998 sgs[out_num++] = out; virtnet_send_command()
1002 sgs[out_num] = &stat; virtnet_send_command()
1004 BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); virtnet_send_command()
1005 virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); virtnet_send_command()
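virtnet_send_command() applies the same sgs[] convention to the control virtqueue: the command header (and any command-specific payload) go out, a single status byte comes back in, and the caller kicks the queue and waits for the device to return the buffer. A hedged sketch of that sequence; the real driver's status handling and error paths differ, and the function name and buffer parameters here are hypothetical:

    #include <linux/virtio.h>
    #include <linux/scatterlist.h>

    static bool example_send_ctrl_cmd(struct virtqueue *cvq,
                                      void *hdr_buf, size_t hdr_len,
                                      u8 *status, void *token)
    {
            struct scatterlist hdr, stat;
            struct scatterlist *sgs[2];
            unsigned int out_num = 0, len;

            sg_init_one(&hdr, hdr_buf, hdr_len);
            sgs[out_num++] = &hdr;                  /* command, device-readable */

            sg_init_one(&stat, status, sizeof(*status));
            sgs[out_num] = &stat;                   /* status, device-writable */

            if (virtqueue_add_sgs(cvq, sgs, out_num, 1, token, GFP_ATOMIC) < 0)
                    return false;

            if (!virtqueue_kick(cvq))
                    return false;

            /* Busy-wait until the device hands the buffer back. */
            while (!virtqueue_get_buf(cvq, &len) && !virtqueue_is_broken(cvq))
                    cpu_relax();

            return *status == 0;    /* 0 taken to mean success here */
    }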
/linux-4.4.14/net/ceph/
crypto.c
98 * number of sgs by squeezing physically contiguous pages together is
/linux-4.4.14/net/rds/
send.c
1029 /* size of rm including all sgs */ rds_sendmsg()
/linux-4.4.14/drivers/scsi/aic7xxx/
aic79xx.h
662 int sgs_left; /* unallocated sgs in head map_node */
/linux-4.4.14/arch/powerpc/mm/
tlb_low_64e.S
359 rlwinm r10,r10,0,0x80000fff /* tgs,tlpid -> sgs,slpid */

Completed in 819 milliseconds