Searched refs:sg_list (Results 1 - 125 of 125) sorted by relevance

/linux-4.1.27/drivers/crypto/caam/
sg_sw_sec4.h
59 static inline int __sg_count(struct scatterlist *sg_list, int nbytes, __sg_count() argument
62 struct scatterlist *sg = sg_list; __sg_count()
77 static inline int sg_count(struct scatterlist *sg_list, int nbytes, sg_count() argument
80 int sg_nents = __sg_count(sg_list, nbytes, chained); sg_count()
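
The caam helpers above (and the near-identical sg_count() copies in qce, bfin_crc, picoxcell and talitos further down this listing) share one pattern: walk a scatterlist until nbytes of payload are covered and report how many entries that took. A minimal sketch of that walk, as a hypothetical standalone helper rather than any single driver's exact code:

#include <linux/scatterlist.h>

static int example_sg_count(struct scatterlist *sg_list, int nbytes)
{
        struct scatterlist *sg = sg_list;
        int sg_nents = 0;

        while (nbytes > 0 && sg) {
                sg_nents++;
                nbytes -= sg->length;
                sg = sg_next(sg);       /* also steps across chained lists */
        }
        return sg_nents;
}

In the real helpers the extra chained flag records whether the walk ever crossed a chain entry, since some DMA engines cannot follow chained lists directly.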
/linux-4.1.27/arch/blackfin/kernel/
dma-mapping.c
116 dma_map_sg(struct device *dev, struct scatterlist *sg_list, int nents, dma_map_sg() argument
122 for_each_sg(sg_list, sg, nents, i) { for_each_sg()
131 void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg_list, dma_sync_sg_for_device() argument
137 for_each_sg(sg_list, sg, nelems, i) { for_each_sg()
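
Both blackfin hits are plain for_each_sg() walks over the caller's list. A sketch of the iterator's shape (the per-entry body here is only a placeholder):

#include <linux/scatterlist.h>

static void example_sync_sg(struct scatterlist *sg_list, int nents)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sg_list, sg, nents, i) {
                /* per-entry work, e.g. writing back the cache lines
                 * covering sg_virt(sg) .. sg_virt(sg) + sg->length */
        }
}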
/linux-4.1.27/drivers/infiniband/hw/cxgb3/
iwch_qp.c
73 if ((plen + wr->sg_list[i].length) < plen) build_rdma_send()
76 plen += wr->sg_list[i].length; build_rdma_send()
77 wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey); build_rdma_send()
78 wqe->send.sgl[i].len = cpu_to_be32(wr->sg_list[i].length); build_rdma_send()
79 wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr); build_rdma_send()
110 if ((plen + wr->sg_list[i].length) < plen) { build_rdma_write()
113 plen += wr->sg_list[i].length; build_rdma_write()
115 cpu_to_be32(wr->sg_list[i].lkey); build_rdma_write()
117 cpu_to_be32(wr->sg_list[i].length); build_rdma_write()
119 cpu_to_be64(wr->sg_list[i].addr); build_rdma_write()
142 wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey); build_rdma_read()
143 wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length); build_rdma_read()
144 wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr); build_rdma_read()
200 static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list, iwch_sgl2pbl_map() argument
208 mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8); iwch_sgl2pbl_map()
222 if (sg_list[i].addr < mhp->attr.va_fbo) { iwch_sgl2pbl_map()
226 if (sg_list[i].addr + ((u64) sg_list[i].length) < iwch_sgl2pbl_map()
227 sg_list[i].addr) { iwch_sgl2pbl_map()
231 if (sg_list[i].addr + ((u64) sg_list[i].length) > iwch_sgl2pbl_map()
236 offset = sg_list[i].addr - mhp->attr.va_fbo; iwch_sgl2pbl_map()
254 err = iwch_sgl2pbl_map(qhp->rhp, wr->sg_list, wr->num_sge, pbl_addr, build_rdma_recv()
264 wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey); build_rdma_recv()
265 wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length); build_rdma_recv()
268 wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) & build_rdma_recv()
321 if (wr->sg_list[i].length > T3_STAG0_MAX_PBE_LEN) build_zero_stag_recv()
328 if (wr->sg_list[i].lkey != 0) build_zero_stag_recv()
331 wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length); build_zero_stag_recv()
332 wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr); build_zero_stag_recv()
497 if (wr->sg_list[0].lkey) iwch_post_receive()
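
The recurring test "(plen + wr->sg_list[i].length) < plen" above is an unsigned-overflow guard: if adding one more 32-bit SGE length makes the running total smaller, the sum wrapped and the payload must be rejected. A hedged sketch of the idiom (the helper name and error code are illustrative, not cxgb3's):

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

static int example_sum_sges(const struct ib_sge *sg_list, int num_sge,
                            u32 *plenp)
{
        u32 plen = 0;
        int i;

        for (i = 0; i < num_sge; i++) {
                if (plen + sg_list[i].length < plen)
                        return -EMSGSIZE;       /* u32 total wrapped */
                plen += sg_list[i].length;
        }
        *plenp = plen;
        return 0;
}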
/linux-4.1.27/drivers/scsi/qla2xxx/
qla_bsg.c
41 bsg_job->request_payload.sg_list, qla2x00_bsg_sp_free()
46 bsg_job->reply_payload.sg_list, qla2x00_bsg_sp_free()
49 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, qla2x00_bsg_sp_free()
52 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, qla2x00_bsg_sp_free()
183 bsg_job->reply_payload.sg_list, qla24xx_proc_fcp_prio_cfg_cmd()
210 sg_copy_to_buffer(bsg_job->request_payload.sg_list, qla24xx_proc_fcp_prio_cfg_cmd()
336 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, qla2x00_process_els()
343 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, qla2x00_process_els()
395 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, qla2x00_process_els()
397 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, qla2x00_process_els()
436 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, qla2x00_process_ct()
445 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, qla2x00_process_ct()
544 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, qla2x00_process_ct()
546 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, qla2x00_process_ct()
725 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, qla2x00_process_loopback()
735 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, qla2x00_process_loopback()
776 sg_copy_to_buffer(bsg_job->request_payload.sg_list, qla2x00_process_loopback()
905 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, qla2x00_process_loopback()
926 bsg_job->reply_payload.sg_list, qla2x00_process_loopback()
930 bsg_job->request_payload.sg_list, qla2x00_process_loopback()
992 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, qla84xx_updatefw()
1019 sg_copy_to_buffer(bsg_job->request_payload.sg_list, qla84xx_updatefw()
1071 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, qla84xx_updatefw()
1115 bsg_job->reply_payload.sg_list, qla84xx_mgmt_cmd()
1165 bsg_job->request_payload.sg_list, qla84xx_mgmt_cmd()
1196 sg_copy_to_buffer(bsg_job->request_payload.sg_list, qla84xx_mgmt_cmd()
1249 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, qla84xx_mgmt_cmd()
1260 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, qla84xx_mgmt_cmd()
1263 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, qla84xx_mgmt_cmd()
1450 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, qla2x00_read_optrom()
1482 sg_copy_to_buffer(bsg_job->request_payload.sg_list, qla2x00_update_optrom()
1517 sg_copy_to_buffer(bsg_job->request_payload.sg_list, qla2x00_update_fru_versions()
1565 sg_copy_to_buffer(bsg_job->request_payload.sg_list, qla2x00_read_fru_status()
1579 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, qla2x00_read_fru_status()
1613 sg_copy_to_buffer(bsg_job->request_payload.sg_list, qla2x00_write_fru_status()
1657 sg_copy_to_buffer(bsg_job->request_payload.sg_list, qla2x00_write_i2c()
1700 sg_copy_to_buffer(bsg_job->request_payload.sg_list, qla2x00_read_i2c()
1713 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, qla2x00_read_i2c()
1821 bsg_job->request_payload.sg_list, qla24xx_process_bidir_cmd()
1831 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, qla24xx_process_bidir_cmd()
1890 bsg_job->reply_payload.sg_list, qla24xx_process_bidir_cmd()
1894 bsg_job->request_payload.sg_list, qla24xx_process_bidir_cmd()
1940 bsg_job->request_payload.sg_list, qlafx00_mgmt_cmd()
1952 bsg_job->reply_payload.sg_list, qlafx00_mgmt_cmd()
2020 bsg_job->reply_payload.sg_list, qlafx00_mgmt_cmd()
2025 bsg_job->request_payload.sg_list, qlafx00_mgmt_cmd()
2042 sg_copy_to_buffer(bsg_job->request_payload.sg_list, qla26xx_serdes_op()
2052 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, qla26xx_serdes_op()
2082 sg_copy_to_buffer(bsg_job->request_payload.sg_list, qla8044_serdes_op()
2092 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, qla8044_serdes_op()
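
Nearly every qla_bsg.c hit is one of two bounce-buffer calls: sg_copy_to_buffer() linearizes the scattered request payload into a driver buffer before the command is issued, and sg_copy_from_buffer() scatters the response back afterwards. A sketch of the pairing, assuming the fc_bsg_job layout from scsi_transport_fc.h (buf and len are hypothetical):

#include <linux/scatterlist.h>
#include <scsi/scsi_transport_fc.h>

static void example_bsg_bounce(struct fc_bsg_job *job, void *buf, size_t len)
{
        /* gather the scattered request payload into one linear buffer */
        sg_copy_to_buffer(job->request_payload.sg_list,
                          job->request_payload.sg_cnt, buf, len);

        /* ... hand buf to the hardware, which overwrites it ... */

        /* scatter the linear response back across the reply payload */
        sg_copy_from_buffer(job->reply_payload.sg_list,
                            job->reply_payload.sg_cnt, buf, len);
}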
qla_iocb.c
2066 (bsg_job->request_payload.sg_list))); qla24xx_els_iocb()
2068 (bsg_job->request_payload.sg_list))); qla24xx_els_iocb()
2070 (bsg_job->request_payload.sg_list)); qla24xx_els_iocb()
2073 (bsg_job->reply_payload.sg_list))); qla24xx_els_iocb()
2075 (bsg_job->reply_payload.sg_list))); qla24xx_els_iocb()
2077 (bsg_job->reply_payload.sg_list)); qla24xx_els_iocb()
2115 (bsg_job->request_payload.sg_list))); qla2x00_ct_iocb()
2117 (bsg_job->request_payload.sg_list))); qla2x00_ct_iocb()
2121 (bsg_job->reply_payload.sg_list))); qla2x00_ct_iocb()
2123 (bsg_job->reply_payload.sg_list))); qla2x00_ct_iocb()
2131 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { qla2x00_ct_iocb()
2195 (bsg_job->request_payload.sg_list))); qla24xx_ct_iocb()
2197 (bsg_job->request_payload.sg_list))); qla24xx_ct_iocb()
2199 (bsg_job->request_payload.sg_list)); qla24xx_ct_iocb()
2206 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { qla24xx_ct_iocb()
2716 for_each_sg(bsg_job->request_payload.sg_list, sg, qla25xx_build_bidir_iocb()
2742 for_each_sg(bsg_job->reply_payload.sg_list, sg, qla25xx_build_bidir_iocb()
qla_mr.c
3335 for_each_sg(bsg_job->request_payload.sg_list, sg, qlafx00_fxdisc_iocb()
3398 for_each_sg(bsg_job->reply_payload.sg_list, sg, qlafx00_fxdisc_iocb()
/linux-4.1.27/block/
bsg-lib.c
39 kfree(job->request_payload.sg_list); bsg_destroy_job()
40 kfree(job->reply_payload.sg_list); bsg_destroy_job()
96 buf->sg_list = kzalloc(sz, GFP_KERNEL); bsg_map_buffer()
97 if (!buf->sg_list) bsg_map_buffer()
99 sg_init_table(buf->sg_list, req->nr_phys_segments); bsg_map_buffer()
100 buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list); bsg_map_buffer()
148 kfree(job->request_payload.sg_list); bsg_create_job()
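
The bsg-lib hits show the whole life cycle of a payload sg_list: allocated to match the request's physical segment count, initialized, filled in by blk_rq_map_sg(), and kfree'd when the job is destroyed. A sketch mirroring bsg_map_buffer() above (error handling reduced to the allocation check):

#include <linux/blkdev.h>
#include <linux/bsg-lib.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_map_buffer(struct bsg_buffer *buf, struct request *req)
{
        size_t sz = sizeof(struct scatterlist) * req->nr_phys_segments;

        buf->sg_list = kzalloc(sz, GFP_KERNEL);
        if (!buf->sg_list)
                return -ENOMEM;
        sg_init_table(buf->sg_list, req->nr_phys_segments);
        buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
        buf->payload_len = blk_rq_bytes(req);
        return 0;
}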
/linux-4.1.27/drivers/infiniband/hw/qib/
qib_ruc.c
92 ss->sg_list = qp->r_sg_list; qib_init_sge()
95 if (wqe->sg_list[i].length == 0) qib_init_sge()
98 if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge, qib_init_sge()
99 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE)) qib_init_sge()
101 qp->r_len += wqe->sg_list[i].length; qib_init_sge()
111 struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge; qib_init_sge()
427 sqp->s_sge.sge = wqe->sg_list[0]; qib_ruc_loopback()
428 sqp->s_sge.sg_list = wqe->sg_list + 1; qib_ruc_loopback()
465 qp->r_sge.sg_list = NULL; qib_ruc_loopback()
479 sqp->s_sge.sg_list = NULL; qib_ruc_loopback()
481 qp->r_sge.sge = wqe->sg_list[0]; qib_ruc_loopback()
482 qp->r_sge.sg_list = wqe->sg_list + 1; qib_ruc_loopback()
530 *sge = *sqp->s_sge.sg_list++; qib_ruc_loopback()
780 struct qib_sge *sge = &wqe->sg_list[i]; qib_send_complete()
qib_srq.c
83 wqe->sg_list[i] = wr->sg_list[i]; qib_post_srq_receive()
289 p->sg_list[i] = wqe->sg_list[i]; qib_modify_srq()
qib_ud.c
172 ssge.sg_list = swqe->sg_list + 1; qib_ud_loopback()
173 ssge.sge = *swqe->sg_list; qib_ud_loopback()
190 *sge = *ssge.sg_list++; qib_ud_loopback()
320 qp->s_sge.sge = wqe->sg_list[0]; qib_make_ud_req()
321 qp->s_sge.sg_list = wqe->sg_list + 1; qib_make_ud_req()
qib_uc.c
99 qp->s_sge.sge = wqe->sg_list[0]; qib_make_uc_req()
100 qp->s_sge.sg_list = wqe->sg_list + 1; qib_make_uc_req()
434 qp->r_sge.sg_list = NULL;
qib_verbs.h
337 * The size of the sg_list is determined when the QP is created and stored
341 struct ib_send_wr wr; /* don't use wr.sg_list */
345 u32 length; /* total length of data in sg_list */
346 struct qib_sge sg_list[0]; member in struct:qib_swqe
351 * The size of the sg_list is determined when the QP (or SRQ) is created
357 struct ib_sge sg_list[0]; member in struct:qib_rwqe
391 struct qib_sge *sg_list; /* next SGE to be used if any */ member in struct:qib_sge_state
452 u8 s_max_sge; /* size of s_wq->sg_list */
1071 ss->sge = *ss->sg_list++; qib_put_ss()
qib_verbs.c
189 *sge = *ss->sg_list++; qib_copy_sge()
230 *sge = *ss->sg_list++; qib_skip_sge()
253 struct qib_sge *sg_list = ss->sg_list; qib_count_sge() local
277 sge = *sg_list++; qib_count_sge()
315 *sge = *ss->sg_list++; qib_copy_from_sge()
383 wr->sg_list[0].length < sizeof(u64) || qib_post_one_send()
384 wr->sg_list[0].addr & (sizeof(u64) - 1))) qib_post_one_send()
407 u32 length = wr->sg_list[i].length; qib_post_one_send()
412 ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j], qib_post_one_send()
413 &wr->sg_list[i], acc); qib_post_one_send()
438 struct qib_sge *sge = &wqe->sg_list[--j]; qib_post_one_send()
535 wqe->sg_list[i] = wr->sg_list[i]; qib_post_receive()
744 *sge = *ss->sg_list++; update_sge()
qib_rc.c
49 ss->sge = wqe->sg_list[0]; restart_sge()
50 ss->sg_list = wqe->sg_list + 1; restart_sge()
487 qp->s_sge.sge = wqe->sg_list[0]; qib_make_rc_req()
488 qp->s_sge.sg_list = wqe->sg_list + 1; qib_make_rc_req()
1013 struct qib_sge *sge = &wqe->sg_list[i]; qib_rc_send_complete()
1069 struct qib_sge *sge = &wqe->sg_list[i]; do_rc_completion()
1205 u64 *vaddr = wqe->sg_list[0].vaddr; do_rc_ack()
2063 qp->r_sge.sg_list = NULL; qib_rc_rcv()
qib_qp.c
433 struct qib_sge *sge = &wqe->sg_list[i]; clear_mr_refs()
qib_sdma.c
622 *sge = *ss->sg_list++; qib_sdma_verbs_send()
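
A recurring idiom across the qib (and later ipath) hits is "*sge = *ss->sg_list++;": the SGE currently being consumed lives by value in ss->sge, sg_list points at the next unused entry, and emptying the current SGE just pulls the next one in. A self-contained sketch with a simplified state struct (the real qib_sge_state carries more bookkeeping):

#include <rdma/ib_verbs.h>

struct example_sge_state {
        struct ib_sge *sg_list; /* next SGE to be used, if any */
        struct ib_sge sge;      /* SGE currently being consumed */
        u8 num_sge;             /* SGEs remaining, including 'sge' */
};

/* Consume len bytes from the current SGE, advancing when it empties. */
static void example_consume(struct example_sge_state *ss, u32 len)
{
        ss->sge.addr += len;
        ss->sge.length -= len;
        if (ss->sge.length == 0 && --ss->num_sge)
                ss->sge = *ss->sg_list++;
}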
/linux-4.1.27/drivers/infiniband/hw/ehca/
ehca_reqs.c
85 memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list)); ehca_write_rwqe()
91 wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr = ehca_write_rwqe()
92 recv_wr->sg_list[cnt_ds].addr; ehca_write_rwqe()
93 wqe_p->u.all_rcv.sg_list[cnt_ds].lkey = ehca_write_rwqe()
94 recv_wr->sg_list[cnt_ds].lkey; ehca_write_rwqe()
95 wqe_p->u.all_rcv.sg_list[cnt_ds].length = ehca_write_rwqe()
96 recv_wr->sg_list[cnt_ds].length; ehca_write_rwqe()
119 struct ib_sge *sge = send_wr->sg_list; trace_send_wr_ud()
175 memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list)); ehca_write_swqe()
247 wqe_p->u.ud_av.sg_list[idx].vaddr = ehca_write_swqe()
248 send_wr->sg_list[idx].addr; ehca_write_swqe()
249 wqe_p->u.ud_av.sg_list[idx].lkey = ehca_write_swqe()
250 send_wr->sg_list[idx].lkey; ehca_write_swqe()
251 wqe_p->u.ud_av.sg_list[idx].length = ehca_write_swqe()
252 send_wr->sg_list[idx].length; ehca_write_swqe()
281 wqe_p->u.nud.sg_list[idx].vaddr = ehca_write_swqe()
282 send_wr->sg_list[idx].addr; ehca_write_swqe()
283 wqe_p->u.nud.sg_list[idx].lkey = ehca_write_swqe()
284 send_wr->sg_list[idx].lkey; ehca_write_swqe()
285 wqe_p->u.nud.sg_list[idx].length = ehca_write_swqe()
286 send_wr->sg_list[idx].length; ehca_write_swqe()
287 dma_length += send_wr->sg_list[idx].length; ehca_write_swqe()
ehca_qes.h
156 struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES]; member in struct:ehca_wqe::__anon4846::__anon4847
164 struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES]; member in struct:ehca_wqe::__anon4846::__anon4848
168 struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES - member in struct:ehca_wqe::__anon4846::__anon4849
176 struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES]; member in struct:ehca_wqe::__anon4846::__anon4850
ehca_qp.c
370 u.nud.sg_list[act_nr_sge]); ehca_calc_wqe_size()
/linux-4.1.27/drivers/infiniband/hw/ipath/
ipath_ruc.c
131 if (wqe->sg_list[i].length == 0) ipath_init_sge()
134 if (!ipath_lkey_ok(qp, j ? &ss->sg_list[j - 1] : &ss->sge, ipath_init_sge()
135 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE)) ipath_init_sge()
137 *lengthp += wqe->sg_list[i].length; ipath_init_sge()
210 qp->r_sge.sg_list = qp->r_sg_list; ipath_get_rwqe()
328 sqp->s_sge.sge = wqe->sg_list[0]; ipath_ruc_loopback()
329 sqp->s_sge.sg_list = wqe->sg_list + 1; ipath_ruc_loopback()
370 qp->r_sge.sge = wqe->sg_list[0]; ipath_ruc_loopback()
371 qp->r_sge.sg_list = wqe->sg_list + 1; ipath_ruc_loopback()
414 *sge = *sqp->s_sge.sg_list++; ipath_ruc_loopback()
ipath_srq.c
83 wqe->sg_list[i] = wr->sg_list[i]; ipath_post_srq_receive()
293 p->sg_list[i] = wqe->sg_list[i]; ipath_modify_srq()
ipath_uc.c
98 qp->s_sge.sge = wqe->sg_list[0]; ipath_make_uc_req()
99 qp->s_sge.sg_list = wqe->sg_list + 1; ipath_make_uc_req()
453 qp->r_sge.sg_list = NULL;
ipath_keys.c
226 ss->sg_list = NULL; ipath_rkey_ok()
263 ss->sg_list = NULL; ipath_rkey_ok()
ipath_verbs.h
276 * The size of the sg_list is determined when the QP is created and stored
280 struct ib_send_wr wr; /* don't use wr.sg_list */
284 u32 length; /* total length of data in sg_list */
285 struct ipath_sge sg_list[0]; member in struct:ipath_swqe
290 * The size of the sg_list is determined when the QP (or SRQ) is created
296 struct ib_sge sg_list[0]; member in struct:ipath_rwqe
329 struct ipath_sge *sg_list; /* next SGE to be used if any */ member in struct:ipath_sge_state
409 u8 s_max_sge; /* size of s_wq->sg_list */
ipath_verbs.c
189 *sge = *ss->sg_list++; ipath_copy_sge()
228 *sge = *ss->sg_list++; ipath_skip_sge()
251 struct ipath_sge *sg_list = ss->sg_list; ipath_count_sge() local
275 sge = *sg_list++; ipath_count_sge()
314 *sge = *ss->sg_list++; ipath_copy_from_sge()
383 wr->sg_list[0].length < sizeof(u64) || ipath_post_one_send()
384 wr->sg_list[0].addr & (sizeof(u64) - 1))) ipath_post_one_send()
404 u32 length = wr->sg_list[i].length; ipath_post_one_send()
409 ok = ipath_lkey_ok(qp, &wqe->sg_list[j], ipath_post_one_send()
410 &wr->sg_list[i], acc); ipath_post_one_send()
515 wqe->sg_list[i] = wr->sg_list[i]; ipath_post_receive()
789 *sge = *ss->sg_list++; update_sge()
ipath_ud.c
134 rsge.sg_list = qp->r_ud_sg_list; ipath_ud_loopback()
184 sge = swqe->sg_list; ipath_ud_loopback()
323 qp->s_sge.sge = wqe->sg_list[0]; ipath_make_ud_req()
324 qp->s_sge.sg_list = wqe->sg_list + 1; ipath_make_ud_req()
ipath_rc.c
48 ss->sge = wqe->sg_list[0]; restart_sge()
49 ss->sg_list = wqe->sg_list + 1; restart_sge()
461 qp->s_sge.sge = wqe->sg_list[0]; ipath_make_rc_req()
462 qp->s_sge.sg_list = wqe->sg_list + 1; ipath_make_rc_req()
936 *(u64 *) wqe->sg_list[0].vaddr = val; do_rc_ack()
1452 e->rdma_sge.sg_list = NULL; OP()
1779 qp->r_sge.sg_list = NULL;
1839 e->rdma_sge.sg_list = NULL; OP()
ipath_sdma.c
764 *sge = *ss->sg_list++; ipath_sdma_verbs_send()
/linux-4.1.27/drivers/virt/
fsl_hypervisor.c
153 struct fh_sg_list *sg_list = NULL; ioctl_memcpy() local
233 * sg_list is the list of fh_sg_list objects that we pass to the ioctl_memcpy()
243 sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list)); ioctl_memcpy()
265 sg_list[0].source = page_to_phys(pages[0]) + lb_offset; ioctl_memcpy()
266 sg_list[0].target = param.remote_paddr; ioctl_memcpy()
268 sg_list[0].source = param.remote_paddr; ioctl_memcpy()
269 sg_list[0].target = page_to_phys(pages[0]) + lb_offset; ioctl_memcpy()
271 sg_list[0].size = min_t(uint64_t, param.count, PAGE_SIZE - lb_offset); ioctl_memcpy()
273 remote_paddr = param.remote_paddr + sg_list[0].size; ioctl_memcpy()
274 count = param.count - sg_list[0].size; ioctl_memcpy()
279 sg_list[i].source = page_to_phys(pages[i]); ioctl_memcpy()
280 sg_list[i].target = remote_paddr; ioctl_memcpy()
283 sg_list[i].source = remote_paddr; ioctl_memcpy()
284 sg_list[i].target = page_to_phys(pages[i]); ioctl_memcpy()
286 sg_list[i].size = min_t(uint64_t, count, PAGE_SIZE); ioctl_memcpy()
288 remote_paddr += sg_list[i].size; ioctl_memcpy()
289 count -= sg_list[i].size; ioctl_memcpy()
293 virt_to_phys(sg_list), num_pages); ioctl_memcpy()
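
ioctl_memcpy() above builds one fh_sg_list descriptor per guest page (with a shorter first entry when the local buffer is not page-aligned) and then hands virt_to_phys(sg_list) to the hypervisor, as the fh_partition_memcpy() hit under arch/powerpc further down shows. A condensed sketch of the per-page fill for the local-to-remote direction (the wrapper function is hypothetical; the field assignments mirror the hits):

#include <linux/kernel.h>
#include <linux/mm.h>

/* struct fh_sg_list is the descriptor documented in asm/fsl_hcalls.h */
static void example_fill_sg(struct fh_sg_list *sg_list, struct page **pages,
                            unsigned int num_pages, u64 remote_paddr,
                            u64 count)
{
        unsigned int i;

        for (i = 0; i < num_pages; i++) {
                sg_list[i].source = page_to_phys(pages[i]);
                sg_list[i].target = remote_paddr;
                sg_list[i].size = min_t(u64, count, PAGE_SIZE);
                remote_paddr += sg_list[i].size;
                count -= sg_list[i].size;
        }
}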
/linux-4.1.27/drivers/crypto/qce/
dma.h
52 int qce_countsg(struct scatterlist *sg_list, int nbytes, bool *chained);
/linux-4.1.27/drivers/dma/
imx-dma.c
165 struct scatterlist *sg_list; member in struct:imxdma_channel
804 kfree(imxdmac->sg_list); imxdma_free_chan_resources()
805 imxdmac->sg_list = NULL; imxdma_free_chan_resources()
879 kfree(imxdmac->sg_list); imxdma_prep_dma_cyclic()
881 imxdmac->sg_list = kcalloc(periods + 1, imxdma_prep_dma_cyclic()
883 if (!imxdmac->sg_list) imxdma_prep_dma_cyclic()
886 sg_init_table(imxdmac->sg_list, periods); imxdma_prep_dma_cyclic()
889 imxdmac->sg_list[i].page_link = 0; imxdma_prep_dma_cyclic()
890 imxdmac->sg_list[i].offset = 0; imxdma_prep_dma_cyclic()
891 imxdmac->sg_list[i].dma_address = dma_addr; imxdma_prep_dma_cyclic()
892 sg_dma_len(&imxdmac->sg_list[i]) = period_len; imxdma_prep_dma_cyclic()
897 imxdmac->sg_list[periods].offset = 0; imxdma_prep_dma_cyclic()
898 sg_dma_len(&imxdmac->sg_list[periods]) = 0; imxdma_prep_dma_cyclic()
899 imxdmac->sg_list[periods].page_link = imxdma_prep_dma_cyclic()
900 ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; imxdma_prep_dma_cyclic()
903 desc->sg = imxdmac->sg_list; imxdma_prep_dma_cyclic()
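
imx-dma makes the transfer cyclic by allocating periods + 1 scatterlist entries and turning the extra one into a chain entry pointing back to the head, so a walk of the list never terminates. The open-coded page_link arithmetic above (set bit 0 for "chain", clear bit 1 for "end") is what sg_chain() does, so a sketch can lean on it (assuming the architecture supports SG chaining):

#include <linux/scatterlist.h>
#include <linux/types.h>

/* sgl must have periods + 1 entries; entry [periods] becomes the loop-back */
static void example_make_cyclic(struct scatterlist *sgl, unsigned int periods,
                                dma_addr_t buf, size_t period_len)
{
        unsigned int i;

        for (i = 0; i < periods; i++) {
                sgl[i].page_link = 0;   /* plain entry: no chain/end bits */
                sgl[i].offset = 0;
                sgl[i].dma_address = buf + i * period_len;
                sg_dma_len(&sgl[i]) = period_len;
        }
        sg_chain(sgl, periods + 1, sgl);        /* tail chains back to sgl[0] */
}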
/linux-4.1.27/include/linux/
bsg-lib.h
36 struct scatterlist *sg_list; member in struct:bsg_buffer
agp_backend.h
85 struct scatterlist *sg_list; member in struct:agp_memory
efi.h
523 unsigned long sg_list);
/linux-4.1.27/drivers/staging/i2o/
i2o_config.c
535 struct i2o_dma sg_list[SG_TABLESIZE]; i2o_cfg_passthru32() local
602 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); i2o_cfg_passthru32()
635 p = &(sg_list[sg_index]); i2o_cfg_passthru32()
713 sg_list[j].virt, sg_size)) { i2o_cfg_passthru32()
716 c->name, sg_list[j].virt, i2o_cfg_passthru32()
742 i2o_dma_free(&c->pdev->dev, &sg_list[i]); i2o_cfg_passthru32()
786 struct i2o_dma sg_list[SG_TABLESIZE]; i2o_cfg_passthru() local
844 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); i2o_cfg_passthru()
877 p = &(sg_list[sg_index]); i2o_cfg_passthru()
952 ((void __user *)sg[j].addr_bus, sg_list[j].virt, i2o_cfg_passthru()
956 c->name, sg_list[j].virt, i2o_cfg_passthru()
983 i2o_dma_free(&c->pdev->dev, &sg_list[i]); i2o_cfg_passthru()
/linux-4.1.27/drivers/xen/
efi.c
223 unsigned long sg_list) xen_efi_update_capsule()
233 efi_data(op).u.update_capsule.sg_list = sg_list; xen_efi_update_capsule()
221 xen_efi_update_capsule(efi_capsule_header_t **capsules, unsigned long count, unsigned long sg_list) xen_efi_update_capsule() argument
/linux-4.1.27/drivers/infiniband/hw/cxgb4/
qp.c
374 if ((plen + wr->sg_list[i].length) > max) build_immd()
376 srcp = (u8 *)(unsigned long)wr->sg_list[i].addr; build_immd()
377 plen += wr->sg_list[i].length; build_immd()
378 rem = wr->sg_list[i].length; build_immd()
404 struct fw_ri_isgl *isglp, struct ib_sge *sg_list, build_isgl()
413 if ((plen + sg_list[i].length) < plen) build_isgl()
415 plen += sg_list[i].length; build_isgl()
416 *flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) | build_isgl()
417 sg_list[i].length); build_isgl()
420 *flitp = cpu_to_be64(sg_list[i].addr); build_isgl()
482 wr->sg_list, wr->num_sge, &plen); build_rdma_send()
525 wr->sg_list, wr->num_sge, &plen); build_rdma_write()
553 wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey); build_rdma_read()
554 wqe->read.plen = cpu_to_be32(wr->sg_list[0].length); build_rdma_read()
555 wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr build_rdma_read()
557 wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr)); build_rdma_read()
580 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL); build_rdma_recv()
792 swsqe->read_len = wr->sg_list[0].length; c4iw_post_send()
403 build_isgl(__be64 *queue_start, __be64 *queue_end, struct fw_ri_isgl *isglp, struct ib_sge *sg_list, int num_sge, u32 *plenp) build_isgl() argument
/linux-4.1.27/drivers/scsi/aacraid/
commctrl.c
480 void *sg_list[32]; aac_send_raw_srb() local
507 memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */ aac_send_raw_srb()
560 if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) { aac_send_raw_srb()
619 sg_list[i] = p; // save so we can clean up later aac_send_raw_srb()
671 sg_list[i] = p; // save so we can clean up later aac_send_raw_srb()
725 sg_list[i] = p; // save so we can clean up later aac_send_raw_srb()
761 sg_list[i] = p; // save so we can clean up later aac_send_raw_srb()
804 if(copy_to_user(sg_user[i], sg_list[i], byte_count)){ aac_send_raw_srb()
823 kfree(sg_list[i]); aac_send_raw_srb()
/linux-4.1.27/drivers/scsi/qla4xxx/
ql4_bsg.c
63 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, qla4xxx_read_flash()
123 sg_copy_to_buffer(bsg_job->request_payload.sg_list, qla4xxx_update_flash()
187 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, qla4xxx_get_acb_state()
258 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, qla4xxx_read_nvram()
322 sg_copy_to_buffer(bsg_job->request_payload.sg_list, qla4xxx_update_nvram()
436 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, qla4xxx_bsg_get_acb()
/linux-4.1.27/drivers/net/ethernet/ibm/ehea/
ehea_qmr.h
120 struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES]; member in struct:ehea_swqe::__anon6479::__anon6480
129 struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1]; member in struct:ehea_swqe::__anon6479::__anon6481
146 struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES]; member in struct:ehea_rwqe
ehea_main.c
503 rwqe->sg_list[0].l_key = pr->recv_mr.lkey; ehea_refill_rq_def()
504 rwqe->sg_list[0].vaddr = tmp_addr; ehea_refill_rq_def()
505 rwqe->sg_list[0].len = packet_size; ehea_refill_rq_def()
1672 struct ehea_vsgentry *sg_list, *sg1entry, *sgentry; write_swqe2_data() local
1678 sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list; write_swqe2_data()
1701 sgentry = &sg_list[i - sg1entry_contains_frag_data]; write_swqe2_data()
2628 rwqe->sg_list[0].l_key = lkey; ehea_update_rqs()
2632 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); ehea_update_rqs()
2637 rwqe->sg_list[0].l_key = lkey; ehea_update_rqs()
2641 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); ehea_update_rqs()
/linux-4.1.27/drivers/infiniband/hw/mlx4/
srq.c
349 scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length); mlx4_ib_post_srq_recv()
350 scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey); mlx4_ib_post_srq_recv()
351 scat[i].addr = cpu_to_be64(wr->sg_list[i].addr); mlx4_ib_post_srq_recv()
mad.c
597 wr.sg_list = &list; mlx4_ib_send_to_slave()
1121 struct ib_sge sg_list; mlx4_ib_post_pv_qp_buf() local
1128 sg_list.addr = tun_qp->ring[index].map; mlx4_ib_post_pv_qp_buf()
1129 sg_list.length = size; mlx4_ib_post_pv_qp_buf()
1130 sg_list.lkey = ctx->mr->lkey; mlx4_ib_post_pv_qp_buf()
1133 recv_wr.sg_list = &sg_list; mlx4_ib_post_pv_qp_buf()
1250 wr.sg_list = &list; mlx4_ib_send_to_wire()
qp.c
2055 send_size += wr->sg_list[i].length; build_sriov_qp0_header()
2178 send_size += wr->sg_list[i].length; build_mlx_header()
2876 set_data_seg(dseg, wr->sg_list + i); mlx4_ib_post_send()
3002 scat->lkey = cpu_to_be32(wr->sg_list->lkey); mlx4_ib_post_recv()
3009 __set_data_seg(scat + i, wr->sg_list + i); mlx4_ib_post_recv()
/linux-4.1.27/drivers/scsi/
hptiop.h
222 struct hpt_iopsg sg_list[1]; member in struct:hpt_iop_request_block_command
239 struct hpt_iopsg sg_list[1]; member in struct:hpt_iop_request_scsi_command
3w-9xxx.c
1348 if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id])) twa_interrupt()
1349 scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length); twa_interrupt()
1393 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1); twa_load_sgl()
1394 newcommand->sg_list[0].length = cpu_to_le32(length); twa_load_sgl()
1855 command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); twa_scsiop_execute_scsi()
1856 command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH); twa_scsiop_execute_scsi()
1863 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg)); scsi_for_each_sg()
1864 command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg)); scsi_for_each_sg()
1865 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) { scsi_for_each_sg()
1876 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
1877 command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
1878 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
ips.c
1794 /* Fill in a single scb sg_list element from an address */
1814 scb->sg_list.enh_list[indx].address_lo = ips_fill_scb_sg_single()
1816 scb->sg_list.enh_list[indx].address_hi = ips_fill_scb_sg_single()
1818 scb->sg_list.enh_list[indx].length = cpu_to_le32(e_len); ips_fill_scb_sg_single()
1820 scb->sg_list.std_list[indx].address = ips_fill_scb_sg_single()
1822 scb->sg_list.std_list[indx].length = cpu_to_le32(e_len); ips_fill_scb_sg_single()
1838 IPS_SG_LIST sg_list; ips_flash_firmware() local
1853 sg_list.list = scb->sg_list.list; ips_flash_firmware()
1858 scb->sg_list.list = sg_list.list; ips_flash_firmware()
1911 IPS_SG_LIST sg_list; ips_usrcmd() local
1920 sg_list.list = scb->sg_list.list; ips_usrcmd()
1927 scb->sg_list.list = sg_list.list; ips_usrcmd()
4300 ha->scbs->sg_list.list, ips_deallocatescbs()
4352 scb_p->sg_list.enh_list = ips_allocatescbs()
4357 scb_p->sg_list.std_list = ips_allocatescbs()
4386 IPS_SG_LIST sg_list; ips_init_scb() local
4393 sg_list.list = scb->sg_list.list; ips_init_scb()
4409 scb->sg_list.list = sg_list.list; ips_init_scb()
3w-sas.c
345 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg)); scsi_for_each_sg()
346 command_packet->sg_list[i].length = TW_CPU_TO_SGL(sg_dma_len(sg)); scsi_for_each_sg()
353 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
354 command_packet->sg_list[i].length = TW_CPU_TO_SGL(sglistarg[i].length);
697 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1); twl_load_sgl()
698 newcommand->sg_list[0].length = TW_CPU_TO_SGL(length); twl_load_sgl()
1220 if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id])) twl_interrupt()
1221 scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length); twl_interrupt()
dpt_i2o.c
1713 void *sg_list[pHba->sg_tablesize]; adpt_i2o_passthru() local
1757 memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize); adpt_i2o_passthru()
1785 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame. adpt_i2o_passthru()
1857 if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) { adpt_i2o_passthru()
1858 printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus); adpt_i2o_passthru()
1886 if(sg_list[--sg_index]) { adpt_i2o_passthru()
1889 sg_list[sg_index], adpt_i2o_passthru()
wd719x.h
62 struct wd719x_sglist sg_list[WD719X_SG] __aligned(8); /* SG list */
3w-sas.h
252 TW_SG_Entry_ISO sg_list[TW_LIBERATOR_MAX_SGL_LENGTH]; member in struct:TAG_TW_Command_Apache
wd719x.c
274 offsetof(struct wd719x_scb, sg_list)); wd719x_queuecommand()
277 scb->sg_list[i].ptr = cpu_to_le32(sg_dma_address(sg)); scsi_for_each_sg()
278 scb->sg_list[i].length = cpu_to_le32(sg_dma_len(sg)); scsi_for_each_sg()
advansys.c
367 ASC_SG_LIST sg_list[0]; member in struct:asc_sg_head
423 ASC_SG_LIST sg_list[7]; member in struct:asc_risc_sg_list_q
1828 } sg_list[NO_OF_SG_PER_BLOCK]; member in struct:asc_sg_block
2624 i, (ulong)le32_to_cpu(sgp->sg_list[i].addr), asc_prt_asc_scsi_q()
2625 (ulong)le32_to_cpu(sgp->sg_list[i].bytes)); asc_prt_asc_scsi_q()
2663 i, (ulong)b->sg_list[i].sg_addr, asc_prt_adv_sgblock()
2664 (ulong)b->sg_list[i].sg_count); asc_prt_adv_sgblock()
6984 sg_list[scsiq->next_sg_index], AscIsrChipHalted()
7948 asc_sg_head->sg_list[sgcnt].addr = scsi_for_each_sg()
7950 asc_sg_head->sg_list[sgcnt].bytes = scsi_for_each_sg()
8058 sg_block->sg_list[i].sg_addr = adv_get_sglist()
8060 sg_block->sg_list[i].sg_count = adv_get_sglist()
8209 static int AscSgListToQueue(int sg_list) AscSgListToQueue() argument
8213 n_sg_list_qs = ((sg_list - 1) / ASC_SG_LIST_PER_Q); AscSgListToQueue()
8214 if (((sg_list - 1) % ASC_SG_LIST_PER_Q) != 0) AscSgListToQueue()
8374 scsiq->q1.data_addr = (ASC_PADDR) sg_head->sg_list[0].addr; AscPutReadySgListQueue()
8375 scsiq->q1.data_cnt = (ASC_DCNT) sg_head->sg_list[0].bytes; AscPutReadySgListQueue()
8474 sg_list[sg_index], AscPutReadySgListQueue()
8612 (ADV_PADDR)sg_head->sg_list[0].addr; AscExeScsiQueue()
8614 (ADV_DCNT)sg_head->sg_list[0].bytes; AscExeScsiQueue()
8627 (ADV_DCNT)le32_to_cpu(sg_head->sg_list[i]. AscExeScsiQueue()
8667 sg_list AscExeScsiQueue()
8671 sg_list AscExeScsiQueue()
8688 sg_list AscExeScsiQueue()
8694 sg_list AscExeScsiQueue()
nsp32.c
2661 data->sg_list = pci_alloc_consistent(pdev, NSP32_SG_TABLE_SIZE, nsp32_detect()
2663 if (data->sg_list == NULL) { nsp32_detect()
2677 .sglun = &(data->sg_list[offset]), nsp32_detect()
2768 data->sg_list, data->sg_paddr); nsp32_detect()
2790 if (data->sg_list) { nsp32_release()
2792 data->sg_list, data->sg_paddr); nsp32_release()
eata_generic.h
314 struct eata_sg_list *sg_list; member in struct:eata_ccb
u14-34f.c
544 struct sg_list { struct
585 struct sg_list *sglist; /* pointer to the allocated SG list */
983 sh[j]->sg_tablesize * sizeof(struct sg_list), port_detect()
1143 cpp->use_sg * sizeof(struct sg_list),
3w-xxxx.h
386 TW_SG_Entry sg_list[TW_ATA_PASS_SGL_MAX]; member in struct:TAG_TW_Passthru
ips.h
1100 IPS_SG_LIST sg_list; member in struct:ips_scb
1126 IPS_SG_LIST *sg_list; member in struct:ips_scb_pt
libiscsi_tcp.c
369 struct scatterlist *sg_list, unsigned int sg_count, iscsi_segment_seek_sg()
377 for_each_sg(sg_list, sg, sg_count, i) { for_each_sg()
368 iscsi_segment_seek_sg(struct iscsi_segment *segment, struct scatterlist *sg_list, unsigned int sg_count, unsigned int offset, size_t size, iscsi_segment_done_fn_t *done, struct hash_desc *hash) iscsi_segment_seek_sg() argument
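
iscsi_segment_seek_sg() has to start I/O at an arbitrary byte offset inside a scatterlist, so it walks the entries with for_each_sg(), subtracting each length until the offset lands inside the current entry. A hedged sketch of just that seek step (the helper name and out-parameter are illustrative):

#include <linux/scatterlist.h>

static struct scatterlist *example_seek_sg(struct scatterlist *sg_list,
                                           unsigned int sg_count,
                                           unsigned int offset,
                                           unsigned int *sg_offset)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sg_list, sg, sg_count, i) {
                if (offset < sg->length) {
                        *sg_offset = offset;    /* offset within this entry */
                        return sg;
                }
                offset -= sg->length;
        }
        return NULL;    /* offset lies beyond the end of the list */
}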
eata.c
742 struct sg_list { struct
811 struct sg_list *sglist; /* pointer to the allocated SG list */
1346 size_t sz = shost->sg_tablesize *sizeof(struct sg_list); port_detect()
1634 sizeof(struct sg_list),
1636 cpp->data_len = H2DEV((scsi_sg_count(SCpnt) * sizeof(struct sg_list)));
scsi_transport_fc.c
3575 kfree(job->request_payload.sg_list); fc_destroy_bsgjob()
3576 kfree(job->reply_payload.sg_list); fc_destroy_bsgjob()
3679 buf->sg_list = kzalloc(sz, GFP_KERNEL); fc_bsg_map_buffer()
3680 if (!buf->sg_list) fc_bsg_map_buffer()
3682 sg_init_table(buf->sg_list, req->nr_phys_segments); fc_bsg_map_buffer()
3683 buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list); fc_bsg_map_buffer()
3756 kfree(job->request_payload.sg_list); fc_req_to_bsgjob()
nsp32.h
575 nsp32_sglun *sg_list; /* sglist virtuxal address */ member in struct:_nsp32_hw_data
3w-9xxx.h
521 TW_SG_Entry sg_list[TW_APACHE_MAX_SGL_LENGTH]; member in struct:TAG_TW_Command_Apache
hptiop.c
767 memcpy(scp->sense_buffer, &req->sg_list, hptiop_finish_scsi_req()
1050 sg_count = hptiop_buildsgl(scp, req->sg_list); hptiop_queuecommand_lck()
3w-xxxx.c
974 passthru->sg_list[0].address = dma_handle + sizeof(TW_New_Ioctl) - 1; tw_chrdev_ioctl()
975 passthru->sg_list[0].length = data_buffer_length_adjusted; tw_chrdev_ioctl()
/linux-4.1.27/drivers/firmware/efi/
runtime-wrappers.c
258 unsigned long sg_list) virt_efi_update_capsule()
267 status = efi_call_virt(update_capsule, capsules, count, sg_list); virt_efi_update_capsule()
256 virt_efi_update_capsule(efi_capsule_header_t **capsules, unsigned long count, unsigned long sg_list) virt_efi_update_capsule() argument
/linux-4.1.27/drivers/infiniband/ulp/ipoib/
ipoib_verbs.c
227 priv->tx_wr.sg_list = priv->tx_sge; ipoib_transport_dev_init()
236 priv->rx_wr.sg_list = priv->rx_sge; ipoib_transport_dev_init()
ipoib_cm.c
342 wr->sg_list = sge; ipoib_cm_init_rx_wr()
/linux-4.1.27/drivers/crypto/
bfin_crc.c
103 static int sg_count(struct scatterlist *sg_list) sg_count() argument
105 struct scatterlist *sg = sg_list; sg_count()
108 if (sg_list == NULL) sg_count()
122 static struct scatterlist *sg_get(struct scatterlist *sg_list, unsigned int nents, sg_get() argument
128 for_each_sg(sg_list, sg, nents, i) sg_get()
atmel-aes.c
164 struct scatterlist *sg_list; atmel_aes_sg_length() local
167 sg_list = sg; atmel_aes_sg_length()
171 len = min(sg_list->length, total); atmel_aes_sg_length()
176 sg_list = sg_next(sg_list); atmel_aes_sg_length()
177 if (!sg_list) atmel_aes_sg_length()
sahara.c
282 struct scatterlist *sg_list; sahara_sg_length() local
285 sg_list = sg; sahara_sg_length()
288 len = min(sg_list->length, total); sahara_sg_length()
293 sg_list = sg_next(sg_list); sahara_sg_length()
294 if (!sg_list) sahara_sg_length()
picoxcell_crypto.c
264 static int sg_count(struct scatterlist *sg_list, int nbytes) sg_count() argument
266 struct scatterlist *sg = sg_list; sg_count()
talitos.c
1093 static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained) sg_count() argument
1095 struct scatterlist *sg = sg_list; sg_count()
/linux-4.1.27/arch/powerpc/include/asm/
fsl_hcalls.h
352 * @sg_list: guest physical address of an array of &fh_sg_list structures
353 * @count: the number of entries in @sg_list
358 unsigned int target, phys_addr_t sg_list, unsigned int count) fh_partition_memcpy()
370 r5 = (uint32_t) sg_list; fh_partition_memcpy()
373 r6 = sg_list >> 32; fh_partition_memcpy()
357 fh_partition_memcpy(unsigned int source, unsigned int target, phys_addr_t sg_list, unsigned int count) fh_partition_memcpy() argument
/linux-4.1.27/drivers/infiniband/hw/mlx5/
srq.c
459 scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length); mlx5_ib_post_srq_recv()
460 scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey); mlx5_ib_post_srq_recv()
461 scat[i].addr = cpu_to_be64(wr->sg_list[i].addr); mlx5_ib_post_srq_recv()
qp.c
2107 addr = (void *)(unsigned long)(wr->sg_list[i].addr); set_data_inl_seg()
2108 len = wr->sg_list[i].length; set_data_inl_seg()
2245 u32 data_len = wr->sg_list->length; set_sig_data_segment()
2246 u32 data_key = wr->sg_list->lkey; set_sig_data_segment()
2247 u64 data_va = wr->sg_list->addr; set_sig_data_segment()
2388 region_len = wr->sg_list->length; set_sig_umr_wr()
2390 (wr->wr.sig_handover.prot->lkey != wr->sg_list->lkey || set_sig_umr_wr()
2391 wr->wr.sig_handover.prot->addr != wr->sg_list->addr || set_sig_umr_wr()
2392 wr->wr.sig_handover.prot->length != wr->sg_list->length)) set_sig_umr_wr()
2830 if (likely(wr->sg_list[i].length)) { mlx5_ib_post_send()
2831 set_data_ptr_seg(dpseg, wr->sg_list + i); mlx5_ib_post_send()
2929 set_data_ptr_seg(scat + i, wr->sg_list + i); mlx5_ib_post_recv()
mr.c
702 wr->sg_list = sg; prep_umr_reg_wqe()
933 wr.sg_list = &sg; mlx5_ib_update_mtt()
/linux-4.1.27/drivers/infiniband/core/
mad.c
955 mad_send_wr->sg_list[0].length = hdr_len; ib_create_send_mad()
956 mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey; ib_create_send_mad()
957 mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len; ib_create_send_mad()
958 mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey; ib_create_send_mad()
961 mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list; ib_create_send_mad()
1076 sge = mad_send_wr->sg_list; ib_send_mad()
2221 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); ib_mad_send_done_handler()
2224 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE); ib_mad_send_done_handler()
2647 struct ib_sge sg_list; ib_mad_post_receive_mads() local
2652 sg_list.length = sizeof *mad_priv - sizeof mad_priv->header; ib_mad_post_receive_mads()
2653 sg_list.lkey = (*qp_info->port_priv->mr).lkey; ib_mad_post_receive_mads()
2657 recv_wr.sg_list = &sg_list; ib_mad_post_receive_mads()
2674 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, ib_mad_post_receive_mads()
2680 sg_list.addr))) { ib_mad_post_receive_mads()
2684 mad_priv->header.mapping = sg_list.addr; ib_mad_post_receive_mads()
mad_priv.h
129 struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; member in struct:ib_mad_send_wr_private
uverbs_cmd.c
2301 next->sg_list = (void *) next + ib_uverbs_post_send()
2303 if (copy_from_user(next->sg_list, ib_uverbs_post_send()
2313 next->sg_list = NULL; ib_uverbs_post_send()
2402 next->sg_list = (void *) next + ib_uverbs_unmarshall_recv()
2404 if (copy_from_user(next->sg_list, ib_uverbs_unmarshall_recv()
2413 next->sg_list = NULL; ib_uverbs_unmarshall_recv()
/linux-4.1.27/include/scsi/
libiscsi_tcp.h
117 struct scatterlist *sg_list, unsigned int sg_count,
scsi_transport_fc.h
630 struct scatterlist *sg_list; member in struct:fc_bsg_buffer
/linux-4.1.27/drivers/infiniband/hw/amso1100/
c2_qp.c
845 ib_wr->sg_list, c2_post_send()
868 ib_wr->sg_list, c2_post_send()
888 cpu_to_be32(ib_wr->sg_list->lkey); c2_post_send()
890 cpu_to_be64(ib_wr->sg_list->addr); c2_post_send()
896 cpu_to_be32(ib_wr->sg_list->length); c2_post_send()
980 ib_wr->sg_list, c2_post_receive()
/linux-4.1.27/drivers/char/agp/
intel-gtt.c
123 static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg) intel_gtt_unmap_memory() argument
128 pci_unmap_sg(intel_private.pcidev, sg_list, intel_gtt_unmap_memory()
131 st.sgl = sg_list; intel_gtt_unmap_memory()
919 mem->sg_list = st.sgl; intel_fake_agp_insert_entries()
955 intel_gtt_unmap_memory(mem->sg_list, mem->num_sg); intel_fake_agp_remove_entries()
956 mem->sg_list = NULL; intel_fake_agp_remove_entries()
/linux-4.1.27/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
1944 struct ib_sge *sg_list) ocrdma_build_sges()
1949 sge[i].lrkey = sg_list[i].lkey; ocrdma_build_sges()
1950 sge[i].addr_lo = sg_list[i].addr; ocrdma_build_sges()
1951 sge[i].addr_hi = upper_32_bits(sg_list[i].addr); ocrdma_build_sges()
1952 sge[i].len = sg_list[i].length; ocrdma_build_sges()
1953 hdr->total_len += sg_list[i].length; ocrdma_build_sges()
1959 static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge) ocrdma_sglist_len() argument
1964 total_len += sg_list[i].length; ocrdma_sglist_len()
1978 hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge); ocrdma_build_inline_sges()
1988 (void *)(unsigned long)wr->sg_list[i].addr, ocrdma_build_inline_sges()
1989 wr->sg_list[i].length); ocrdma_build_inline_sges()
1990 dpp_addr += wr->sg_list[i].length; ocrdma_build_inline_sges()
1998 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list); ocrdma_build_inline_sges()
2054 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list); ocrdma_build_read()
2288 ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list); ocrdma_build_rqe()
1942 ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr, struct ocrdma_sge *sge, int num_sge, struct ib_sge *sg_list) ocrdma_build_sges() argument
/linux-4.1.27/drivers/infiniband/hw/nes/
nes_verbs.c
3239 ib_wr->sg_list[sge_index].addr); fill_wqe_sg_send()
3241 ib_wr->sg_list[sge_index].length); fill_wqe_sg_send()
3244 (ib_wr->sg_list[sge_index].lkey)); fill_wqe_sg_send()
3248 total_payload_length += ib_wr->sg_list[sge_index].length; fill_wqe_sg_send()
3332 (ib_wr->sg_list[0].length <= 64)) { nes_post_send()
3334 (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length); nes_post_send()
3336 ib_wr->sg_list[0].length); nes_post_send()
3362 (ib_wr->sg_list[0].length <= 64)) { nes_post_send()
3364 (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length); nes_post_send()
3366 ib_wr->sg_list[0].length); nes_post_send()
3397 ib_wr->sg_list->length); nes_post_send()
3399 ib_wr->sg_list->addr); nes_post_send()
3401 ib_wr->sg_list->lkey); nes_post_send()
3594 ib_wr->sg_list[sge_index].addr); nes_post_recv()
3596 ib_wr->sg_list[sge_index].length); nes_post_recv()
3598 ib_wr->sg_list[sge_index].lkey); nes_post_recv()
3600 total_payload_length += ib_wr->sg_list[sge_index].length; nes_post_recv()
/linux-4.1.27/drivers/infiniband/ulp/isert/
ib_isert.c
1019 rx_wr->sg_list = &rx_desc->rx_sg; isert_post_recv()
1053 send_wr.sg_list = tx_desc->tx_sg; isert_post_send()
1124 send_wr->sg_list = &tx_desc->tx_sg[0]; isert_init_send_wr()
1146 rx_wr.sg_list = &sge; isert_rdma_post_recvl()
2401 send_wr->sg_list = ib_sge; isert_build_rdma_wr()
2428 isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
2429 send_wr->sg_list, send_wr->num_sge);
2737 sig_wr.sg_list = &rdma_wr->ib_sg[DATA]; isert_reg_sig_mr()
2889 send_wr->sg_list = &wr->s_ib_sge; isert_reg_rdma()
/linux-4.1.27/drivers/infiniband/hw/mthca/
mthca_srq.c
522 mthca_set_data_seg(wqe, wr->sg_list + i); mthca_tavor_post_srq_recv()
614 mthca_set_data_seg(wqe, wr->sg_list + i); mthca_arbel_post_srq_recv()
mthca_qp.c
1737 mthca_set_data_seg(wqe, wr->sg_list + i); mthca_tavor_post_send()
1862 mthca_set_data_seg(wqe, wr->sg_list + i); mthca_tavor_post_receive()
2078 mthca_set_data_seg(wqe, wr->sg_list + i); mthca_arbel_post_send()
2198 mthca_set_data_seg(wqe, wr->sg_list + i); mthca_arbel_post_receive()
/linux-4.1.27/net/9p/
trans_rdma.c
423 wr.sg_list = &sge; post_recv()
516 wr.sg_list = &sge; rdma_request()
trans_virtio.c
174 * @start: which segment of the sg_list to start at
217 * @start: which segment of the sg_list to start at
/linux-4.1.27/net/sunrpc/xprtrdma/
svc_rdma_recvfrom.c
199 read_wr.sg_list = ctxt->sge; rdma_read_chunk_lcl()
320 read_wr.sg_list = ctxt->sge; rdma_read_chunk_frmr()
svc_rdma_sendto.c
213 write_wr.sg_list = &sge[0]; send_write()
473 send_wr.sg_list = ctxt->sge; send_reply()
svc_rdma_transport.c
538 recv_wr.sg_list = &ctxt->sge[0]; svc_rdma_post_recv()
1353 err_wr.sg_list = ctxt->sge; svc_rdma_send_error()
verbs.c
1594 send_wr.sg_list = req->rl_send_iov; rpcrdma_ep_post()
1636 recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov; rpcrdma_ep_post_recv()
/linux-4.1.27/drivers/scsi/lpfc/
lpfc_bsg.c
235 sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt, lpfc_bsg_copy_data()
613 sg_copy_from_buffer(job->reply_payload.sg_list, lpfc_bsg_rport_els_cmp()
712 sg_copy_to_buffer(job->request_payload.sg_list, lpfc_bsg_rport_els()
1337 sg_copy_from_buffer(job->request_payload.sg_list, lpfc_bsg_hba_get_event()
3081 sg_copy_to_buffer(job->request_payload.sg_list, lpfc_bsg_diag_loopback_run()
3246 sg_copy_from_buffer(job->reply_payload.sg_list, lpfc_bsg_diag_loopback_run()
3378 sg_copy_from_buffer(job->reply_payload.sg_list, lpfc_bsg_issue_mbox_cmpl()
3572 sg_copy_from_buffer(job->reply_payload.sg_list, lpfc_bsg_issue_mbox_ext_handle_job()
4320 sg_copy_from_buffer(job->reply_payload.sg_list, lpfc_bsg_read_ebuf_get()
4376 sg_copy_to_buffer(job->request_payload.sg_list, lpfc_bsg_write_ebuf_set()
4654 sg_copy_to_buffer(job->request_payload.sg_list, lpfc_bsg_issue_mbox()
4861 sg_copy_from_buffer(job->reply_payload.sg_list, lpfc_bsg_issue_mbox()
/linux-4.1.27/drivers/infiniband/ulp/iser/
iser_verbs.c
1015 rx_wr.sg_list = &sge; iser_post_recvl()
1039 rx_wr->sg_list = &rx_desc->rx_sg; iser_post_recvm()
1076 send_wr.sg_list = tx_desc->tx_sg; iser_post_send()
iser_memory.c
678 sig_wr.sg_list = &data_reg->sge; iser_reg_sig_mr()
/linux-4.1.27/drivers/block/
skd_main.c
4191 struct fit_sg_descriptor *sg_list; skd_cons_sg_list() local
4194 nbytes = sizeof(*sg_list) * n_sg; skd_cons_sg_list()
4196 sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr); skd_cons_sg_list()
4198 if (sg_list != NULL) { skd_cons_sg_list()
4202 memset(sg_list, 0, nbytes); skd_cons_sg_list()
4208 sg_list[i].next_desc_ptr = dma_address + ndp_off; skd_cons_sg_list()
4210 sg_list[i].next_desc_ptr = 0LL; skd_cons_sg_list()
4213 return sg_list; skd_cons_sg_list()
4570 struct fit_sg_descriptor *sg_list, skd_free_sg_list()
4573 if (sg_list != NULL) { skd_free_sg_list()
4576 nbytes = sizeof(*sg_list) * n_sg; skd_free_sg_list()
4578 pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr); skd_free_sg_list()
4569 skd_free_sg_list(struct skd_device *skdev, struct fit_sg_descriptor *sg_list, u32 n_sg, dma_addr_t dma_addr) skd_free_sg_list() argument
/linux-4.1.27/drivers/scsi/libfc/
fc_lport.c
147 * @sg: job->reply_payload.sg_list
1985 sg_copy_to_buffer(job->request_payload.sg_list, fc_lport_els_request()
2009 info->sg = job->reply_payload.sg_list; fc_lport_els_request()
2046 sg_copy_to_buffer(job->request_payload.sg_list, fc_lport_ct_request()
2070 info->sg = job->reply_payload.sg_list; fc_lport_ct_request()
/linux-4.1.27/drivers/scsi/bfa/
bfad_bsg.c
3155 /* Copy the sg_list passed in to a linear buffer: holds the cmnd data */ bfad_im_bsg_vendor_request()
3156 sg_copy_to_buffer(job->request_payload.sg_list, bfad_im_bsg_vendor_request()
3166 /* Copy the response data to the job->reply_payload sg_list */ bfad_im_bsg_vendor_request()
3167 sg_copy_from_buffer(job->reply_payload.sg_list, bfad_im_bsg_vendor_request()
3468 /* map req sg - copy the sg_list passed in to the linear buffer */ bfad_im_bsg_els_ct_request()
3469 sg_copy_to_buffer(job->request_payload.sg_list, bfad_im_bsg_els_ct_request()
3530 sg_copy_from_buffer(job->reply_payload.sg_list, bfad_im_bsg_els_ct_request()
/linux-4.1.27/drivers/scsi/ibmvscsi/
ibmvfc.c
1859 req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list, ibmvfc_bsg_request()
1867 rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list, ibmvfc_bsg_request()
1871 dma_unmap_sg(vhost->dev, job->request_payload.sg_list, ibmvfc_bsg_request()
1911 mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list)); ibmvfc_bsg_request()
1912 mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list)); ibmvfc_bsg_request()
1913 mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list)); ibmvfc_bsg_request()
1914 mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list)); ibmvfc_bsg_request()
1943 dma_unmap_sg(vhost->dev, job->request_payload.sg_list, ibmvfc_bsg_request()
1945 dma_unmap_sg(vhost->dev, job->reply_payload.sg_list, ibmvfc_bsg_request()
/linux-4.1.27/arch/x86/platform/efi/
efi_64.c
547 unsigned long count, unsigned long sg_list) efi_thunk_update_capsule()
546 efi_thunk_update_capsule(efi_capsule_header_t **capsules, unsigned long count, unsigned long sg_list) efi_thunk_update_capsule() argument
/linux-4.1.27/include/xen/interface/
platform.h
199 uint64_t sg_list; /* machine address */ member in struct:xenpf_efi_runtime_call::__anon13918::__anon13924
/linux-4.1.27/drivers/misc/genwqe/
card_utils.c
549 * page_list and pci_alloc_consistent for the sg_list.
550 * The sg_list is currently itself not scattered, which could
/linux-4.1.27/drivers/s390/scsi/
zfcp_fc.c
993 ct_els->req = job->request_payload.sg_list; zfcp_fc_exec_bsg_job()
994 ct_els->resp = job->reply_payload.sg_list; zfcp_fc_exec_bsg_job()
/linux-4.1.27/drivers/crypto/amcc/
crypto4xx_core.c
747 static int get_sg_count(struct scatterlist *sg_list, int nbytes) get_sg_count() argument
749 struct scatterlist *sg = sg_list; get_sg_count()
/linux-4.1.27/net/rds/
ib_recv.c
59 recv->r_wr.sg_list = recv->r_sge; rds_ib_recv_init_ring()
525 wr->sg_list = sge; rds_ib_recv_init_ack()
iw_recv.c
95 recv->r_wr.sg_list = recv->r_sge; rds_iw_recv_init_ring()
356 wr->sg_list = sge; rds_iw_recv_init_ack()
ib_send.c
233 send->s_wr.sg_list = send->s_sge; rds_ib_send_init_ring()
iw_send.c
142 send->s_wr.sg_list = send->s_sge; rds_iw_send_init_ring()
/linux-4.1.27/drivers/scsi/aic7xxx/
aic79xx_core.c
467 sg = (struct ahd_dma64_seg *)scb->sg_list; ahd_setup_data_scb()
474 sg = (struct ahd_dma_seg *)scb->sg_list; ahd_setup_data_scb()
511 return ((uint8_t *)scb->sg_list + sg_offset); ahd_sg_bus_to_virt()
520 sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list) ahd_sg_virt_to_bus()
1741 struct ahd_dma64_seg *sg_list; ahd_dump_sglist() local
1743 sg_list = (struct ahd_dma64_seg*)scb->sg_list; ahd_dump_sglist()
1748 addr = ahd_le64toh(sg_list[i].addr); ahd_dump_sglist()
1749 len = ahd_le32toh(sg_list[i].len); ahd_dump_sglist()
1754 sg_list[i].len & AHD_SG_LEN_MASK, ahd_dump_sglist()
1755 (sg_list[i].len & AHD_DMA_LAST_SEG) ahd_dump_sglist()
1759 struct ahd_dma_seg *sg_list; ahd_dump_sglist() local
1761 sg_list = (struct ahd_dma_seg*)scb->sg_list; ahd_dump_sglist()
1765 len = ahd_le32toh(sg_list[i].len); ahd_dump_sglist()
1769 ahd_le32toh(sg_list[i].addr), ahd_dump_sglist()
5759 if (sg != scb->sg_list ahd_handle_ign_wide_residue()
5792 if (sg != scb->sg_list ahd_handle_ign_wide_residue()
6956 next_scb->sg_list = segs; ahd_alloc_scbs()
9055 sg = scb->sg_list; ahd_handle_scsi_status()
aic7xxx_core.c
380 return (&scb->sg_list[sg_index]); ahc_sg_bus_to_virt()
389 sg_index = sg - &scb->sg_list[1]; ahc_sg_virt_to_bus()
391 return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list))); ahc_sg_virt_to_bus()
417 /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr) ahc_sync_sglist()
1080 sg = scb->sg_list; ahc_handle_seqint()
1411 (ahc_le32toh(scb->sg_list[i].len) >> 24 ahc_handle_seqint()
1413 ahc_le32toh(scb->sg_list[i].addr), ahc_handle_seqint()
1414 ahc_le32toh(scb->sg_list[i].len) ahc_handle_seqint()
2125 (ahc_le32toh(scb->sg_list[i].len) >> 24
2127 ahc_le32toh(scb->sg_list[i].addr),
2128 ahc_le32toh(scb->sg_list[i].len));
4226 if (sg != scb->sg_list ahc_handle_ign_wide_residue()
5016 next_scb->sg_list = segs; ahc_alloc_scbs()
aic7xxx_osm.c
1529 sg = scb->sg_list; ahc_linux_run_command()
1559 scb->hscb->dataptr = scb->sg_list->addr;
1560 scb->hscb->datacnt = scb->sg_list->len;
aic79xx.h
617 void *sg_list; member in struct:scb
aic7xxx.h
576 struct ahc_dma_seg *sg_list; member in struct:scb
aic79xx_osm.c
1637 void *sg = scb->sg_list; ahd_linux_run_command()
/linux-4.1.27/drivers/char/
virtio_console.c
868 struct sg_list { struct
878 struct sg_list *sgl = sd->u.data; pipe_to_sg()
924 struct sg_list sgl; port_fops_splice_write()
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/o2iblnd/
o2iblnd_cb.c
168 rx->rx_wrq.sg_list = &rx->rx_sge; kiblnd_post_rx()
1077 wrq->sg_list = sge; kiblnd_init_tx_msg()
1141 wrq->sg_list = sge; kiblnd_init_rdma()
/linux-4.1.27/drivers/media/platform/soc_camera/
pxa_camera.c
321 * @sg_first: first element of sg_list
322 * @sg_first_ofs: offset in first element of sg_list
/linux-4.1.27/drivers/infiniband/ulp/srp/
ib_srp.c
1696 wr.sg_list = &list; srp_post_send()
1716 wr.sg_list = &list; srp_post_recv()
/linux-4.1.27/include/rdma/
ib_verbs.h
1031 struct ib_sge *sg_list; member in struct:ib_send_wr
1090 struct ib_sge *sg_list; member in struct:ib_recv_wr
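
These two members are what most of the InfiniBand hits in this listing are filling in: a work request carries a pointer to an array of struct ib_sge plus num_sge, and the provider walks that array when the request is posted. A minimal, hedged example of posting a one-SGE send (the queue pair, DMA mapping and lkey are assumed to exist already):

#include <rdma/ib_verbs.h>

static int example_post_send(struct ib_qp *qp, u64 dma_addr, u32 len, u32 lkey)
{
        struct ib_sge sge = {
                .addr   = dma_addr,
                .length = len,
                .lkey   = lkey,
        };
        struct ib_send_wr wr = {
                .opcode     = IB_WR_SEND,
                .sg_list    = &sge,
                .num_sge    = 1,
                .send_flags = IB_SEND_SIGNALED,
        };
        struct ib_send_wr *bad_wr;

        return ib_post_send(qp, &wr, &bad_wr);
}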
/linux-4.1.27/drivers/scsi/be2iscsi/
be_mgmt.c
511 sg_copy_to_buffer(job->request_payload.sg_list, mgmt_vendor_specific_fw_cmd()
be_main.c
5156 sg_copy_from_buffer(job->reply_payload.sg_list, beiscsi_bsg_request()
/linux-4.1.27/drivers/infiniband/ulp/srpt/
ib_srpt.c
789 wr.sg_list = &list; srpt_post_recv()
825 wr.sg_list = &list; srpt_post_send()
2818 wr.sg_list = riu->sge; srpt_perform_rdmas()

Completed in 3400 milliseconds