rdma 47 drivers/gpu/drm/mediatek/mtk_disp_rdma.c #define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size)
rdma 75 drivers/gpu/drm/mediatek/mtk_disp_rdma.c struct mtk_ddp_comp *rdma = &priv->ddp_comp;
rdma 78 drivers/gpu/drm/mediatek/mtk_disp_rdma.c writel(0x0, rdma->regs + DISP_REG_RDMA_INT_STATUS);
rdma 83 drivers/gpu/drm/mediatek/mtk_disp_rdma.c mtk_crtc_ddp_irq(priv->crtc, rdma);
rdma 100 drivers/gpu/drm/mediatek/mtk_disp_rdma.c struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
rdma 102 drivers/gpu/drm/mediatek/mtk_disp_rdma.c rdma->crtc = crtc;
rdma 109 drivers/gpu/drm/mediatek/mtk_disp_rdma.c struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
rdma 111 drivers/gpu/drm/mediatek/mtk_disp_rdma.c rdma->crtc = NULL;
rdma 132 drivers/gpu/drm/mediatek/mtk_disp_rdma.c struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
rdma 145 drivers/gpu/drm/mediatek/mtk_disp_rdma.c RDMA_FIFO_PSEUDO_SIZE(RDMA_FIFO_SIZE(rdma)) |
rdma 150 drivers/gpu/drm/mediatek/mtk_disp_rdma.c static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
rdma 195 drivers/gpu/drm/mediatek/mtk_disp_rdma.c struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
rdma 202 drivers/gpu/drm/mediatek/mtk_disp_rdma.c con = rdma_fmt_convert(rdma, fmt);
rdma 2084 drivers/infiniband/core/uverbs_cmd.c struct ib_rdma_wr *rdma;
rdma 2086 drivers/infiniband/core/uverbs_cmd.c next_size = sizeof(*rdma);
rdma 2087 drivers/infiniband/core/uverbs_cmd.c rdma = alloc_wr(next_size, user_wr->num_sge);
rdma 2088 drivers/infiniband/core/uverbs_cmd.c if (!rdma) {
rdma 2093 drivers/infiniband/core/uverbs_cmd.c rdma->remote_addr = user_wr->wr.rdma.remote_addr;
rdma 2094 drivers/infiniband/core/uverbs_cmd.c rdma->rkey = user_wr->wr.rdma.rkey;
rdma 2096 drivers/infiniband/core/uverbs_cmd.c next = &rdma->wr;
rdma 2103 drivers/infiniband/hw/bnxt_re/ib_verbs.c wqe->rdma.imm_data = wr->ex.imm_data;
rdma 2107 drivers/infiniband/hw/bnxt_re/ib_verbs.c wqe->rdma.inv_key = wr->ex.invalidate_rkey;
rdma 2112 drivers/infiniband/hw/bnxt_re/ib_verbs.c wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
rdma 2113 drivers/infiniband/hw/bnxt_re/ib_verbs.c wqe->rdma.r_key = rdma_wr(wr)->rkey;
rdma 1660 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
rdma 1662 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
rdma 1663 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
rdma 171 drivers/infiniband/hw/bnxt_re/qplib_fp.h } rdma;
rdma 73 drivers/infiniband/hw/mlx5/odp.c } rdma;
rdma 1268 drivers/infiniband/hw/mlx5/odp.c u32 rkey = pfault->rdma.r_key;
rdma 1277 drivers/infiniband/hw/mlx5/odp.c pfault->rdma.rdma_va += pfault->bytes_committed;
rdma 1278 drivers/infiniband/hw/mlx5/odp.c pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
rdma 1279 drivers/infiniband/hw/mlx5/odp.c pfault->rdma.rdma_op_len);
rdma 1282 drivers/infiniband/hw/mlx5/odp.c address = pfault->rdma.rdma_va;
rdma 1283 drivers/infiniband/hw/mlx5/odp.c length = pfault->rdma.rdma_op_len;
rdma 1290 drivers/infiniband/hw/mlx5/odp.c length = pfault->rdma.packet_size;
rdma 1387 drivers/infiniband/hw/mlx5/odp.c be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
rdma 1389 drivers/infiniband/hw/mlx5/odp.c be32_to_cpu(pf_eqe->rdma.pftype_token) &
rdma 1391 drivers/infiniband/hw/mlx5/odp.c pfault->rdma.r_key =
rdma 1392 drivers/infiniband/hw/mlx5/odp.c be32_to_cpu(pf_eqe->rdma.r_key);
rdma 1393 drivers/infiniband/hw/mlx5/odp.c pfault->rdma.packet_size =
rdma 1394 drivers/infiniband/hw/mlx5/odp.c be16_to_cpu(pf_eqe->rdma.packet_length);
rdma 1395 drivers/infiniband/hw/mlx5/odp.c pfault->rdma.rdma_op_len =
rdma 1396 drivers/infiniband/hw/mlx5/odp.c be32_to_cpu(pf_eqe->rdma.rdma_op_len);
rdma 1397 drivers/infiniband/hw/mlx5/odp.c pfault->rdma.rdma_va =
rdma 1398 drivers/infiniband/hw/mlx5/odp.c be64_to_cpu(pf_eqe->rdma.rdma_va);
rdma 1402 drivers/infiniband/hw/mlx5/odp.c pfault->rdma.r_key);
rdma 1405 drivers/infiniband/hw/mlx5/odp.c pfault->rdma.rdma_op_len,
rdma 1406 drivers/infiniband/hw/mlx5/odp.c pfault->rdma.rdma_va);
rdma 748 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c wqe_hdr->wr.rdma.remote_addr =
rdma 750 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c wqe_hdr->wr.rdma.rkey = rdma_wr(wr)->rkey;
rdma 95 drivers/infiniband/sw/rxe/rxe_req.c wqe->wr.wr.rdma.remote_addr :
rdma 439 drivers/infiniband/sw/rxe/rxe_req.c reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
rdma 554 drivers/infiniband/sw/rxe/rxe_verbs.c wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
rdma 555 drivers/infiniband/sw/rxe/rxe_verbs.c wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
rdma 146 drivers/macintosh/rack-meter.c struct rackmeter_dma *rdma = rm->dma_buf_v;
rdma 155 drivers/macintosh/rack-meter.c memset(rdma->buf1, 0, sizeof(rdma->buf1));
rdma 156 drivers/macintosh/rack-meter.c memset(rdma->buf2, 0, sizeof(rdma->buf2));
rdma 371 drivers/macintosh/rack-meter.c struct resource ri2s, rdma;
rdma 431 drivers/macintosh/rack-meter.c of_address_to_resource(i2s, 1, &rdma)) {
rdma 441 drivers/macintosh/rack-meter.c pr_debug(" dma @0x%08x\n", (unsigned int)rdma.start);
rdma 475 drivers/macintosh/rack-meter.c rm->dma_regs = ioremap(rdma.start, 0x100);
rdma 281 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c struct rdma_info *rdma = data;
rdma 284 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c rdma->udbell_physbase = pci_resource_start(pdev, 2);
rdma 285 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c rdma->udbell_len = pci_resource_len(pdev, 2);
rdma 286 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c rdma->tpt_base =
rdma 288 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
rdma 289 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c rdma->pbl_base =
rdma 291 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
rdma 292 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
rdma 293 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
rdma 294 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
rdma 295 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c rdma->pdev = pdev;
rdma 300 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c struct rdma_cq_op *rdma = data;
rdma 304 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
rdma 305 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c rdma->credits);
rdma 332 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c struct rdma_cq_setup *rdma = data;
rdma 336 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c t3_sge_init_cqcntxt(adapter, rdma->id,
rdma 337 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c rdma->base_addr, rdma->size,
rdma 339 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c rdma->ovfl_mode, rdma->credits,
rdma 340 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c rdma->credit_thres);
rdma 350 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c struct rdma_ctrlqp_setup *rdma = data;
rdma 356 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c rdma->base_addr, rdma->size,
rdma 438 drivers/net/ethernet/pensando/ionic/ionic_if.h } rdma;
rdma 2222 drivers/net/ethernet/pensando/ionic/ionic_lif.c neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
rdma 8089 drivers/net/ethernet/qlogic/qed/qed_hsi.h struct rdma_init_func_ramrod_data rdma;
rdma 10051 drivers/net/ethernet/qlogic/qed/qed_hsi.h struct rdma_init_func_ramrod_data rdma;
rdma 606 drivers/net/ethernet/qlogic/qed/qed_rdma.c p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
rdma 608 drivers/net/ethernet/qlogic/qed/qed_rdma.c p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
rdma 69 drivers/net/ethernet/seeq/sgiseeq.c volatile struct hpc_dma_desc rdma;
rdma 209 drivers/net/ethernet/seeq/sgiseeq.c sp->rx_desc[i].rdma.pbuf = dma_addr;
rdma 211 drivers/net/ethernet/seeq/sgiseeq.c sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
rdma 214 drivers/net/ethernet/seeq/sgiseeq.c sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
rdma 259 drivers/net/ethernet/seeq/sgiseeq.c i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
rdma 260 drivers/net/ethernet/seeq/sgiseeq.c r[i].rdma.pnext);
rdma 263 drivers/net/ethernet/seeq/sgiseeq.c i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
rdma 264 drivers/net/ethernet/seeq/sgiseeq.c r[i].rdma.pnext);
rdma 351 drivers/net/ethernet/seeq/sgiseeq.c while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
rdma 352 drivers/net/ethernet/seeq/sgiseeq.c len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
rdma 353 drivers/net/ethernet/seeq/sgiseeq.c dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
rdma 395 drivers/net/ethernet/seeq/sgiseeq.c rd->rdma.pbuf = dma_map_single(dev->dev.parent,
rdma 400 drivers/net/ethernet/seeq/sgiseeq.c rd->rdma.cntinfo = RCNTINFO_INIT;
rdma 407 drivers/net/ethernet/seeq/sgiseeq.c sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
rdma 410 drivers/net/ethernet/seeq/sgiseeq.c sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
rdma 702 drivers/net/ethernet/seeq/sgiseeq.c buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
rdma 703 drivers/net/ethernet/seeq/sgiseeq.c buf[i].rdma.pbuf = 0;
rdma 707 drivers/net/ethernet/seeq/sgiseeq.c buf[i].rdma.pbuf = 0;
rdma 708 drivers/net/ethernet/seeq/sgiseeq.c buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
rdma 268 drivers/nvme/target/configfs.c port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
rdma 269 drivers/nvme/target/configfs.c port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
rdma 270 drivers/nvme/target/configfs.c port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
rdma 214 drivers/scsi/ibmvscsi_tgt/libsrp.c goto rdma;
rdma 241 drivers/scsi/ibmvscsi_tgt/libsrp.c rdma:
rdma 256 fs/cifs/cifs_debug.c if (!server->rdma)
rdma 359 fs/cifs/cifs_debug.c if (server->rdma)
rdma 391 fs/cifs/cifsfs.c if (server->rdma)
rdma 593 fs/cifs/cifsglob.h bool rdma:1;
rdma 730 fs/cifs/cifsglob.h bool rdma;
rdma 2001 fs/cifs/connect.c vol->rdma = true;
rdma 2448 fs/cifs/connect.c if (vol->rdma && vol->vals->protocol_id < SMB30_PROT_ID) {
rdma 2553 fs/cifs/connect.c if (server->rdma)
rdma 2675 fs/cifs/connect.c if (server->rdma != vol->rdma)
rdma 2783 fs/cifs/connect.c tcp_ses->rdma = volume_info->rdma;
rdma 2824 fs/cifs/connect.c if (tcp_ses->rdma) {
rdma 330 fs/cifs/smb2ops.c if (server->rdma) {
rdma 355 fs/cifs/smb2ops.c if (server->rdma) {
rdma 380 fs/cifs/smb2ops.c if (server->rdma) {
rdma 406 fs/cifs/smb2ops.c if (server->rdma) {
rdma 3569 fs/cifs/smb2pdu.c if (server->rdma && rdata && !server->sign &&
rdma 3978 fs/cifs/smb2pdu.c if (server->rdma && !server->sign && wdata->bytes >=
rdma 11 fs/cifs/smbdirect.h #define cifs_rdma_enabled(server) ((server)->rdma)
rdma 61 include/linux/cgroup_subsys.h SUBSYS(rdma)
rdma 647 include/linux/mlx5/device.h } __packed rdma;
rdma 2473 include/linux/mlx5/mlx5_ifc.h u8 rdma[0x1];
rdma 2491 include/linux/mlx5/mlx5_ifc.h u8 rdma[0x1];
rdma 1123 include/linux/nvme.h } rdma;
rdma 160 include/linux/sunrpc/svc_rdma.h extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma);
rdma 161 include/linux/sunrpc/svc_rdma.h extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma);
rdma 162 include/linux/sunrpc/svc_rdma.h extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
rdma 164 include/linux/sunrpc/svc_rdma.h extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma);
rdma 169 include/linux/sunrpc/svc_rdma.h extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma);
rdma 170 include/linux/sunrpc/svc_rdma.h extern int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma,
rdma 173 include/linux/sunrpc/svc_rdma.h extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
rdma 175 include/linux/sunrpc/svc_rdma.h extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
rdma 180 include/linux/sunrpc/svc_rdma.h extern void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma);
rdma 182 include/linux/sunrpc/svc_rdma.h svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma);
rdma 183 include/linux/sunrpc/svc_rdma.h extern void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
rdma 185 include/linux/sunrpc/svc_rdma.h extern int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr);
rdma 186 include/linux/sunrpc/svc_rdma.h extern void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
rdma 189 include/linux/sunrpc/svc_rdma.h extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
rdma 1528 include/trace/events/rpcrdma.h const struct svcxprt_rdma *rdma,
rdma 1532 include/trace/events/rpcrdma.h TP_ARGS(rdma, page),
rdma 1536 include/trace/events/rpcrdma.h __string(device, rdma->sc_cm_id->device->name)
rdma 1537 include/trace/events/rpcrdma.h __string(addr, rdma->sc_xprt.xpt_remotebuf)
rdma 1542 include/trace/events/rpcrdma.h __assign_str(device, rdma->sc_cm_id->device->name);
rdma 1543 include/trace/events/rpcrdma.h __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
rdma 1553 include/trace/events/rpcrdma.h const struct svcxprt_rdma *rdma,
rdma 1557 include/trace/events/rpcrdma.h TP_ARGS(rdma, status),
rdma 1561 include/trace/events/rpcrdma.h __string(device, rdma->sc_cm_id->device->name)
rdma 1562 include/trace/events/rpcrdma.h __string(addr, rdma->sc_xprt.xpt_remotebuf)
rdma 1567 include/trace/events/rpcrdma.h __assign_str(device, rdma->sc_cm_id->device->name);
rdma 1568 include/trace/events/rpcrdma.h __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
rdma 1806 include/trace/events/rpcrdma.h const struct svcxprt_rdma *rdma
rdma 1809 include/trace/events/rpcrdma.h TP_ARGS(rdma),
rdma 1814 include/trace/events/rpcrdma.h __string(addr, rdma->sc_xprt.xpt_remotebuf)
rdma 1818 include/trace/events/rpcrdma.h __entry->avail = atomic_read(&rdma->sc_sq_avail);
rdma 1819 include/trace/events/rpcrdma.h __entry->depth = rdma->sc_sq_depth;
rdma 1820 include/trace/events/rpcrdma.h __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
rdma 1831 include/trace/events/rpcrdma.h const struct svcxprt_rdma *rdma \
rdma 1833 include/trace/events/rpcrdma.h TP_ARGS(rdma))
rdma 1840 include/trace/events/rpcrdma.h const struct svcxprt_rdma *rdma,
rdma 1844 include/trace/events/rpcrdma.h TP_ARGS(rdma, status),
rdma 1850 include/trace/events/rpcrdma.h __string(addr, rdma->sc_xprt.xpt_remotebuf)
rdma 1854 include/trace/events/rpcrdma.h __entry->avail = atomic_read(&rdma->sc_sq_avail);
rdma 1855 include/trace/events/rpcrdma.h __entry->depth = rdma->sc_sq_depth;
rdma 1857 include/trace/events/rpcrdma.h __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
rdma 807 include/uapi/rdma/ib_user_verbs.h } rdma;
rdma 83 include/uapi/rdma/rdma_user_rxe.h } rdma;
rdma 241 include/uapi/rdma/vmw_pvrdma-abi.h } rdma;
rdma 152 net/9p/trans_rdma.c struct p9_trans_rdma *rdma = clnt->trans;
rdma 154 net/9p/trans_rdma.c if (rdma->port != P9_PORT)
rdma 155 net/9p/trans_rdma.c seq_printf(m, ",port=%u", rdma->port);
rdma 156 net/9p/trans_rdma.c if (rdma->sq_depth != P9_RDMA_SQ_DEPTH)
rdma 157 net/9p/trans_rdma.c seq_printf(m, ",sq=%u", rdma->sq_depth);
rdma 158 net/9p/trans_rdma.c if (rdma->rq_depth != P9_RDMA_RQ_DEPTH)
rdma 159 net/9p/trans_rdma.c seq_printf(m, ",rq=%u", rdma->rq_depth);
rdma 160 net/9p/trans_rdma.c if (rdma->timeout != P9_RDMA_TIMEOUT)
rdma 161 net/9p/trans_rdma.c seq_printf(m, ",timeout=%lu", rdma->timeout);
rdma 162 net/9p/trans_rdma.c if (rdma->privport)
rdma 242 net/9p/trans_rdma.c struct p9_trans_rdma *rdma = c->trans;
rdma 245 net/9p/trans_rdma.c BUG_ON(rdma->state != P9_RDMA_INIT);
rdma 246 net/9p/trans_rdma.c rdma->state = P9_RDMA_ADDR_RESOLVED;
rdma 250 net/9p/trans_rdma.c BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED);
rdma 251 net/9p/trans_rdma.c rdma->state = P9_RDMA_ROUTE_RESOLVED;
rdma 255 net/9p/trans_rdma.c BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED);
rdma 256 net/9p/trans_rdma.c rdma->state = P9_RDMA_CONNECTED;
rdma 260 net/9p/trans_rdma.c if (rdma)
rdma 261 net/9p/trans_rdma.c rdma->state = P9_RDMA_CLOSED;
rdma 280 net/9p/trans_rdma.c rdma_disconnect(rdma->cm_id);
rdma 285 net/9p/trans_rdma.c complete(&rdma->cm_done);
rdma 293 net/9p/trans_rdma.c struct p9_trans_rdma *rdma = client->trans;
rdma 301 net/9p/trans_rdma.c ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
rdma 328 net/9p/trans_rdma.c up(&rdma->rq_sem);
rdma 335 net/9p/trans_rdma.c rdma->state = P9_RDMA_FLUSHING;
rdma 344 net/9p/trans_rdma.c struct p9_trans_rdma *rdma = client->trans;
rdma 348 net/9p/trans_rdma.c ib_dma_unmap_single(rdma->cm_id->device,
rdma 351 net/9p/trans_rdma.c up(&rdma->sq_sem);
rdma 362 net/9p/trans_rdma.c static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
rdma 364 net/9p/trans_rdma.c if (!rdma)
rdma 367 net/9p/trans_rdma.c if (rdma->qp && !IS_ERR(rdma->qp))
rdma 368 net/9p/trans_rdma.c ib_destroy_qp(rdma->qp);
rdma 370 net/9p/trans_rdma.c if (rdma->pd && !IS_ERR(rdma->pd))
rdma 371 net/9p/trans_rdma.c ib_dealloc_pd(rdma->pd);
rdma 373 net/9p/trans_rdma.c if (rdma->cq && !IS_ERR(rdma->cq))
rdma 374 net/9p/trans_rdma.c ib_free_cq(rdma->cq);
rdma 376 net/9p/trans_rdma.c if (rdma->cm_id && !IS_ERR(rdma->cm_id))
rdma 377 net/9p/trans_rdma.c rdma_destroy_id(rdma->cm_id);
rdma 379 net/9p/trans_rdma.c kfree(rdma);
rdma 385 net/9p/trans_rdma.c struct p9_trans_rdma *rdma = client->trans;
rdma 389 net/9p/trans_rdma.c c->busa = ib_dma_map_single(rdma->cm_id->device,
rdma 392 net/9p/trans_rdma.c if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
rdma 399 net/9p/trans_rdma.c sge.lkey = rdma->pd->local_dma_lkey;
rdma 405 net/9p/trans_rdma.c return ib_post_recv(rdma->qp, &wr, NULL);
rdma 414 net/9p/trans_rdma.c struct p9_trans_rdma *rdma = client->trans;
rdma 430 net/9p/trans_rdma.c if (unlikely(atomic_read(&rdma->excess_rc) > 0)) {
rdma 431 net/9p/trans_rdma.c if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) {
rdma 438 net/9p/trans_rdma.c atomic_inc(&rdma->excess_rc);
rdma 457 net/9p/trans_rdma.c if (down_interruptible(&rdma->rq_sem)) {
rdma 479 net/9p/trans_rdma.c c->busa = ib_dma_map_single(rdma->cm_id->device,
rdma 482 net/9p/trans_rdma.c if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) {
rdma 491 net/9p/trans_rdma.c sge.lkey = rdma->pd->local_dma_lkey;
rdma 500 net/9p/trans_rdma.c if (down_interruptible(&rdma->sq_sem)) {
rdma 510 net/9p/trans_rdma.c err = ib_post_send(rdma->qp, &wr, NULL);
rdma 526 net/9p/trans_rdma.c atomic_inc(&rdma->excess_rc);
rdma 532 net/9p/trans_rdma.c spin_lock_irqsave(&rdma->req_lock, flags);
rdma 533 net/9p/trans_rdma.c if (err != -EINTR && rdma->state < P9_RDMA_CLOSING) {
rdma 534 net/9p/trans_rdma.c rdma->state = P9_RDMA_CLOSING;
rdma 535 net/9p/trans_rdma.c spin_unlock_irqrestore(&rdma->req_lock, flags);
rdma 536 net/9p/trans_rdma.c rdma_disconnect(rdma->cm_id);
rdma 538 net/9p/trans_rdma.c spin_unlock_irqrestore(&rdma->req_lock, flags);
rdma 544 net/9p/trans_rdma.c struct p9_trans_rdma *rdma;
rdma 549 net/9p/trans_rdma.c rdma = client->trans;
rdma 550 net/9p/trans_rdma.c if (!rdma)
rdma 554 net/9p/trans_rdma.c rdma_disconnect(rdma->cm_id);
rdma 555 net/9p/trans_rdma.c rdma_destroy_trans(rdma);
rdma 564 net/9p/trans_rdma.c struct p9_trans_rdma *rdma;
rdma 566 net/9p/trans_rdma.c rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL);
rdma 567 net/9p/trans_rdma.c if (!rdma)
rdma 570 net/9p/trans_rdma.c rdma->port = opts->port;
rdma 571 net/9p/trans_rdma.c rdma->privport = opts->privport;
rdma 572 net/9p/trans_rdma.c rdma->sq_depth = opts->sq_depth;
rdma 573 net/9p/trans_rdma.c rdma->rq_depth = opts->rq_depth;
rdma 574 net/9p/trans_rdma.c rdma->timeout = opts->timeout;
rdma 575 net/9p/trans_rdma.c spin_lock_init(&rdma->req_lock);
rdma 576 net/9p/trans_rdma.c init_completion(&rdma->cm_done);
rdma 577 net/9p/trans_rdma.c sema_init(&rdma->sq_sem, rdma->sq_depth);
rdma 578 net/9p/trans_rdma.c sema_init(&rdma->rq_sem, rdma->rq_depth);
rdma 579 net/9p/trans_rdma.c atomic_set(&rdma->excess_rc, 0);
rdma 581 net/9p/trans_rdma.c return rdma;
rdma 597 net/9p/trans_rdma.c struct p9_trans_rdma *rdma = client->trans;
rdma 598 net/9p/trans_rdma.c atomic_inc(&rdma->excess_rc);
rdma 602 net/9p/trans_rdma.c static int p9_rdma_bind_privport(struct p9_trans_rdma *rdma)
rdma 612 net/9p/trans_rdma.c err = rdma_bind_addr(rdma->cm_id, (struct sockaddr *)&cl);
rdma 630 net/9p/trans_rdma.c struct p9_trans_rdma *rdma;
rdma 643 net/9p/trans_rdma.c rdma = alloc_rdma(&opts);
rdma 644 net/9p/trans_rdma.c if (!rdma)
rdma 648 net/9p/trans_rdma.c rdma->cm_id = rdma_create_id(&init_net, p9_cm_event_handler, client,
rdma 650 net/9p/trans_rdma.c if (IS_ERR(rdma->cm_id))
rdma 654 net/9p/trans_rdma.c client->trans = rdma;
rdma 658 net/9p/trans_rdma.c err = p9_rdma_bind_privport(rdma);
rdma 667 net/9p/trans_rdma.c rdma->addr.sin_family = AF_INET;
rdma 668 net/9p/trans_rdma.c rdma->addr.sin_addr.s_addr = in_aton(addr);
rdma 669 net/9p/trans_rdma.c rdma->addr.sin_port = htons(opts.port);
rdma 670 net/9p/trans_rdma.c err = rdma_resolve_addr(rdma->cm_id, NULL,
rdma 671 net/9p/trans_rdma.c (struct sockaddr *)&rdma->addr,
rdma 672 net/9p/trans_rdma.c rdma->timeout);
rdma 675 net/9p/trans_rdma.c err = wait_for_completion_interruptible(&rdma->cm_done);
rdma 676 net/9p/trans_rdma.c if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED))
rdma 680 net/9p/trans_rdma.c err = rdma_resolve_route(rdma->cm_id, rdma->timeout);
rdma 683 net/9p/trans_rdma.c err = wait_for_completion_interruptible(&rdma->cm_done);
rdma 684 net/9p/trans_rdma.c if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
rdma 688 net/9p/trans_rdma.c rdma->cq = ib_alloc_cq_any(rdma->cm_id->device, client,
rdma 691 net/9p/trans_rdma.c if (IS_ERR(rdma->cq))
rdma 695 net/9p/trans_rdma.c rdma->pd = ib_alloc_pd(rdma->cm_id->device, 0);
rdma 696 net/9p/trans_rdma.c if (IS_ERR(rdma->pd))
rdma 709 net/9p/trans_rdma.c qp_attr.send_cq = rdma->cq;
rdma 710 net/9p/trans_rdma.c qp_attr.recv_cq = rdma->cq;
rdma 711 net/9p/trans_rdma.c err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);
rdma 714 net/9p/trans_rdma.c rdma->qp = rdma->cm_id->qp;
rdma 722 net/9p/trans_rdma.c err = rdma_connect(rdma->cm_id, &conn_param);
rdma 725 net/9p/trans_rdma.c err = wait_for_completion_interruptible(&rdma->cm_done);
rdma 726 net/9p/trans_rdma.c if (err || (rdma->state != P9_RDMA_CONNECTED))
rdma 734 net/9p/trans_rdma.c rdma_destroy_trans(rdma);
rdma 113 net/rds/ib_send.c rds_ib_send_complete(container_of(op, struct rds_message, rdma),
rdma 166 net/rds/ib_send.c rm = container_of(send->s_op, struct rds_message, rdma);
rdma 578 net/rds/ib_send.c if (rm->rdma.op_active) {
rdma 581 net/rds/ib_send.c ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
rdma 614 net/rds/ib_send.c if (rm->rdma.op_active && rm->rdma.op_fence)
rdma 947 net/rds/ib_send.c rds_message_addref(container_of(op, struct rds_message, rdma));
rdma 162 net/rds/message.c if (rm->rdma.op_active)
rdma 163 net/rds/message.c rds_rdma_free_op(&rm->rdma);
rdma 164 net/rds/message.c if (rm->rdma.op_rdma_mr)
rdma 165 net/rds/message.c rds_mr_put(rm->rdma.op_rdma_mr);
rdma 577 net/rds/rdma.c struct rm_rdma_op *op = &rm->rdma;
rdma 586 net/rds/rdma.c || rm->rdma.op_active)
rdma 761 net/rds/rdma.c rm->rdma.op_rdma_mr = mr;
rdma 780 net/rds/rdma.c &rm->rdma.op_rdma_mr, rm->m_conn_path);
rdma 481 net/rds/rds.h } rdma;
rdma 171 net/rds/recv.c struct rds_ext_header_rdma rdma;
rdma 183 net/rds/recv.c rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0);
rdma 281 net/rds/send.c (rm->rdma.op_active &&
rdma 310 net/rds/send.c if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) {
rdma 311 net/rds/send.c rm->m_final_op = &rm->rdma;
rdma 316 net/rds/send.c ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
rdma 353 net/rds/send.c ops_present = (rm->atomic.op_active || rm->rdma.op_active);
rdma 356 net/rds/send.c if (rm->rdma.op_active && !rm->rdma.op_silent)
rdma 509 net/rds/send.c ro = &rm->rdma;
rdma 580 net/rds/send.c ro = &rm->rdma;
rdma 646 net/rds/send.c struct rm_rdma_op *ro = &rm->rdma;
rdma 659 net/rds/send.c rm->rdma.op_notifier = NULL;
rdma 1324 net/rds/send.c if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
rdma 1326 net/rds/send.c &rm->rdma, conn->c_trans->xmit_rdma);
rdma 114 net/sunrpc/xprtrdma/svc_rdma_backchannel.c static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
rdma 120 net/sunrpc/xprtrdma/svc_rdma_backchannel.c ret = svc_rdma_map_reply_msg(rdma, ctxt, &rqst->rq_snd_buf, NULL);
rdma 129 net/sunrpc/xprtrdma/svc_rdma_backchannel.c return svc_rdma_send(rdma, &ctxt->sc_send_wr);
rdma 172 net/sunrpc/xprtrdma/svc_rdma_backchannel.c rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst)
rdma 180 net/sunrpc/xprtrdma/svc_rdma_backchannel.c ctxt = svc_rdma_send_ctxt_get(rdma);
rdma 192 net/sunrpc/xprtrdma/svc_rdma_backchannel.c svc_rdma_sync_reply_hdr(rdma, ctxt, RPCRDMA_HDRLEN_MIN);
rdma 199 net/sunrpc/xprtrdma/svc_rdma_backchannel.c rc = svc_rdma_bc_sendto(rdma, rqst, ctxt);
rdma 201 net/sunrpc/xprtrdma/svc_rdma_backchannel.c svc_rdma_send_ctxt_put(rdma, ctxt);
rdma 218 net/sunrpc/xprtrdma/svc_rdma_backchannel.c struct svcxprt_rdma *rdma;
rdma 227 net/sunrpc/xprtrdma/svc_rdma_backchannel.c rdma = container_of(sxprt, struct svcxprt_rdma, sc_xprt);
rdma 229 net/sunrpc/xprtrdma/svc_rdma_backchannel.c ret = rpcrdma_bc_send_request(rdma, rqst);
rdma 121 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
rdma 130 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
rdma 133 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
rdma 134 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c rdma->sc_max_req_size, DMA_FROM_DEVICE);
rdma 135 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
rdma 144 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
rdma 145 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
rdma 158 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
rdma 161 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
rdma 172 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
rdma 177 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c while ((node = llist_del_first(&rdma->sc_recv_ctxts))) {
rdma 179 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_recv_ctxt_destroy(rdma, ctxt);
rdma 184 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
rdma 189 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c node = llist_del_first(&rdma->sc_recv_ctxts);
rdma 199 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt = svc_rdma_recv_ctxt_alloc(rdma);
rdma 211 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
rdma 220 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
rdma 222 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_recv_ctxt_destroy(rdma, ctxt);
rdma 237 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c struct svcxprt_rdma *rdma =
rdma 242 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_recv_ctxt_put(rdma, ctxt);
rdma 245 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
rdma 250 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_xprt_get(&rdma->sc_xprt);
rdma 251 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
rdma 258 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_recv_ctxt_put(rdma, ctxt);
rdma 259 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_xprt_put(&rdma->sc_xprt);
rdma 263 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
rdma 267 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt = svc_rdma_recv_ctxt_get(rdma);
rdma 270 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c return __svc_rdma_post_recv(rdma, ctxt);
rdma 279 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
rdma 285 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c for (i = 0; i < rdma->sc_max_requests; i++) {
rdma 286 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt = svc_rdma_recv_ctxt_get(rdma);
rdma 290 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ret = __svc_rdma_post_recv(rdma, ctxt);
rdma 307 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c struct svcxprt_rdma *rdma = cq->cq_context;
rdma 319 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c if (svc_rdma_post_recv(rdma))
rdma 324 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ib_dma_sync_single_for_cpu(rdma->sc_pd->device,
rdma 328 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c spin_lock(&rdma->sc_rq_dto_lock);
rdma 329 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
rdma 331 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
rdma 332 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c spin_unlock(&rdma->sc_rq_dto_lock);
rdma 333 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
rdma 334 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_xprt_enqueue(&rdma->sc_xprt);
rdma 339 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_recv_ctxt_put(rdma, ctxt);
rdma 340 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
rdma 341 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_xprt_enqueue(&rdma->sc_xprt);
rdma 343 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_xprt_put(&rdma->sc_xprt);
rdma 351 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
rdma 355 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
rdma 357 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_recv_ctxt_put(rdma, ctxt);
rdma 359 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
rdma 361 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_recv_ctxt_put(rdma, ctxt);
rdma 508 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
rdma 516 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c if (!rdma->sc_snd_w_inv)
rdma 55 net/sunrpc/xprtrdma/svc_rdma_rw.c svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
rdma 59 net/sunrpc/xprtrdma/svc_rdma_rw.c spin_lock(&rdma->sc_rw_ctxt_lock);
rdma 61 net/sunrpc/xprtrdma/svc_rdma_rw.c ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
rdma 64 net/sunrpc/xprtrdma/svc_rdma_rw.c spin_unlock(&rdma->sc_rw_ctxt_lock);
rdma 66 net/sunrpc/xprtrdma/svc_rdma_rw.c spin_unlock(&rdma->sc_rw_ctxt_lock);
rdma 85 net/sunrpc/xprtrdma/svc_rdma_rw.c static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
rdma 90 net/sunrpc/xprtrdma/svc_rdma_rw.c spin_lock(&rdma->sc_rw_ctxt_lock);
rdma 91 net/sunrpc/xprtrdma/svc_rdma_rw.c list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
rdma 92 net/sunrpc/xprtrdma/svc_rdma_rw.c spin_unlock(&rdma->sc_rw_ctxt_lock);
rdma 100 net/sunrpc/xprtrdma/svc_rdma_rw.c void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
rdma 104 net/sunrpc/xprtrdma/svc_rdma_rw.c while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
rdma 125 net/sunrpc/xprtrdma/svc_rdma_rw.c static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
rdma 128 net/sunrpc/xprtrdma/svc_rdma_rw.c cc->cc_rdma = rdma;
rdma 129 net/sunrpc/xprtrdma/svc_rdma_rw.c svc_xprt_get(&rdma->sc_xprt);
rdma 138 net/sunrpc/xprtrdma/svc_rdma_rw.c struct svcxprt_rdma *rdma = cc->cc_rdma;
rdma 144 net/sunrpc/xprtrdma/svc_rdma_rw.c rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
rdma 145 net/sunrpc/xprtrdma/svc_rdma_rw.c rdma->sc_port_num, ctxt->rw_sg_table.sgl,
rdma 147 net/sunrpc/xprtrdma/svc_rdma_rw.c svc_rdma_put_rw_ctxt(rdma, ctxt);
rdma 149 net/sunrpc/xprtrdma/svc_rdma_rw.c svc_xprt_put(&rdma->sc_xprt);
rdma 172 net/sunrpc/xprtrdma/svc_rdma_rw.c svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk)
rdma 184 net/sunrpc/xprtrdma/svc_rdma_rw.c svc_rdma_cc_init(rdma, &info->wi_cc);
rdma 207 net/sunrpc/xprtrdma/svc_rdma_rw.c struct svcxprt_rdma *rdma = cc->cc_rdma;
rdma 213 net/sunrpc/xprtrdma/svc_rdma_rw.c atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
rdma 214 net/sunrpc/xprtrdma/svc_rdma_rw.c wake_up(&rdma->sc_send_wait);
rdma 217 net/sunrpc/xprtrdma/svc_rdma_rw.c set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
rdma 235 net/sunrpc/xprtrdma/svc_rdma_rw.c svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
rdma 243 net/sunrpc/xprtrdma/svc_rdma_rw.c svc_rdma_cc_init(rdma, &info->ri_cc);
rdma 265 net/sunrpc/xprtrdma/svc_rdma_rw.c struct svcxprt_rdma *rdma = cc->cc_rdma;
rdma 271 net/sunrpc/xprtrdma/svc_rdma_rw.c atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
rdma 272 net/sunrpc/xprtrdma/svc_rdma_rw.c wake_up(&rdma->sc_send_wait);
rdma 275 net/sunrpc/xprtrdma/svc_rdma_rw.c set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
rdma 276 net/sunrpc/xprtrdma/svc_rdma_rw.c svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
rdma 278 net/sunrpc/xprtrdma/svc_rdma_rw.c spin_lock(&rdma->sc_rq_dto_lock);
rdma 280 net/sunrpc/xprtrdma/svc_rdma_rw.c &rdma->sc_read_complete_q);
rdma 282 net/sunrpc/xprtrdma/svc_rdma_rw.c set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
rdma 283 net/sunrpc/xprtrdma/svc_rdma_rw.c spin_unlock(&rdma->sc_rq_dto_lock);
rdma 285 net/sunrpc/xprtrdma/svc_rdma_rw.c svc_xprt_enqueue(&rdma->sc_xprt);
rdma 300 net/sunrpc/xprtrdma/svc_rdma_rw.c struct svcxprt_rdma *rdma = cc->cc_rdma;
rdma 301 net/sunrpc/xprtrdma/svc_rdma_rw.c struct svc_xprt *xprt = &rdma->sc_xprt;
rdma 308 net/sunrpc/xprtrdma/svc_rdma_rw.c if (cc->cc_sqecount > rdma->sc_sq_depth)
rdma 317 net/sunrpc/xprtrdma/svc_rdma_rw.c first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
rdma 318 net/sunrpc/xprtrdma/svc_rdma_rw.c rdma->sc_port_num, cqe, first_wr);
rdma 324 net/sunrpc/xprtrdma/svc_rdma_rw.c &rdma->sc_sq_avail) > 0) {
rdma 325 net/sunrpc/xprtrdma/svc_rdma_rw.c ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
rdma 331 net/sunrpc/xprtrdma/svc_rdma_rw.c trace_svcrdma_sq_full(rdma);
rdma 332 net/sunrpc/xprtrdma/svc_rdma_rw.c atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
rdma 333 net/sunrpc/xprtrdma/svc_rdma_rw.c wait_event(rdma->sc_send_wait,
rdma 334 net/sunrpc/xprtrdma/svc_rdma_rw.c atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
rdma 335 net/sunrpc/xprtrdma/svc_rdma_rw.c trace_svcrdma_sq_retry(rdma);
rdma 338 net/sunrpc/xprtrdma/svc_rdma_rw.c trace_svcrdma_sq_post_err(rdma, ret);
rdma 345 net/sunrpc/xprtrdma/svc_rdma_rw.c atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
rdma 346 net/sunrpc/xprtrdma/svc_rdma_rw.c wake_up(&rdma->sc_send_wait);
rdma 408 net/sunrpc/xprtrdma/svc_rdma_rw.c struct svcxprt_rdma *rdma = cc->cc_rdma;
rdma 428 net/sunrpc/xprtrdma/svc_rdma_rw.c ctxt = svc_rdma_get_rw_ctxt(rdma,
rdma 434 net/sunrpc/xprtrdma/svc_rdma_rw.c ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp,
rdma 435 net/sunrpc/xprtrdma/svc_rdma_rw.c rdma->sc_port_num, ctxt->rw_sg_table.sgl,
rdma 466 net/sunrpc/xprtrdma/svc_rdma_rw.c svc_rdma_put_rw_ctxt(rdma, ctxt);
rdma 467 net/sunrpc/xprtrdma/svc_rdma_rw.c trace_svcrdma_dma_map_rwctx(rdma, ret);
rdma 511 net/sunrpc/xprtrdma/svc_rdma_rw.c int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
rdma 520 net/sunrpc/xprtrdma/svc_rdma_rw.c info = svc_rdma_write_info_alloc(rdma, wr_ch);
rdma 554 net/sunrpc/xprtrdma/svc_rdma_rw.c int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch,
rdma 560 net/sunrpc/xprtrdma/svc_rdma_rw.c info = svc_rdma_write_info_alloc(rdma, rp_ch);
rdma 814 net/sunrpc/xprtrdma/svc_rdma_rw.c int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
rdma 832 net/sunrpc/xprtrdma/svc_rdma_rw.c info = svc_rdma_read_info_alloc(rdma);
rdma 127 net/sunrpc/xprtrdma/svc_rdma_sendto.c svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
rdma 136 net/sunrpc/xprtrdma/svc_rdma_sendto.c size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
rdma 140 net/sunrpc/xprtrdma/svc_rdma_sendto.c buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
rdma 143 net/sunrpc/xprtrdma/svc_rdma_sendto.c addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
rdma 144 net/sunrpc/xprtrdma/svc_rdma_sendto.c rdma->sc_max_req_size, DMA_TO_DEVICE);
rdma 145 net/sunrpc/xprtrdma/svc_rdma_sendto.c if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
rdma 156 net/sunrpc/xprtrdma/svc_rdma_sendto.c for (i = 0; i < rdma->sc_max_send_sges; i++)
rdma 157 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
rdma 173 net/sunrpc/xprtrdma/svc_rdma_sendto.c void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
rdma 177 net/sunrpc/xprtrdma/svc_rdma_sendto.c while ((ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts))) {
rdma 179 net/sunrpc/xprtrdma/svc_rdma_sendto.c ib_dma_unmap_single(rdma->sc_pd->device,
rdma 181 net/sunrpc/xprtrdma/svc_rdma_sendto.c rdma->sc_max_req_size,
rdma 195 net/sunrpc/xprtrdma/svc_rdma_sendto.c struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
rdma 199 net/sunrpc/xprtrdma/svc_rdma_sendto.c spin_lock(&rdma->sc_send_lock);
rdma 200 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts);
rdma 204 net/sunrpc/xprtrdma/svc_rdma_sendto.c spin_unlock(&rdma->sc_send_lock);
rdma 213 net/sunrpc/xprtrdma/svc_rdma_sendto.c spin_unlock(&rdma->sc_send_lock);
rdma 214 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt = svc_rdma_send_ctxt_alloc(rdma);
rdma 227 net/sunrpc/xprtrdma/svc_rdma_sendto.c void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
rdma 230 net/sunrpc/xprtrdma/svc_rdma_sendto.c struct ib_device *device = rdma->sc_cm_id->device;
rdma 245 net/sunrpc/xprtrdma/svc_rdma_sendto.c spin_lock(&rdma->sc_send_lock);
rdma 246 net/sunrpc/xprtrdma/svc_rdma_sendto.c list_add(&ctxt->sc_list, &rdma->sc_send_ctxts);
rdma 247 net/sunrpc/xprtrdma/svc_rdma_sendto.c spin_unlock(&rdma->sc_send_lock);
rdma 260 net/sunrpc/xprtrdma/svc_rdma_sendto.c struct svcxprt_rdma *rdma = cq->cq_context;
rdma 266 net/sunrpc/xprtrdma/svc_rdma_sendto.c atomic_inc(&rdma->sc_sq_avail);
rdma 267 net/sunrpc/xprtrdma/svc_rdma_sendto.c wake_up(&rdma->sc_send_wait);
rdma 270 net/sunrpc/xprtrdma/svc_rdma_sendto.c svc_rdma_send_ctxt_put(rdma, ctxt);
rdma 273 net/sunrpc/xprtrdma/svc_rdma_sendto.c set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
rdma 274 net/sunrpc/xprtrdma/svc_rdma_sendto.c svc_xprt_enqueue(&rdma->sc_xprt);
rdma 277 net/sunrpc/xprtrdma/svc_rdma_sendto.c svc_xprt_put(&rdma->sc_xprt);
rdma 288 net/sunrpc/xprtrdma/svc_rdma_sendto.c int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
rdma 296 net/sunrpc/xprtrdma/svc_rdma_sendto.c if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
rdma 298 net/sunrpc/xprtrdma/svc_rdma_sendto.c trace_svcrdma_sq_full(rdma);
rdma 299 net/sunrpc/xprtrdma/svc_rdma_sendto.c atomic_inc(&rdma->sc_sq_avail);
rdma 300 net/sunrpc/xprtrdma/svc_rdma_sendto.c wait_event(rdma->sc_send_wait,
rdma 301 net/sunrpc/xprtrdma/svc_rdma_sendto.c atomic_read(&rdma->sc_sq_avail) > 1);
rdma 302 net/sunrpc/xprtrdma/svc_rdma_sendto.c if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
rdma 304 net/sunrpc/xprtrdma/svc_rdma_sendto.c trace_svcrdma_sq_retry(rdma);
rdma 308 net/sunrpc/xprtrdma/svc_rdma_sendto.c svc_xprt_get(&rdma->sc_xprt);
rdma 310 net/sunrpc/xprtrdma/svc_rdma_sendto.c ret = ib_post_send(rdma->sc_qp, wr, NULL);
rdma 316 net/sunrpc/xprtrdma/svc_rdma_sendto.c trace_svcrdma_sq_post_err(rdma, ret);
rdma 317 net/sunrpc/xprtrdma/svc_rdma_sendto.c set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
rdma 318 net/sunrpc/xprtrdma/svc_rdma_sendto.c svc_xprt_put(&rdma->sc_xprt);
rdma 319 net/sunrpc/xprtrdma/svc_rdma_sendto.c wake_up(&rdma->sc_send_wait);
rdma 485 net/sunrpc/xprtrdma/svc_rdma_sendto.c static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
rdma 491 net/sunrpc/xprtrdma/svc_rdma_sendto.c struct ib_device *dev = rdma->sc_cm_id->device;
rdma 504 net/sunrpc/xprtrdma/svc_rdma_sendto.c trace_svcrdma_dma_map_page(rdma, page);
rdma 511 net/sunrpc/xprtrdma/svc_rdma_sendto.c static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
rdma 516 net/sunrpc/xprtrdma/svc_rdma_sendto.c return svc_rdma_dma_map_page(rdma, ctxt, virt_to_page(base),
rdma 527 net/sunrpc/xprtrdma/svc_rdma_sendto.c void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
rdma 533 net/sunrpc/xprtrdma/svc_rdma_sendto.c ib_dma_sync_single_for_device(rdma->sc_pd->device,
rdma 542 net/sunrpc/xprtrdma/svc_rdma_sendto.c static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
rdma 571 net/sunrpc/xprtrdma/svc_rdma_sendto.c return elements >= rdma->sc_max_send_sges;
rdma 578 net/sunrpc/xprtrdma/svc_rdma_sendto.c static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
rdma 623 net/sunrpc/xprtrdma/svc_rdma_sendto.c ib_dma_sync_single_for_device(rdma->sc_pd->device,
rdma 642 net/sunrpc/xprtrdma/svc_rdma_sendto.c int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
rdma 653 net/sunrpc/xprtrdma/svc_rdma_sendto.c if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
rdma 654 net/sunrpc/xprtrdma/svc_rdma_sendto.c return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);
rdma 657 net/sunrpc/xprtrdma/svc_rdma_sendto.c ret = svc_rdma_dma_map_buf(rdma, ctxt,
rdma 688 net/sunrpc/xprtrdma/svc_rdma_sendto.c ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
rdma 702 net/sunrpc/xprtrdma/svc_rdma_sendto.c ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
rdma 747 net/sunrpc/xprtrdma/svc_rdma_sendto.c static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
rdma 756 net/sunrpc/xprtrdma/svc_rdma_sendto.c ret = svc_rdma_map_reply_msg(rdma, sctxt,
rdma 772 net/sunrpc/xprtrdma/svc_rdma_sendto.c return svc_rdma_send(rdma, &sctxt->sc_send_wr);
rdma 782 net/sunrpc/xprtrdma/svc_rdma_sendto.c static int svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
rdma 794 net/sunrpc/xprtrdma/svc_rdma_sendto.c svc_rdma_sync_reply_hdr(rdma, ctxt, RPCRDMA_HDRLEN_ERR);
rdma 799 net/sunrpc/xprtrdma/svc_rdma_sendto.c ret = svc_rdma_send(rdma, &ctxt->sc_send_wr);
rdma 801 net/sunrpc/xprtrdma/svc_rdma_sendto.c svc_rdma_send_ctxt_put(rdma, ctxt);
rdma 823 net/sunrpc/xprtrdma/svc_rdma_sendto.c struct svcxprt_rdma *rdma =
rdma 841 net/sunrpc/xprtrdma/svc_rdma_sendto.c sctxt = svc_rdma_send_ctxt_get(rdma);
rdma 849 net/sunrpc/xprtrdma/svc_rdma_sendto.c *p++ = rdma->sc_fc_credits;
rdma 859 net/sunrpc/xprtrdma/svc_rdma_sendto.c ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr);
rdma 865 net/sunrpc/xprtrdma/svc_rdma_sendto.c ret = svc_rdma_send_reply_chunk(rdma, rp_ch, wr_lst, xdr);
rdma 871 net/sunrpc/xprtrdma/svc_rdma_sendto.c svc_rdma_sync_reply_hdr(rdma, sctxt, svc_rdma_reply_hdr_len(rdma_resp));
rdma 872 net/sunrpc/xprtrdma/svc_rdma_sendto.c ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp,
rdma 882 net/sunrpc/xprtrdma/svc_rdma_sendto.c ret = svc_rdma_send_error_msg(rdma, sctxt, rqstp);
rdma 888 net/sunrpc/xprtrdma/svc_rdma_sendto.c svc_rdma_send_ctxt_put(rdma, sctxt);
rdma 268 net/sunrpc/xprtrdma/svc_rdma_transport.c struct svcxprt_rdma *rdma = cma_id->context;
rdma 269 net/sunrpc/xprtrdma/svc_rdma_transport.c struct svc_xprt *xprt = &rdma->sc_xprt;
rdma 279 net/sunrpc/xprtrdma/svc_rdma_transport.c clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
rdma 574 net/sunrpc/xprtrdma/svc_rdma_transport.c struct svcxprt_rdma *rdma =
rdma 578 net/sunrpc/xprtrdma/svc_rdma_transport.c rdma_disconnect(rdma->sc_cm_id);
rdma 583 net/sunrpc/xprtrdma/svc_rdma_transport.c struct svcxprt_rdma *rdma =
rdma 585 net/sunrpc/xprtrdma/svc_rdma_transport.c struct svc_xprt *xprt = &rdma->sc_xprt;
rdma 589 net/sunrpc/xprtrdma/svc_rdma_transport.c if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
rdma 590 net/sunrpc/xprtrdma/svc_rdma_transport.c ib_drain_qp(rdma->sc_qp);
rdma 592 net/sunrpc/xprtrdma/svc_rdma_transport.c svc_rdma_flush_recv_queues(rdma);
rdma 600 net/sunrpc/xprtrdma/svc_rdma_transport.c svc_rdma_destroy_rw_ctxts(rdma);
rdma 601 net/sunrpc/xprtrdma/svc_rdma_transport.c svc_rdma_send_ctxts_destroy(rdma);
rdma 602 net/sunrpc/xprtrdma/svc_rdma_transport.c svc_rdma_recv_ctxts_destroy(rdma);
rdma 605 net/sunrpc/xprtrdma/svc_rdma_transport.c if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
rdma 606 net/sunrpc/xprtrdma/svc_rdma_transport.c ib_destroy_qp(rdma->sc_qp);
rdma 608 net/sunrpc/xprtrdma/svc_rdma_transport.c if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
rdma 609 net/sunrpc/xprtrdma/svc_rdma_transport.c ib_free_cq(rdma->sc_sq_cq);
rdma 611 net/sunrpc/xprtrdma/svc_rdma_transport.c if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
rdma 612 net/sunrpc/xprtrdma/svc_rdma_transport.c ib_free_cq(rdma->sc_rq_cq);
rdma 614 net/sunrpc/xprtrdma/svc_rdma_transport.c if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
rdma 615 net/sunrpc/xprtrdma/svc_rdma_transport.c ib_dealloc_pd(rdma->sc_pd);
rdma 618 net/sunrpc/xprtrdma/svc_rdma_transport.c rdma_destroy_id(rdma->sc_cm_id);
rdma 620 net/sunrpc/xprtrdma/svc_rdma_transport.c kfree(rdma);
rdma 625 net/sunrpc/xprtrdma/svc_rdma_transport.c struct svcxprt_rdma *rdma =
rdma 628 net/sunrpc/xprtrdma/svc_rdma_transport.c INIT_WORK(&rdma->sc_work, __svc_rdma_free);
rdma 629 net/sunrpc/xprtrdma/svc_rdma_transport.c schedule_work(&rdma->sc_work);
rdma 634 net/sunrpc/xprtrdma/svc_rdma_transport.c struct svcxprt_rdma *rdma =
rdma 641 net/sunrpc/xprtrdma/svc_rdma_transport.c if (waitqueue_active(&rdma->sc_send_wait))
rdma 383 sound/soc/codecs/wm_adsp.c __be32 rdma[6];