/linux-4.4.14/drivers/media/dvb-frontends/ |
D | dib3000mb.c | 152 wr(DIB3000MB_REG_LOCK1_MASK, DIB3000MB_LOCK1_SEARCH_4); in dib3000mb_set_frontend() 158 wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_2K); in dib3000mb_set_frontend() 162 wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_8K); in dib3000mb_set_frontend() 175 wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_32); in dib3000mb_set_frontend() 179 wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_16); in dib3000mb_set_frontend() 183 wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_8); in dib3000mb_set_frontend() 187 wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_4); in dib3000mb_set_frontend() 200 wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_OFF); in dib3000mb_set_frontend() 207 wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_ON); in dib3000mb_set_frontend() 217 wr(DIB3000MB_REG_QAM, DIB3000_CONSTELLATION_QPSK); in dib3000mb_set_frontend() [all …]
|
D | dib3000mb_priv.h | 24 #define wr(reg,val) if (dib3000_write_reg(state,reg,val)) \ macro 31 wr(a[i],v[i]); \ 34 #define set_or(reg,val) wr(reg,rd(reg) | val) 36 #define set_and(reg,val) wr(reg,rd(reg) & val)
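The hits above are the register-access helpers that the dib3000mb.c entries rely on: wr() writes one register and bails out on failure, while set_or()/set_and() are read-modify-write wrappers built on wr()/rd(). Below is a minimal, self-contained user-space sketch of that macro shape. The real failure branch of the wr() macro is truncated in the listing, so the stand-in write/read functions, the goto to a local error label, and the register numbers here are assumptions for illustration only.

#include <stdio.h>

/* Stand-ins for dib3000_write_reg()/dib3000_read_reg(); the real driver
 * talks to the demodulator over I2C. */
static int regs[256];

static int demo_write_reg(int reg, int val) { regs[reg] = val; return 0; }
static int demo_read_reg(int reg)           { return regs[reg]; }

/* Same shape as the dib3000mb_priv.h helpers: wr() jumps to an error label
 * on failure (a stand-in for whatever the truncated macro body does), and
 * set_or()/set_and() are read-modify-write wrappers around it. */
#define wr(reg, val) do { if (demo_write_reg(reg, val)) goto err; } while (0)
#define rd(reg)      demo_read_reg(reg)
#define set_or(reg, val)  wr(reg, rd(reg) | (val))
#define set_and(reg, val) wr(reg, rd(reg) & (val))

int main(void)
{
    wr(0x10, 0x80);
    set_or(0x10, 0x01);
    set_and(0x10, ~0x80);
    printf("reg 0x10 = 0x%02x\n", rd(0x10));
    return 0;
err:
    fprintf(stderr, "register write failed\n");
    return 1;
}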
|
/linux-4.4.14/drivers/staging/rdma/amso1100/ |
D | c2_cm.c | 46 struct c2wr_qp_connect_req *wr; /* variable size needs a malloc. */ in c2_llp_connect() local 81 wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); in c2_llp_connect() 82 if (!wr) { in c2_llp_connect() 93 c2_wr_set_id(wr, CCWR_QP_CONNECT); in c2_llp_connect() 94 wr->hdr.context = 0; in c2_llp_connect() 95 wr->rnic_handle = c2dev->adapter_handle; in c2_llp_connect() 96 wr->qp_handle = qp->adapter_handle; in c2_llp_connect() 98 wr->remote_addr = raddr->sin_addr.s_addr; in c2_llp_connect() 99 wr->remote_port = raddr->sin_port; in c2_llp_connect() 106 wr->private_data_length = in c2_llp_connect() [all …]
|
D | c2_mm.c | 56 struct c2wr_nsmr_pbl_req *wr; /* PBL WR ptr */ in send_pbl_messages() local 74 wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); in send_pbl_messages() 75 if (!wr) { in send_pbl_messages() 78 c2_wr_set_id(wr, CCWR_NSMR_PBL); in send_pbl_messages() 85 wr->hdr.context = 0; in send_pbl_messages() 86 wr->rnic_handle = c2dev->adapter_handle; in send_pbl_messages() 87 wr->stag_index = stag_index; /* already swapped */ in send_pbl_messages() 88 wr->flags = 0; in send_pbl_messages() 92 wr->addrs_length = cpu_to_be32(count); in send_pbl_messages() 105 wr->flags = cpu_to_be32(MEM_PBL_COMPLETE); in send_pbl_messages() [all …]
|
D | c2_qp.c | 137 struct c2wr_qp_modify_req wr; in c2_qp_modify() local 154 c2_wr_set_id(&wr, CCWR_QP_MODIFY); in c2_qp_modify() 155 wr.hdr.context = (unsigned long) vq_req; in c2_qp_modify() 156 wr.rnic_handle = c2dev->adapter_handle; in c2_qp_modify() 157 wr.qp_handle = qp->adapter_handle; in c2_qp_modify() 158 wr.ord = cpu_to_be32(C2_QP_NO_ATTR_CHANGE); in c2_qp_modify() 159 wr.ird = cpu_to_be32(C2_QP_NO_ATTR_CHANGE); in c2_qp_modify() 160 wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE); in c2_qp_modify() 161 wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE); in c2_qp_modify() 170 wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state)); in c2_qp_modify() [all …]
|
D | c2_rnic.c | 83 struct c2wr_init_req wr; in c2_adapter_init() local 86 memset(&wr, 0, sizeof(wr)); in c2_adapter_init() 87 c2_wr_set_id(&wr, CCWR_INIT); in c2_adapter_init() 88 wr.hdr.context = 0; in c2_adapter_init() 89 wr.hint_count = cpu_to_be64(c2dev->hint_count_dma); in c2_adapter_init() 90 wr.q0_host_shared = cpu_to_be64(c2dev->req_vq.shared_dma); in c2_adapter_init() 91 wr.q1_host_shared = cpu_to_be64(c2dev->rep_vq.shared_dma); in c2_adapter_init() 92 wr.q1_host_msg_pool = cpu_to_be64(c2dev->rep_vq.host_dma); in c2_adapter_init() 93 wr.q2_host_shared = cpu_to_be64(c2dev->aeq.shared_dma); in c2_adapter_init() 94 wr.q2_host_msg_pool = cpu_to_be64(c2dev->aeq.host_dma); in c2_adapter_init() [all …]
|
D | c2_ae.c | 150 union c2wr *wr; in c2_ae_event() local 164 wr = c2_mq_consume(mq); in c2_ae_event() 165 if (!wr) in c2_ae_event() 171 event_id = c2_wr_get_id(wr); in c2_ae_event() 172 resource_indicator = be32_to_cpu(wr->ae.ae_generic.resource_type); in c2_ae_event() 174 (void *) (unsigned long) wr->ae.ae_generic.user_context; in c2_ae_event() 176 status = cm_event.status = c2_convert_cm_status(c2_wr_get_result(wr)); in c2_ae_event() 200 (unsigned long long) wr->ae.ae_generic.user_context, in c2_ae_event() 201 be32_to_cpu(wr->ae.ae_generic.resource_type), in c2_ae_event() 202 be32_to_cpu(wr->ae.ae_generic.resource), in c2_ae_event() [all …]
|
D | c2_cq.c | 292 struct c2wr_cq_create_req wr; in c2_init_cq() local 320 memset(&wr, 0, sizeof(wr)); in c2_init_cq() 321 c2_wr_set_id(&wr, CCWR_CQ_CREATE); in c2_init_cq() 322 wr.hdr.context = (unsigned long) vq_req; in c2_init_cq() 323 wr.rnic_handle = c2dev->adapter_handle; in c2_init_cq() 324 wr.msg_size = cpu_to_be32(cq->mq.msg_size); in c2_init_cq() 325 wr.depth = cpu_to_be32(cq->mq.q_size); in c2_init_cq() 326 wr.shared_ht = cpu_to_be64(cq->mq.shared_dma); in c2_init_cq() 327 wr.msg_pool = cpu_to_be64(cq->mq.host_dma); in c2_init_cq() 328 wr.user_context = (u64) (unsigned long) (cq); in c2_init_cq() [all …]
|
D | c2_wr.h | 1479 static __inline__ u8 c2_wr_get_id(void *wr) in c2_wr_get_id() argument 1481 return ((struct c2wr_hdr *) wr)->id; in c2_wr_get_id() 1483 static __inline__ void c2_wr_set_id(void *wr, u8 id) in c2_wr_set_id() argument 1485 ((struct c2wr_hdr *) wr)->id = id; in c2_wr_set_id() 1487 static __inline__ u8 c2_wr_get_result(void *wr) in c2_wr_get_result() argument 1489 return ((struct c2wr_hdr *) wr)->result; in c2_wr_get_result() 1491 static __inline__ void c2_wr_set_result(void *wr, u8 result) in c2_wr_set_result() argument 1493 ((struct c2wr_hdr *) wr)->result = result; in c2_wr_set_result() 1495 static __inline__ u8 c2_wr_get_flags(void *wr) in c2_wr_get_flags() argument 1497 return ((struct c2wr_hdr *) wr)->flags; in c2_wr_get_flags() [all …]
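The c2_wr.h hits show the accessor convention used by the other amso1100 entries above: every work request begins with a common struct c2wr_hdr, and the helpers simply cast the opaque WR pointer to that header. A small stand-alone sketch of the idea follows; the header layout and the connect_req type are invented for illustration, not the real c2 structures.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented stand-in for struct c2wr_hdr; only the convention matters: the
 * header is the first member of every request, so a void * can be cast. */
struct hdr { uint8_t id; uint8_t result; uint8_t flags; };

static uint8_t wr_get_id(void *wr)            { return ((struct hdr *)wr)->id; }
static void    wr_set_id(void *wr, uint8_t v) { ((struct hdr *)wr)->id = v; }

struct connect_req { struct hdr hdr; uint32_t qp_handle; };  /* hypothetical WR */

int main(void)
{
    struct connect_req req;

    memset(&req, 0, sizeof(req));
    wr_set_id(&req, 42);   /* works for any request that embeds the header first */
    printf("id=%u\n", (unsigned)wr_get_id(&req));
    return 0;
}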
|
D | c2_vq.c | 178 int vq_send_wr(struct c2_dev *c2dev, union c2wr *wr) in vq_send_wr() argument 226 memcpy(msg, wr, c2dev->req_vq.msg_size); in vq_send_wr()
|
D | c2_vq.h | 57 extern int vq_send_wr(struct c2_dev *c2dev, union c2wr * wr);
|
/linux-4.4.14/lib/ |
D | decompress_unlzma.c | 292 static inline size_t INIT get_pos(struct writer *wr) in get_pos() argument 295 wr->global_pos + wr->buffer_pos; in get_pos() 298 static inline uint8_t INIT peek_old_byte(struct writer *wr, in peek_old_byte() argument 301 if (!wr->flush) { in peek_old_byte() 303 while (offs > wr->header->dict_size) in peek_old_byte() 304 offs -= wr->header->dict_size; in peek_old_byte() 305 pos = wr->buffer_pos - offs; in peek_old_byte() 306 return wr->buffer[pos]; in peek_old_byte() 308 uint32_t pos = wr->buffer_pos - offs; in peek_old_byte() 309 while (pos >= wr->header->dict_size) in peek_old_byte() [all …]
|
/linux-4.4.14/include/linux/ |
D | hdlcdrv.h | 27 unsigned rd, wr; member 34 unsigned int wr; member 48 buf->buffer[buf->wr] = buf->shreg; in hdlcdrv_add_bitbuffer() 49 buf->wr = (buf->wr+1) % sizeof(buf->buffer); in hdlcdrv_add_bitbuffer() 57 buf->buffer[buf->wr] = bits & 0xff; in hdlcdrv_add_bitbuffer_word() 58 buf->wr = (buf->wr+1) % sizeof(buf->buffer); in hdlcdrv_add_bitbuffer_word() 59 buf->buffer[buf->wr] = (bits >> 8) & 0xff; in hdlcdrv_add_bitbuffer_word() 60 buf->wr = (buf->wr+1) % sizeof(buf->buffer); in hdlcdrv_add_bitbuffer_word() 163 ret = !((HDLCDRV_HDLCBUFFER - 1 + hb->rd - hb->wr) % HDLCDRV_HDLCBUFFER); in hdlcdrv_hbuf_full() 176 ret = (hb->rd == hb->wr); in hdlcdrv_hbuf_empty() [all …]
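The hdlcdrv.h hits show the driver's rd/wr index ring buffers, including the full test ((N - 1 + rd - wr) % N == 0) and the empty test (rd == wr). Here is a self-contained sketch of that exact indexing scheme; the buffer size, element type, and helper names are arbitrary choices, not the hdlcdrv ones.

#include <stdio.h>

#define BUFSZ 16   /* arbitrary; hdlcdrv uses its own buffer sizes */

struct ring {
    unsigned rd, wr;
    unsigned char buffer[BUFSZ];
};

/* Full when wr sits one slot behind rd, leaving one slot unused --
 * the same (N - 1 + rd - wr) % N test as hdlcdrv_hbuf_full(). */
static int ring_full(const struct ring *r)
{
    return !((BUFSZ - 1 + r->rd - r->wr) % BUFSZ);
}

static int ring_empty(const struct ring *r)
{
    return r->rd == r->wr;               /* same test as hdlcdrv_hbuf_empty() */
}

static int ring_put(struct ring *r, unsigned char c)
{
    if (ring_full(r))
        return -1;
    r->buffer[r->wr] = c;
    r->wr = (r->wr + 1) % BUFSZ;         /* producer advances wr */
    return 0;
}

static int ring_get(struct ring *r)
{
    int c;

    if (ring_empty(r))
        return -1;
    c = r->buffer[r->rd];
    r->rd = (r->rd + 1) % BUFSZ;         /* consumer advances rd */
    return c;
}

int main(void)
{
    struct ring r = { 0 };
    int a, b;

    ring_put(&r, 'a');
    ring_put(&r, 'b');
    a = ring_get(&r);
    b = ring_get(&r);
    printf("%c%c empty=%d\n", a, b, ring_empty(&r));
    return 0;
}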
|
/linux-4.4.14/drivers/infiniband/hw/cxgb3/ |
D | iwch_qp.c | 42 static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr, in build_rdma_send() argument 48 switch (wr->opcode) { in build_rdma_send() 50 if (wr->send_flags & IB_SEND_SOLICITED) in build_rdma_send() 57 if (wr->send_flags & IB_SEND_SOLICITED) in build_rdma_send() 61 wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey); in build_rdma_send() 66 if (wr->num_sge > T3_MAX_SGE) in build_rdma_send() 72 for (i = 0; i < wr->num_sge; i++) { in build_rdma_send() 73 if ((plen + wr->sg_list[i].length) < plen) in build_rdma_send() 76 plen += wr->sg_list[i].length; in build_rdma_send() 77 wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey); in build_rdma_send() [all …]
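One detail worth calling out in the build_rdma_send() hits above is the overflow guard: the scatter-gather lengths are summed into a 32-bit total, and a sum that comes out smaller than it was means the unsigned addition wrapped. A tiny stand-alone demonstration of that check, with made-up lengths:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t lengths[] = { 0x80000000u, 0x90000000u };   /* made-up SGE lengths */
    uint32_t plen = 0;

    for (unsigned i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
        if (plen + lengths[i] < plen) {   /* wrapped past 2^32 - 1: reject */
            printf("overflow at sge %u\n", i);
            return 1;
        }
        plen += lengths[i];
    }
    printf("total %u\n", plen);
    return 0;
}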
|
D | iwch_cm.c | 179 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); in release_tid() 194 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); in iwch_quiesce_tid() 195 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); in iwch_quiesce_tid() 215 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); in iwch_resume_tid() 216 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); in iwch_resume_tid() 401 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON)); in send_halfclose() 402 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); in send_halfclose() 422 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ)); in send_abort() 423 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); in send_abort() 461 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); in send_connect() [all …]
|
D | iwch_provider.h | 329 int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 331 int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
|
/linux-4.4.14/arch/mips/mm/ |
D | tlbex.c | 1869 struct work_registers wr = build_get_work_registers(p); in build_r4000_tlbchange_handler_head() local 1872 build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */ in build_r4000_tlbchange_handler_head() 1874 build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */ in build_r4000_tlbchange_handler_head() 1883 build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update); in build_r4000_tlbchange_handler_head() 1886 UASM_i_MFC0(p, wr.r1, C0_BADVADDR); in build_r4000_tlbchange_handler_head() 1887 UASM_i_LW(p, wr.r2, 0, wr.r2); in build_r4000_tlbchange_handler_head() 1888 UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); in build_r4000_tlbchange_handler_head() 1889 uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2); in build_r4000_tlbchange_handler_head() 1890 UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1); in build_r4000_tlbchange_handler_head() 1895 iPTE_LW(p, wr.r1, wr.r2); /* get even pte */ in build_r4000_tlbchange_handler_head() [all …]
|
/linux-4.4.14/drivers/infiniband/hw/mthca/ |
D | mthca_qp.c | 1479 int ind, struct ib_ud_wr *wr, in build_mlx_header() argument 1488 mthca_ah_grh_present(to_mah(wr->ah)), 0, in build_mlx_header() 1491 err = mthca_read_ah(dev, to_mah(wr->ah), &sqp->ud_header); in build_mlx_header() 1502 switch (wr->wr.opcode) { in build_mlx_header() 1510 sqp->ud_header.immediate_data = wr->wr.ex.imm_data; in build_mlx_header() 1519 sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); in build_mlx_header() 1525 wr->pkey_index, &pkey); in build_mlx_header() 1527 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); in build_mlx_header() 1529 sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ? in build_mlx_header() 1530 sqp->qkey : wr->remote_qkey); in build_mlx_header() [all …]
|
D | mthca_srq.c | 475 int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, in mthca_tavor_post_srq_recv() argument 494 for (nreq = 0; wr; wr = wr->next) { in mthca_tavor_post_srq_recv() 502 *bad_wr = wr; in mthca_tavor_post_srq_recv() 514 if (unlikely(wr->num_sge > srq->max_gs)) { in mthca_tavor_post_srq_recv() 516 *bad_wr = wr; in mthca_tavor_post_srq_recv() 521 for (i = 0; i < wr->num_sge; ++i) { in mthca_tavor_post_srq_recv() 522 mthca_set_data_seg(wqe, wr->sg_list + i); in mthca_tavor_post_srq_recv() 532 srq->wrid[ind] = wr->wr_id; in mthca_tavor_post_srq_recv() 575 int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, in mthca_arbel_post_srq_recv() argument 590 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mthca_arbel_post_srq_recv() [all …]
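The mthca post_srq_recv hits (and the mlx4/mlx5 ones further down) all follow the same contract: walk the caller's chain of receive work requests, reject any WR whose num_sge exceeds the SRQ's max_gs, copy each ib_sge into the hardware scatter format in big-endian, remember wr_id, and on error point *bad_wr at the offending WR. The sketch below mirrors only the fields visible in the listing using stand-in types; it is not the kernel structures or a complete driver path (doorbells, free-slot accounting, and locking are omitted).

#include <stdint.h>
#include <errno.h>
#include <arpa/inet.h>   /* htonl() standing in for cpu_to_be32() */

/* Stand-in types carrying only the fields the listed loops touch; these are
 * NOT the kernel's ib_recv_wr/ib_sge definitions. */
struct sge        { uint64_t addr; uint32_t length; uint32_t lkey; };
struct recv_wr    { struct recv_wr *next; uint64_t wr_id; struct sge *sg_list; int num_sge; };
struct hw_scatter { uint32_t byte_count; uint32_t lkey; uint64_t addr; };

#define MAX_GS   4
#define SRQ_SIZE 64

static struct hw_scatter wqe[SRQ_SIZE][MAX_GS];
static uint64_t wrid[SRQ_SIZE];
static unsigned head;

static int post_srq_recv(struct recv_wr *wr, struct recv_wr **bad_wr)
{
    int i;

    /* The real loops also count posted WRs ("nreq") so they can ring a
     * doorbell afterwards. */
    for (; wr; wr = wr->next) {
        if (wr->num_sge > MAX_GS) {
            *bad_wr = wr;                     /* report the first WR that failed */
            return -EINVAL;
        }
        wrid[head] = wr->wr_id;               /* kept so the completion can return it */
        for (i = 0; i < wr->num_sge; ++i) {
            wqe[head][i].byte_count = htonl(wr->sg_list[i].length);
            wqe[head][i].lkey       = htonl(wr->sg_list[i].lkey);
            wqe[head][i].addr       = wr->sg_list[i].addr;   /* real code swaps 64-bit too */
        }
        head = (head + 1) % SRQ_SIZE;
    }
    return 0;
}

int main(void)
{
    struct sge sg       = { .addr = 0x1000, .length = 256, .lkey = 7 };
    struct recv_wr w2   = { .next = NULL, .wr_id = 2, .sg_list = &sg, .num_sge = 1 };
    struct recv_wr w1   = { .next = &w2,  .wr_id = 1, .sg_list = &sg, .num_sge = 1 };
    struct recv_wr *bad = NULL;

    return post_srq_recv(&w1, &bad) ? 1 : 0;
}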
|
D | mthca_dev.h | 522 int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr, 524 int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr, 533 int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 535 int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, 537 int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 539 int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
|
/linux-4.4.14/drivers/infiniband/ulp/isert/ |
D | ib_isert.c | 54 struct isert_rdma_wr *wr); 59 struct isert_rdma_wr *wr); 1675 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; in isert_unmap_cmd() local 1679 if (wr->data.sg) { in isert_unmap_cmd() 1681 isert_unmap_data_buf(isert_conn, &wr->data); in isert_unmap_cmd() 1684 if (wr->rdma_wr) { in isert_unmap_cmd() 1686 kfree(wr->rdma_wr); in isert_unmap_cmd() 1687 wr->rdma_wr = NULL; in isert_unmap_cmd() 1690 if (wr->ib_sge) { in isert_unmap_cmd() 1692 kfree(wr->ib_sge); in isert_unmap_cmd() [all …]
|
D | ib_isert.h | 213 struct isert_rdma_wr *wr);
|
/linux-4.4.14/drivers/infiniband/ulp/iser/ |
D | iser_memory.c | 440 struct ib_sig_handover_wr *wr; in iser_reg_sig_mr() local 453 wr = sig_handover_wr(iser_tx_next_wr(tx_desc)); in iser_reg_sig_mr() 454 wr->wr.opcode = IB_WR_REG_SIG_MR; in iser_reg_sig_mr() 455 wr->wr.wr_id = ISER_FASTREG_LI_WRID; in iser_reg_sig_mr() 456 wr->wr.sg_list = &data_reg->sge; in iser_reg_sig_mr() 457 wr->wr.num_sge = 1; in iser_reg_sig_mr() 458 wr->wr.send_flags = 0; in iser_reg_sig_mr() 459 wr->sig_attrs = sig_attrs; in iser_reg_sig_mr() 460 wr->sig_mr = pi_ctx->sig_mr; in iser_reg_sig_mr() 462 wr->prot = &prot_reg->sge; in iser_reg_sig_mr() [all …]
|
D | iser_verbs.c | 1110 struct ib_send_wr *bad_wr, *wr = iser_tx_next_wr(tx_desc); in iser_post_send() local 1117 wr->next = NULL; in iser_post_send() 1118 wr->wr_id = (uintptr_t)tx_desc; in iser_post_send() 1119 wr->sg_list = tx_desc->tx_sg; in iser_post_send() 1120 wr->num_sge = tx_desc->num_sge; in iser_post_send() 1121 wr->opcode = IB_WR_SEND; in iser_post_send() 1122 wr->send_flags = signal ? IB_SEND_SIGNALED : 0; in iser_post_send()
|
/linux-4.4.14/drivers/infiniband/hw/mlx5/ |
D | qp.c | 1839 struct ib_send_wr *wr) in set_datagram_seg() argument 1841 memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av)); in set_datagram_seg() 1842 dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV); in set_datagram_seg() 1843 dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey); in set_datagram_seg() 1955 struct ib_send_wr *wr) in set_reg_umr_segment() argument 1957 struct mlx5_umr_wr *umrwr = umr_wr(wr); in set_reg_umr_segment() 1961 if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE) in set_reg_umr_segment() 1966 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) { in set_reg_umr_segment() 1968 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) { in set_reg_umr_segment() 1979 if (!wr->num_sge) in set_reg_umr_segment() [all …]
|
D | mr.c | 696 static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr, in prep_umr_reg_wqe() argument 702 struct mlx5_umr_wr *umrwr = umr_wr(wr); in prep_umr_reg_wqe() 708 wr->next = NULL; in prep_umr_reg_wqe() 709 wr->send_flags = 0; in prep_umr_reg_wqe() 710 wr->sg_list = sg; in prep_umr_reg_wqe() 712 wr->num_sge = 1; in prep_umr_reg_wqe() 714 wr->num_sge = 0; in prep_umr_reg_wqe() 716 wr->opcode = MLX5_IB_WR_UMR; in prep_umr_reg_wqe() 728 struct ib_send_wr *wr, u32 key) in prep_umr_unreg_wqe() argument 730 struct mlx5_umr_wr *umrwr = umr_wr(wr); in prep_umr_unreg_wqe() [all …]
|
D | srq.c | 425 int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, in mlx5_ib_post_srq_recv() argument 438 for (nreq = 0; wr; nreq++, wr = wr->next) { in mlx5_ib_post_srq_recv() 439 if (unlikely(wr->num_sge > srq->msrq.max_gs)) { in mlx5_ib_post_srq_recv() 441 *bad_wr = wr; in mlx5_ib_post_srq_recv() 447 *bad_wr = wr; in mlx5_ib_post_srq_recv() 451 srq->wrid[srq->head] = wr->wr_id; in mlx5_ib_post_srq_recv() 457 for (i = 0; i < wr->num_sge; i++) { in mlx5_ib_post_srq_recv() 458 scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length); in mlx5_ib_post_srq_recv() 459 scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey); in mlx5_ib_post_srq_recv() 460 scat[i].addr = cpu_to_be64(wr->sg_list[i].addr); in mlx5_ib_post_srq_recv()
|
D | mlx5_ib.h | 248 struct ib_send_wr wr; member 261 static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr) in umr_wr() argument 263 return container_of(wr, struct mlx5_umr_wr, wr); in umr_wr() 522 int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, 532 int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 534 int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
|
/linux-4.4.14/arch/sparc/kernel/ |
D | trampoline_32.S | 46 wr %g1, 0x0, %psr ! traps off though 51 wr %g1, 0x0, %wim 55 wr %g3, 0x0, %tbr 70 wr %g1, PSR_ET, %psr ! traps on 100 wr %g1, 0x0, %psr ! traps off though 105 wr %g1, 0x0, %wim 110 wr %g1, 0x0, %tbr 131 wr %g1, PSR_ET, %psr ! traps on 159 wr %g1, 0x0, %psr ! traps off though 164 wr %g1, 0x0, %wim [all …]
|
D | entry.S | 157 wr %l0, 0x0, %psr 179 wr %l4, 0x0, %psr 181 wr %l4, PSR_ET, %psr 198 wr %l0, PSR_ET, %psr 228 wr %g2, 0x0, %psr 230 wr %g2, PSR_ET, %psr 237 wr %g2, PSR_ET, %psr ! keep ET up 247 wr %g2, 0x0, %psr 249 wr %g2, PSR_ET, %psr 253 wr %l0, PSR_ET, %psr [all …]
|
D | rtrap_32.S | 57 wr %t_psr, 0x0, %psr 87 wr %t_psr, 0x0, %psr 95 wr %t_psr, PSR_ET, %psr 129 wr %glob_tmp, 0x0, %wim 153 wr %t_psr, 0x0, %psr 164 wr %t_wim, 0x0, %wim ! or else... 166 wr %t_psr, PSR_ET, %psr 191 wr %g1, 0x0, %wim 209 wr %t_psr, 0x0, %psr 216 wr %t_wim, 0x0, %wim [all …]
|
D | wuf.S | 92 wr %twin_tmp1, 0x0, %wim /* Make window 'I' invalid */ 121 wr %t_psr, 0x0, %psr 179 wr %t_wim, 0x0, %wim 187 wr %t_psr, PSR_ET, %psr ! enable traps 213 wr %t_psr, 0x0, %psr 299 wr %t_psr, 0x0, %psr
|
D | wof.S | 110 wr %glob_tmp, 0x0, %wim ! set new %wim, this is safe now 121 wr %t_psr, 0x0, %psr ! restore condition codes in %psr 147 wr %glob_tmp, 0x0, %wim ! Now it is safe to set new %wim 190 wr %t_psr, 0x0, %psr 252 wr %t_psr, PSR_ET, %psr 285 wr %t_psr, 0x0, %psr
|
D | una_asm_64.S | 12 wr %o3, 0, %asi 45 wr %o4, 0x0, %asi 70 wr %o4, 0, %asi 127 wr %o5, 0x0, %asi
|
D | rtrap_64.S | 194 wr %o3, %g0, %y 233 wr %g0, ASI_AIUP, %asi 303 wr %g1, FPRS_FEF, %fprs 317 wr %g1, 0, %gsr 327 5: wr %g0, FPRS_FEF, %fprs 336 wr %g0, FPRS_DU, %fprs
|
D | head_32.S | 243 wr %g2, 0x0, %psr 292 wr %g3, 0x0, %psr ! tick tock, tick tock 510 wr %g2, 0x0, %psr 560 wr %g0, 0x0, %wim ! so we do not get a trap 573 wr %g1, 0x0, %wim ! make window 1 invalid 649 wr %g3, 0x0, %tbr 654 wr %g3, 0x0, %psr 657 wr %g3, PSR_ET, %psr
|
D | hvtramp.S | 94 wr %g0, 0, %fprs 95 wr %g0, ASI_P, %asi
|
D | trampoline_64.S | 56 wr %g1, %asr18 86 wr %g5, %asr25 102 wr %g2, 0, %tick_cmpr 251 wr %g0, 0, %fprs 253 wr %g0, ASI_P, %asi
|
D | head_64.S | 487 wr %g1, %asr18 531 wr %g0, 0, %fprs 670 wr %g0, ASI_P, %asi 814 wr %o2, 0, %tick_cmpr 824 wr %o2, %asr25 948 wr %g0, ASI_AIUS, %asi 954 wr %g0, ASI_AIUS, %asi 960 wr %g0, ASI_AIUS, %asi
|
D | ivec.S | 34 wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint
|
D | fpu_traps.S | 24 wr %g0, FPRS_FEF, %fprs 186 wr %g7, 0, %gsr 191 wr %g0, FPRS_FEF, %fprs ! clean DU/DL bits 382 wr %g0, 0, %fprs
|
D | etrap_32.S | 120 wr %g2, 0x0, %wim 214 wr %g2, 0x0, %wim
|
D | etrap_64.S | 49 wr %g0, 0, %fprs 79 wr %g3, 0x0, %asi
|
/linux-4.4.14/drivers/infiniband/hw/mlx4/ |
D | qp.c | 2143 struct ib_ud_wr *wr, in build_sriov_qp0_header() argument 2150 struct mlx4_ib_ah *ah = to_mah(wr->ah); in build_sriov_qp0_header() 2158 if (wr->wr.opcode != IB_WR_SEND) in build_sriov_qp0_header() 2163 for (i = 0; i < wr->wr.num_sge; ++i) in build_sriov_qp0_header() 2164 send_size += wr->wr.sg_list[i].length; in build_sriov_qp0_header() 2189 sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); in build_sriov_qp0_header() 2193 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); in build_sriov_qp0_header() 2265 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, in build_mlx_header() argument 2272 struct mlx4_ib_ah *ah = to_mah(wr->ah); in build_mlx_header() 2286 for (i = 0; i < wr->wr.num_sge; ++i) in build_mlx_header() [all …]
|
D | srq.c | 314 int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, in mlx4_ib_post_srq_recv() argument 329 *bad_wr = wr; in mlx4_ib_post_srq_recv() 334 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mlx4_ib_post_srq_recv() 335 if (unlikely(wr->num_sge > srq->msrq.max_gs)) { in mlx4_ib_post_srq_recv() 337 *bad_wr = wr; in mlx4_ib_post_srq_recv() 343 *bad_wr = wr; in mlx4_ib_post_srq_recv() 347 srq->wrid[srq->head] = wr->wr_id; in mlx4_ib_post_srq_recv() 353 for (i = 0; i < wr->num_sge; ++i) { in mlx4_ib_post_srq_recv() 354 scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length); in mlx4_ib_post_srq_recv() 355 scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey); in mlx4_ib_post_srq_recv() [all …]
|
D | mr.c | 372 struct ib_bind_mw_wr wr; in mlx4_ib_bind_mw() local 376 memset(&wr, 0, sizeof(wr)); in mlx4_ib_bind_mw() 377 wr.wr.opcode = IB_WR_BIND_MW; in mlx4_ib_bind_mw() 378 wr.wr.wr_id = mw_bind->wr_id; in mlx4_ib_bind_mw() 379 wr.wr.send_flags = mw_bind->send_flags; in mlx4_ib_bind_mw() 380 wr.mw = mw; in mlx4_ib_bind_mw() 381 wr.bind_info = mw_bind->bind_info; in mlx4_ib_bind_mw() 382 wr.rkey = ib_inc_rkey(mw->rkey); in mlx4_ib_bind_mw() 384 ret = mlx4_ib_post_send(qp, &wr.wr, &bad_wr); in mlx4_ib_bind_mw() 386 mw->rkey = wr.rkey; in mlx4_ib_bind_mw()
|
D | mad.c | 460 struct ib_ud_wr wr; in mlx4_ib_send_to_slave() local 586 wr.ah = ah; in mlx4_ib_send_to_slave() 587 wr.port_num = port; in mlx4_ib_send_to_slave() 588 wr.remote_qkey = IB_QP_SET_QKEY; in mlx4_ib_send_to_slave() 589 wr.remote_qpn = dqpn; in mlx4_ib_send_to_slave() 590 wr.wr.next = NULL; in mlx4_ib_send_to_slave() 591 wr.wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt); in mlx4_ib_send_to_slave() 592 wr.wr.sg_list = &list; in mlx4_ib_send_to_slave() 593 wr.wr.num_sge = 1; in mlx4_ib_send_to_slave() 594 wr.wr.opcode = IB_WR_SEND; in mlx4_ib_send_to_slave() [all …]
|
D | mlx4_ib.h | 740 int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, 751 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 753 int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
|
/linux-4.4.14/arch/arm/boot/dts/ |
D | omap3430-sdp.dts | 68 gpmc,cs-wr-off-ns = <186>; 71 gpmc,adv-wr-off-ns = <48>; 77 gpmc,wr-cycle-ns = <186>; 82 gpmc,wr-data-mux-bus-ns = <90>; 83 gpmc,wr-access-ns = <186>; 114 gpmc,cs-wr-off-ns = <36>; 117 gpmc,adv-wr-off-ns = <36>; 123 gpmc,wr-cycle-ns = <72>; 125 gpmc,wr-access-ns = <30>; 159 gpmc,cs-wr-off-ns = <72>; [all …]
|
D | omap-gpmc-smsc911x.dtsi | 31 gpmc,cs-wr-off-ns = <150>; 34 gpmc,adv-wr-off-ns = <40>; 40 gpmc,wr-cycle-ns = <155>; 47 gpmc,wr-data-mux-bus-ns = <0>; 48 gpmc,wr-access-ns = <0>;
|
D | omap-gpmc-smsc9221.dtsi | 34 gpmc,cs-wr-off-ns = <36>; 37 gpmc,adv-wr-off-ns = <12>; 43 gpmc,wr-cycle-ns = <54>; 48 gpmc,wr-data-mux-bus-ns = <18>; 49 gpmc,wr-access-ns = <42>;
|
D | omap3-overo-tobiduo-common.dtsi | 34 gpmc,cs-wr-off-ns = <36>; 37 gpmc,adv-wr-off-ns = <12>; 43 gpmc,wr-cycle-ns = <54>; 48 gpmc,wr-data-mux-bus-ns = <18>; 49 gpmc,wr-access-ns = <42>;
|
D | omap2430-sdp.dts | 53 gpmc,cs-wr-off-ns = <187>; 56 gpmc,adv-wr-off-ns = <48>; 62 gpmc,wr-cycle-ns = <187>; 69 gpmc,wr-data-mux-bus-ns = <0>; 70 gpmc,wr-access-ns = <0>;
|
D | omap-zoom-common.dtsi | 33 gpmc,cs-wr-off-ns = <155>; 36 gpmc,adv-wr-off-ns = <40>; 42 gpmc,wr-cycle-ns = <155>; 49 gpmc,wr-data-mux-bus-ns = <45>; 50 gpmc,wr-access-ns = <145>;
|
D | orion5x-rd88f5182-nas.dts | 58 devbus,wr-high-ps = <90000>; 59 devbus,wr-low-ps = <90000>; 60 devbus,ale-wr-ps = <90000>; 80 devbus,wr-high-ps = <90000>; 81 devbus,wr-low-ps = <90000>; 82 devbus,ale-wr-ps = <90000>;
|
D | omap3-sb-t35.dtsi | 115 gpmc,cs-wr-off-ns = <150>; 118 gpmc,adv-wr-off-ns = <40>; 124 gpmc,wr-cycle-ns = <155>; 131 gpmc,wr-data-mux-bus-ns = <0>; 132 gpmc,wr-access-ns = <0>;
|
D | omap3-devkit8000-common.dtsi | 215 gpmc,cs-wr-off-ns = <44>; 218 gpmc,adv-wr-off-ns = <44>; 223 gpmc,wr-cycle-ns = <82>; 224 gpmc,wr-access-ns = <40>; 225 gpmc,wr-data-mux-bus-ns = <0>; 277 gpmc,cs-wr-off-ns = <180>; 280 gpmc,adv-wr-off-ns = <48>; 286 gpmc,wr-cycle-ns = <186>; 293 gpmc,wr-data-mux-bus-ns = <0>; 294 gpmc,wr-access-ns = <0>;
|
D | dm8168-evm.dts | 98 gpmc,cs-wr-off-ns = <44>; 101 gpmc,adv-wr-off-ns = <44>; 108 gpmc,wr-cycle-ns = <82>; 115 gpmc,wr-access-ns = <40>; 116 gpmc,wr-data-mux-bus-ns = <0>;
|
D | logicpd-torpedo-som.dtsi | 48 gpmc,cs-wr-off-ns = <44>; 51 gpmc,adv-wr-off-ns = <44>; 56 gpmc,wr-cycle-ns = <82>; 57 gpmc,wr-access-ns = <40>; 58 gpmc,wr-data-mux-bus-ns = <0>;
|
D | omap3-n950-n9.dtsi | 140 gpmc,cs-wr-off-ns = <87>; 143 gpmc,adv-wr-off-ns = <10>; 149 gpmc,wr-cycle-ns = <112>; 156 gpmc,wr-data-mux-bus-ns = <30>; 157 gpmc,wr-access-ns = <81>;
|
D | omap4-duovero-parlor.dts | 149 gpmc,cs-wr-off-ns = <50>; 152 gpmc,adv-wr-off-ns = <10>; 158 gpmc,wr-cycle-ns = <50>; 163 gpmc,wr-data-mux-bus-ns = <35>; 164 gpmc,wr-access-ns = <50>;
|
D | omap2420-h4.dts | 36 gpmc,cs-wr-off-ns = <160>; 39 gpmc,adv-wr-off-ns = <50>; 45 gpmc,wr-cycle-ns = <170>;
|
D | omap3-lilly-a83x.dtsi | 377 gpmc,cs-wr-off-ns = <100>; 380 gpmc,adv-wr-off-ns = <100>; 386 gpmc,wr-cycle-ns = <100>; 392 gpmc,wr-data-mux-bus-ns = <75>; 393 gpmc,wr-access-ns = <155>; 430 gpmc,cs-wr-off-ns = <60>; 433 gpmc,adv-wr-off-ns = <10>; 439 gpmc,wr-cycle-ns = <100>; 444 gpmc,wr-data-mux-bus-ns = <15>; 445 gpmc,wr-access-ns = <75>;
|
D | omap3-igep.dtsi | 107 gpmc,cs-wr-off-ns = <44>; 110 gpmc,adv-wr-off-ns = <44>; 115 gpmc,wr-cycle-ns = <82>; 116 gpmc,wr-access-ns = <40>; 117 gpmc,wr-data-mux-bus-ns = <0>;
|
D | omap2420-n8x0-common.dtsi | 60 gpmc,cs-wr-off-ns = <109>; 63 gpmc,adv-wr-off-ns = <18>; 69 gpmc,wr-cycle-ns = <136>;
|
D | omap3-lilly-dbb056.dts | 141 gpmc,cs-wr-off-ns = <65>; 144 gpmc,adv-wr-off-ns = <10>; 150 gpmc,wr-cycle-ns = <100>; 155 gpmc,wr-data-mux-bus-ns = <15>; 156 gpmc,wr-access-ns = <75>;
|
D | am335x-chilisom.dtsi | 219 gpmc,cs-wr-off-ns = <44>; 222 gpmc,adv-wr-off-ns = <44>; 229 gpmc,wr-cycle-ns = <82>; 236 gpmc,wr-access-ns = <40>; 237 gpmc,wr-data-mux-bus-ns = <0>;
|
D | omap3-evm-37xx.dts | 170 gpmc,cs-wr-off-ns = <44>; 173 gpmc,adv-wr-off-ns = <44>; 178 gpmc,wr-cycle-ns = <82>; 179 gpmc,wr-access-ns = <40>; 180 gpmc,wr-data-mux-bus-ns = <0>;
|
D | am335x-igep0033.dtsi | 139 gpmc,cs-wr-off-ns = <44>; 142 gpmc,adv-wr-off-ns = <44>; 149 gpmc,wr-cycle-ns = <82>; 156 gpmc,wr-access-ns = <40>; 157 gpmc,wr-data-mux-bus-ns = <0>;
|
D | omap3-overo-base.dtsi | 238 gpmc,cs-wr-off-ns = <44>; 241 gpmc,adv-wr-off-ns = <44>; 246 gpmc,wr-cycle-ns = <82>; 247 gpmc,wr-access-ns = <40>; 248 gpmc,wr-data-mux-bus-ns = <0>;
|
D | omap3-ldp.dts | 113 gpmc,cs-wr-off-ns = <44>; 116 gpmc,adv-wr-off-ns = <44>; 121 gpmc,wr-cycle-ns = <82>; 122 gpmc,wr-access-ns = <40>; 123 gpmc,wr-data-mux-bus-ns = <0>;
|
D | omap3-cm-t3x.dtsi | 274 gpmc,cs-wr-off-ns = <120>; 278 gpmc,adv-wr-off-ns = <120>; 291 gpmc,wr-cycle-ns = <120>; 292 gpmc,wr-access-ns = <186>; 293 gpmc,wr-data-mux-bus-ns = <90>;
|
D | am335x-phycore-som.dtsi | 176 gpmc,cs-wr-off-ns = <30>; 179 gpmc,adv-wr-off-ns = <30>; 186 gpmc,wr-cycle-ns = <30>; 194 gpmc,wr-access-ns = <30>; 195 gpmc,wr-data-mux-bus-ns = <0>;
|
D | orion5x-lacie-ethernet-disk-mini-v2.dts | 78 devbus,wr-high-ps = <90000>; 79 devbus,wr-low-ps = <90000>; 80 devbus,ale-wr-ps = <90000>;
|
D | am335x-nano.dts | 265 gpmc,cs-wr-off-ns = <160>; 268 gpmc,adv-wr-off-ns = <30>; 274 gpmc,wr-cycle-ns = <160>; 279 gpmc,wr-data-mux-bus-ns = <70>; 280 gpmc,wr-access-ns = <80>;
|
D | omap3-tao3530.dtsi | 288 gpmc,cs-wr-off-ns = <36>; 291 gpmc,adv-wr-off-ns = <36>; 297 gpmc,wr-cycle-ns = <72>; 299 gpmc,wr-access-ns = <30>;
|
D | omap3-gta04.dtsi | 502 gpmc,cs-wr-off-ns = <44>; 505 gpmc,adv-wr-off-ns = <44>; 510 gpmc,wr-cycle-ns = <82>; 511 gpmc,wr-access-ns = <40>; 512 gpmc,wr-data-mux-bus-ns = <0>;
|
D | omap3-n900.dts | 728 gpmc,cs-wr-off-ns = <87>; 731 gpmc,adv-wr-off-ns = <10>; 737 gpmc,wr-cycle-ns = <112>; 744 gpmc,wr-data-mux-bus-ns = <30>; 745 gpmc,wr-access-ns = <81>; 794 gpmc,cs-wr-off-ns = <24>; 797 gpmc,adv-wr-off-ns = <0>; 805 gpmc,wr-cycle-ns = <180>; 810 gpmc,wr-access-ns = <0>; 811 gpmc,wr-data-mux-bus-ns = <12>;
|
D | omap3-beagle.dts | 397 gpmc,cs-wr-off-ns = <36>; 400 gpmc,adv-wr-off-ns = <36>; 406 gpmc,wr-cycle-ns = <72>; 408 gpmc,wr-access-ns = <30>;
|
D | armada-xp-openblocks-ax3-4.dts | 88 devbus,wr-high-ps = <60000>; 89 devbus,wr-low-ps = <60000>; 90 devbus,ale-wr-ps = <60000>;
|
D | am335x-baltos-ir5221.dts | 253 gpmc,cs-wr-off-ns = <44>; 256 gpmc,adv-wr-off-ns = <44>; 263 gpmc,wr-cycle-ns = <82>; 270 gpmc,wr-access-ns = <40>; 271 gpmc,wr-data-mux-bus-ns = <0>;
|
D | armada-xp-db.dts | 98 devbus,wr-high-ps = <60000>; 99 devbus,wr-low-ps = <60000>; 100 devbus,ale-wr-ps = <60000>;
|
D | armada-xp-gp.dts | 117 devbus,wr-high-ps = <60000>; 118 devbus,wr-low-ps = <60000>; 119 devbus,ale-wr-ps = <60000>;
|
D | omap3-pandora-common.dtsi | 556 gpmc,cs-wr-off-ns = <44>; 559 gpmc,adv-wr-off-ns = <44>; 564 gpmc,wr-cycle-ns = <82>; 565 gpmc,wr-access-ns = <40>; 566 gpmc,wr-data-mux-bus-ns = <0>;
|
D | am335x-evm.dts | 530 gpmc,cs-wr-off-ns = <44>; 533 gpmc,adv-wr-off-ns = <44>; 540 gpmc,wr-cycle-ns = <82>; 547 gpmc,wr-access-ns = <40>; 548 gpmc,wr-data-mux-bus-ns = <0>;
|
D | am43x-epos-evm.dts | 574 gpmc,cs-wr-off-ns = <40>; 577 gpmc,adv-wr-off-ns = <25>; /* min( tALH + tALS + 1) */ 584 gpmc,wr-cycle-ns = <40>; 590 gpmc,wr-access-ns = <40>; 591 gpmc,wr-data-mux-bus-ns = <0>;
|
D | dra72-evm.dts | 505 gpmc,cs-wr-off-ns = <80>; 508 gpmc,adv-wr-off-ns = <60>; 514 gpmc,wr-access-ns = <80>; 516 gpmc,wr-cycle-ns = <80>; 521 gpmc,wr-data-mux-bus-ns = <0>;
|
D | dra7-evm.dts | 753 gpmc,cs-wr-off-ns = <80>; 756 gpmc,adv-wr-off-ns = <60>; 762 gpmc,wr-access-ns = <80>; 764 gpmc,wr-cycle-ns = <80>; 769 gpmc,wr-data-mux-bus-ns = <0>;
|
/linux-4.4.14/drivers/infiniband/hw/cxgb4/ |
D | qp.c | 389 struct ib_send_wr *wr, int max, u32 *plenp) in build_immd() argument 397 for (i = 0; i < wr->num_sge; i++) { in build_immd() 398 if ((plen + wr->sg_list[i].length) > max) in build_immd() 400 srcp = (u8 *)(unsigned long)wr->sg_list[i].addr; in build_immd() 401 plen += wr->sg_list[i].length; in build_immd() 402 rem = wr->sg_list[i].length; in build_immd() 459 struct ib_send_wr *wr, u8 *len16) in build_rdma_send() argument 465 if (wr->num_sge > T4_MAX_SEND_SGE) in build_rdma_send() 467 switch (wr->opcode) { in build_rdma_send() 469 if (wr->send_flags & IB_SEND_SOLICITED) in build_rdma_send() [all …]
|
D | mem.c | 84 req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | in _c4iw_write_mem_dma_aligned() 86 req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L; in _c4iw_write_mem_dma_aligned() 87 req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16))); in _c4iw_write_mem_dma_aligned() 91 req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16)); in _c4iw_write_mem_dma_aligned() 145 req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | in _c4iw_write_mem_inline() 147 req->wr.wr_lo = (__force __be64)(unsigned long)&wr_wait; in _c4iw_write_mem_inline() 149 req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR)); in _c4iw_write_mem_inline() 150 req->wr.wr_mid = cpu_to_be32( in _c4iw_write_mem_inline() 156 req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), in _c4iw_write_mem_inline()
|
/linux-4.4.14/drivers/scsi/csiostor/ |
D | csio_scsi.c | 205 struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr; in csio_scsi_init_cmd_wr() local 209 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_CMD_WR) | in csio_scsi_init_cmd_wr() 211 wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) | in csio_scsi_init_cmd_wr() 215 wr->cookie = (uintptr_t) req; in csio_scsi_init_cmd_wr() 216 wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); in csio_scsi_init_cmd_wr() 217 wr->tmo_val = (uint8_t) req->tmo; in csio_scsi_init_cmd_wr() 218 wr->r3 = 0; in csio_scsi_init_cmd_wr() 219 memset(&wr->r5, 0, 8); in csio_scsi_init_cmd_wr() 225 wr->rsp_dmalen = cpu_to_be32(dma_buf->len); in csio_scsi_init_cmd_wr() 226 wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr); in csio_scsi_init_cmd_wr() [all …]
|
D | csio_lnode.c | 1415 csio_ln_mgmt_wr_handler(struct csio_hw *hw, void *wr, uint32_t len) in csio_ln_mgmt_wr_handler() argument 1422 wr_cmd = (struct fw_fcoe_els_ct_wr *) wr; in csio_ln_mgmt_wr_handler() 1472 struct fw_wr_hdr *wr; in csio_fcoe_fwevt_handler() local 1508 wr = (struct fw_wr_hdr *) (cmd + 4); in csio_fcoe_fwevt_handler() 1509 if (FW_WR_OP_G(be32_to_cpu(wr->hi)) in csio_fcoe_fwevt_handler() 1577 FW_WR_OP_G(be32_to_cpu((wr->hi)))); in csio_fcoe_fwevt_handler() 1581 wr = (struct fw_wr_hdr *) (cmd); in csio_fcoe_fwevt_handler() 1582 if (FW_WR_OP_G(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) { in csio_fcoe_fwevt_handler() 1583 csio_ln_mgmt_wr_handler(hw, wr, in csio_fcoe_fwevt_handler() 1587 FW_WR_OP_G(be32_to_cpu((wr->hi)))); in csio_fcoe_fwevt_handler() [all …]
|
D | csio_wr.c | 762 void *wr; in csio_wr_cleanup_iq_ftr() local 771 wr = (void *)((uintptr_t)q->vstart + in csio_wr_cleanup_iq_ftr() 774 ftr = (struct csio_iqwr_footer *)((uintptr_t)wr + in csio_wr_cleanup_iq_ftr() 1037 void *wr, uint32_t len_to_qid, in csio_wr_process_fl() argument 1087 iq_handler(hw, wr, q->wr_sz - sizeof(struct csio_iqwr_footer), in csio_wr_process_fl() 1130 void *wr = (void *)((uintptr_t)q->vstart + (q->cidx * q->wr_sz)); in csio_wr_process_iq() local 1139 ftr = (struct csio_iqwr_footer *)((uintptr_t)wr + in csio_wr_process_iq() 1148 CSIO_DB_ASSERT(((uintptr_t)wr + q->wr_sz) <= in csio_wr_process_iq() 1156 iq_handler(hw, wr, q->wr_sz - sizeof(*ftr), NULL, priv); in csio_wr_process_iq() 1159 csio_wr_process_fl(hw, q, wr, in csio_wr_process_iq() [all …]
|
D | csio_mb.c | 195 const u32 *params, u32 *val, bool wr, in csio_mb_params() argument 207 (wr ? FW_CMD_WRITE_F : FW_CMD_READ_F) | in csio_mb_params() 213 if (wr) { in csio_mb_params() 301 bool wr, bool init, bool tgt, bool cofld, in csio_mb_caps_config() argument 307 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, wr ? 0 : 1); in csio_mb_caps_config() 311 (wr ? FW_CMD_WRITE_F : FW_CMD_READ_F)); in csio_mb_caps_config() 315 if (!wr) in csio_mb_caps_config() 347 uint8_t portid, bool wr, uint32_t fc, uint16_t caps, in csio_mb_port() argument 357 (wr ? FW_CMD_EXEC_F : FW_CMD_READ_F) | in csio_mb_port() 359 if (!wr) { in csio_mb_port()
|
D | csio_isr.c | 131 csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len, in csio_fwevt_intx_handler() argument 146 csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len, in csio_process_scsi_cmpl() argument 155 ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr); in csio_process_scsi_cmpl() 285 csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len, in csio_scsi_intx_handler() argument
|
/linux-4.4.14/net/sunrpc/xprtrdma/ |
D | svc_rdma_recvfrom.c | 183 read_wr.wr.wr_id = (unsigned long)ctxt; in rdma_read_chunk_lcl() 184 read_wr.wr.opcode = IB_WR_RDMA_READ; in rdma_read_chunk_lcl() 185 ctxt->wr_op = read_wr.wr.opcode; in rdma_read_chunk_lcl() 186 read_wr.wr.send_flags = IB_SEND_SIGNALED; in rdma_read_chunk_lcl() 189 read_wr.wr.sg_list = ctxt->sge; in rdma_read_chunk_lcl() 190 read_wr.wr.num_sge = pages_needed; in rdma_read_chunk_lcl() 192 ret = svc_rdma_send(xprt, &read_wr.wr); in rdma_read_chunk_lcl() 302 reg_wr.wr.opcode = IB_WR_REG_MR; in rdma_read_chunk_frmr() 303 reg_wr.wr.wr_id = 0; in rdma_read_chunk_frmr() 304 reg_wr.wr.send_flags = IB_SEND_SIGNALED; in rdma_read_chunk_frmr() [all …]
|
D | frwr_ops.c | 384 reg_wr.wr.next = NULL; in frwr_op_map() 385 reg_wr.wr.opcode = IB_WR_REG_MR; in frwr_op_map() 386 reg_wr.wr.wr_id = (uintptr_t)mw; in frwr_op_map() 387 reg_wr.wr.num_sge = 0; in frwr_op_map() 388 reg_wr.wr.send_flags = 0; in frwr_op_map() 396 rc = ib_post_send(ia->ri_id->qp, ®_wr.wr, &bad_wr); in frwr_op_map()
|
D | svc_rdma_sendto.c | 285 write_wr.wr.wr_id = (unsigned long)ctxt; in send_write() 286 write_wr.wr.sg_list = &sge[0]; in send_write() 287 write_wr.wr.num_sge = sge_no; in send_write() 288 write_wr.wr.opcode = IB_WR_RDMA_WRITE; in send_write() 289 write_wr.wr.send_flags = IB_SEND_SIGNALED; in send_write() 295 if (svc_rdma_send(xprt, &write_wr.wr)) in send_write()
|
/linux-4.4.14/arch/sparc/include/asm/ |
D | visasm.h | 22 297: wr %g0, FPRS_FEF, %fprs; \ 25 wr %g0, 0, %fprs; 42 297: wr %o5, FPRS_FEF, %fprs; 45 wr %o5, 0, %fprs;
|
D | backoff.h | 62 wr tmp, 0, %asr27; \
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
D | srq.c | 65 int hfi1_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, in hfi1_post_srq_receive() argument 73 for (; wr; wr = wr->next) { in hfi1_post_srq_receive() 78 if ((unsigned) wr->num_sge > srq->rq.max_sge) { in hfi1_post_srq_receive() 79 *bad_wr = wr; in hfi1_post_srq_receive() 91 *bad_wr = wr; in hfi1_post_srq_receive() 97 wqe->wr_id = wr->wr_id; in hfi1_post_srq_receive() 98 wqe->num_sge = wr->num_sge; in hfi1_post_srq_receive() 99 for (i = 0; i < wr->num_sge; i++) in hfi1_post_srq_receive() 100 wqe->sg_list[i] = wr->sg_list[i]; in hfi1_post_srq_receive()
|
D | verbs.c | 361 static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr) in post_one_send() argument 375 if (unlikely(wr->num_sge > qp->s_max_sge)) in post_one_send() 387 if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) in post_one_send() 391 if (wr->opcode != IB_WR_SEND && in post_one_send() 392 wr->opcode != IB_WR_SEND_WITH_IMM) in post_one_send() 395 if (qp->ibqp.pd != ud_wr(wr)->ah->pd) in post_one_send() 397 } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) in post_one_send() 399 else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP && in post_one_send() 400 (wr->num_sge == 0 || in post_one_send() 401 wr->sg_list[0].length < sizeof(u64) || in post_one_send() [all …]
|
D | uc.c | 119 qp->s_sge.num_sge = wqe->wr.num_sge; in hfi1_make_uc_req() 123 switch (wqe->wr.opcode) { in hfi1_make_uc_req() 131 if (wqe->wr.opcode == IB_WR_SEND) in hfi1_make_uc_req() 137 ohdr->u.imm_data = wqe->wr.ex.imm_data; in hfi1_make_uc_req() 140 if (wqe->wr.send_flags & IB_SEND_SOLICITED) in hfi1_make_uc_req() 160 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) in hfi1_make_uc_req() 166 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data; in hfi1_make_uc_req() 168 if (wqe->wr.send_flags & IB_SEND_SOLICITED) in hfi1_make_uc_req() 191 if (wqe->wr.opcode == IB_WR_SEND) in hfi1_make_uc_req() 196 ohdr->u.imm_data = wqe->wr.ex.imm_data; in hfi1_make_uc_req() [all …]
|
D | rc.c | 71 ss->num_sge = wqe->wr.num_sge; in restart_sge() 348 if ((wqe->wr.send_flags & IB_SEND_FENCE) && in hfi1_make_rc_req() 364 switch (wqe->wr.opcode) { in hfi1_make_rc_req() 380 if (wqe->wr.opcode == IB_WR_SEND) in hfi1_make_rc_req() 385 ohdr->u.imm_data = wqe->wr.ex.imm_data; in hfi1_make_rc_req() 388 if (wqe->wr.send_flags & IB_SEND_SOLICITED) in hfi1_make_rc_req() 419 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) in hfi1_make_rc_req() 425 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data; in hfi1_make_rc_req() 427 if (wqe->wr.send_flags & IB_SEND_SOLICITED) in hfi1_make_rc_req() 488 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { in hfi1_make_rc_req() [all …]
|
D | ruc.c | 452 sqp->s_sge.num_sge = wqe->wr.num_sge; in ruc_loopback() 454 switch (wqe->wr.opcode) { in ruc_loopback() 457 wc.ex.imm_data = wqe->wr.ex.imm_data; in ruc_loopback() 471 wc.ex.imm_data = wqe->wr.ex.imm_data; in ruc_loopback() 506 qp->r_sge.num_sge = wqe->wr.num_sge; in ruc_loopback() 523 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? in ruc_loopback() 573 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM) in ruc_loopback() 587 wqe->wr.send_flags & IB_SEND_SOLICITED); in ruc_loopback() 907 for (i = 0; i < wqe->wr.num_sge; i++) { in hfi1_send_complete() 919 (wqe->wr.send_flags & IB_SEND_SIGNALED) || in hfi1_send_complete() [all …]
|
D | ud.c | 155 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) { in ud_loopback() 157 wc.ex.imm_data = swqe->wr.ex.imm_data; in ud_loopback() 196 ssge.num_sge = swqe->wr.num_sge; in ud_loopback() 252 swqe->wr.send_flags & IB_SEND_SOLICITED); in ud_loopback() 352 qp->s_sge.num_sge = wqe->wr.num_sge; in hfi1_make_ud_req() 371 if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { in hfi1_make_ud_req() 373 ohdr->u.ud.imm_data = wqe->wr.ex.imm_data; in hfi1_make_ud_req() 400 if (wqe->wr.send_flags & IB_SEND_SOLICITED) in hfi1_make_ud_req()
|
/linux-4.4.14/drivers/infiniband/hw/qib/ |
D | qib_srq.c | 48 int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, in qib_post_srq_receive() argument 56 for (; wr; wr = wr->next) { in qib_post_srq_receive() 61 if ((unsigned) wr->num_sge > srq->rq.max_sge) { in qib_post_srq_receive() 62 *bad_wr = wr; in qib_post_srq_receive() 74 *bad_wr = wr; in qib_post_srq_receive() 80 wqe->wr_id = wr->wr_id; in qib_post_srq_receive() 81 wqe->num_sge = wr->num_sge; in qib_post_srq_receive() 82 for (i = 0; i < wr->num_sge; i++) in qib_post_srq_receive() 83 wqe->sg_list[i] = wr->sg_list[i]; in qib_post_srq_receive()
|
D | qib_uc.c | 101 qp->s_sge.num_sge = wqe->wr.num_sge; in qib_make_uc_req() 105 switch (wqe->wr.opcode) { in qib_make_uc_req() 113 if (wqe->wr.opcode == IB_WR_SEND) in qib_make_uc_req() 119 ohdr->u.imm_data = wqe->wr.ex.imm_data; in qib_make_uc_req() 122 if (wqe->wr.send_flags & IB_SEND_SOLICITED) in qib_make_uc_req() 142 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) in qib_make_uc_req() 148 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data; in qib_make_uc_req() 150 if (wqe->wr.send_flags & IB_SEND_SOLICITED) in qib_make_uc_req() 172 if (wqe->wr.opcode == IB_WR_SEND) in qib_make_uc_req() 177 ohdr->u.imm_data = wqe->wr.ex.imm_data; in qib_make_uc_req() [all …]
|
D | qib_verbs.c | 337 static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr, in qib_post_one_send() argument 357 if (wr->num_sge > qp->s_max_sge) in qib_post_one_send() 365 if (wr->opcode == IB_WR_REG_MR) { in qib_post_one_send() 366 if (qib_reg_mr(qp, reg_wr(wr))) in qib_post_one_send() 369 if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) in qib_post_one_send() 373 if (wr->opcode != IB_WR_SEND && in qib_post_one_send() 374 wr->opcode != IB_WR_SEND_WITH_IMM) in qib_post_one_send() 377 if (qp->ibqp.pd != ud_wr(wr)->ah->pd) in qib_post_one_send() 379 } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) in qib_post_one_send() 381 else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP && in qib_post_one_send() [all …]
|
D | qib_rc.c | 51 ss->num_sge = wqe->wr.num_sge; in restart_sge() 318 if ((wqe->wr.send_flags & IB_SEND_FENCE) && in qib_make_rc_req() 334 switch (wqe->wr.opcode) { in qib_make_rc_req() 350 if (wqe->wr.opcode == IB_WR_SEND) in qib_make_rc_req() 355 ohdr->u.imm_data = wqe->wr.ex.imm_data; in qib_make_rc_req() 358 if (wqe->wr.send_flags & IB_SEND_SOLICITED) in qib_make_rc_req() 390 if (wqe->rdma_wr.wr.opcode == IB_WR_RDMA_WRITE) in qib_make_rc_req() 396 wqe->rdma_wr.wr.ex.imm_data; in qib_make_rc_req() 398 if (wqe->rdma_wr.wr.send_flags & IB_SEND_SOLICITED) in qib_make_rc_req() 460 if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { in qib_make_rc_req() [all …]
|
D | qib_ruc.c | 430 sqp->s_sge.num_sge = wqe->wr.num_sge; in qib_ruc_loopback() 432 switch (wqe->wr.opcode) { in qib_ruc_loopback() 435 wc.ex.imm_data = wqe->wr.ex.imm_data; in qib_ruc_loopback() 449 wc.ex.imm_data = wqe->wr.ex.imm_data; in qib_ruc_loopback() 484 qp->r_sge.num_sge = wqe->wr.num_sge; in qib_ruc_loopback() 501 (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? in qib_ruc_loopback() 551 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM) in qib_ruc_loopback() 565 wqe->wr.send_flags & IB_SEND_SOLICITED); in qib_ruc_loopback() 780 for (i = 0; i < wqe->wr.num_sge; i++) { in qib_send_complete() 792 (wqe->wr.send_flags & IB_SEND_SIGNALED) || in qib_send_complete() [all …]
|
D | qib_ud.c | 133 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) { in qib_ud_loopback() 135 wc.ex.imm_data = swqe->wr.ex.imm_data; in qib_ud_loopback() 174 ssge.num_sge = swqe->wr.num_sge; in qib_ud_loopback() 220 swqe->wr.send_flags & IB_SEND_SOLICITED); in qib_ud_loopback() 322 qp->s_sge.num_sge = wqe->wr.num_sge; in qib_make_ud_req() 341 if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { in qib_make_ud_req() 343 ohdr->u.ud.imm_data = wqe->wr.ex.imm_data; in qib_make_ud_req() 361 if (wqe->wr.send_flags & IB_SEND_SOLICITED) in qib_make_ud_req()
|
D | qib_keys.c | 341 int qib_reg_mr(struct qib_qp *qp, struct ib_reg_wr *wr) in qib_reg_mr() argument 345 struct qib_mr *mr = to_imr(wr->mr); in qib_reg_mr() 347 u32 key = wr->key; in qib_reg_mr() 375 mrg->access_flags = wr->access; in qib_reg_mr()
|
D | qib_verbs.h | 344 struct ib_send_wr wr; /* don't use wr.sg_list */ member 984 int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr); 999 int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, 1053 int qib_reg_mr(struct qib_qp *qp, struct ib_reg_wr *wr);
|
/linux-4.4.14/drivers/staging/rdma/ipath/ |
D | ipath_srq.c | 48 int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, in ipath_post_srq_receive() argument 56 for (; wr; wr = wr->next) { in ipath_post_srq_receive() 61 if ((unsigned) wr->num_sge > srq->rq.max_sge) { in ipath_post_srq_receive() 62 *bad_wr = wr; in ipath_post_srq_receive() 74 *bad_wr = wr; in ipath_post_srq_receive() 80 wqe->wr_id = wr->wr_id; in ipath_post_srq_receive() 81 wqe->num_sge = wr->num_sge; in ipath_post_srq_receive() 82 for (i = 0; i < wr->num_sge; i++) in ipath_post_srq_receive() 83 wqe->sg_list[i] = wr->sg_list[i]; in ipath_post_srq_receive()
|
D | ipath_uc.c | 100 qp->s_sge.num_sge = wqe->wr.num_sge; in ipath_make_uc_req() 102 switch (wqe->wr.opcode) { in ipath_make_uc_req() 110 if (wqe->wr.opcode == IB_WR_SEND) in ipath_make_uc_req() 116 ohdr->u.imm_data = wqe->wr.ex.imm_data; in ipath_make_uc_req() 119 if (wqe->wr.send_flags & IB_SEND_SOLICITED) in ipath_make_uc_req() 139 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) in ipath_make_uc_req() 145 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data; in ipath_make_uc_req() 147 if (wqe->wr.send_flags & IB_SEND_SOLICITED) in ipath_make_uc_req() 169 if (wqe->wr.opcode == IB_WR_SEND) in ipath_make_uc_req() 174 ohdr->u.imm_data = wqe->wr.ex.imm_data; in ipath_make_uc_req() [all …]
|
D | ipath_rc.c | 50 ss->num_sge = wqe->wr.num_sge; in restart_sge() 294 if ((wqe->wr.send_flags & IB_SEND_FENCE) && in ipath_make_rc_req() 310 switch (wqe->wr.opcode) { in ipath_make_rc_req() 326 if (wqe->wr.opcode == IB_WR_SEND) in ipath_make_rc_req() 331 ohdr->u.imm_data = wqe->wr.ex.imm_data; in ipath_make_rc_req() 334 if (wqe->wr.send_flags & IB_SEND_SOLICITED) in ipath_make_rc_req() 365 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) in ipath_make_rc_req() 371 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data; in ipath_make_rc_req() 373 if (wqe->wr.send_flags & IB_SEND_SOLICITED) in ipath_make_rc_req() 433 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { in ipath_make_rc_req() [all …]
|
D | ipath_verbs.c | 336 static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr) in ipath_post_one_send() argument 360 if (wr->num_sge > qp->s_max_sge) in ipath_post_one_send() 369 if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) in ipath_post_one_send() 373 if (wr->opcode != IB_WR_SEND && in ipath_post_one_send() 374 wr->opcode != IB_WR_SEND_WITH_IMM) in ipath_post_one_send() 377 if (qp->ibqp.pd != ud_wr(wr)->ah->pd) in ipath_post_one_send() 379 } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) in ipath_post_one_send() 381 else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP && in ipath_post_one_send() 382 (wr->num_sge == 0 || in ipath_post_one_send() 383 wr->sg_list[0].length < sizeof(u64) || in ipath_post_one_send() [all …]
|
D | ipath_ruc.c | 329 sqp->s_sge.num_sge = wqe->wr.num_sge; in ipath_ruc_loopback() 331 switch (wqe->wr.opcode) { in ipath_ruc_loopback() 334 wc.ex.imm_data = wqe->wr.ex.imm_data; in ipath_ruc_loopback() 345 wc.ex.imm_data = wqe->wr.ex.imm_data; in ipath_ruc_loopback() 371 qp->r_sge.num_sge = wqe->wr.num_sge; in ipath_ruc_loopback() 387 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? in ipath_ruc_loopback() 431 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM) in ipath_ruc_loopback() 445 wqe->wr.send_flags & IB_SEND_SOLICITED); in ipath_ruc_loopback() 708 (wqe->wr.send_flags & IB_SEND_SIGNALED) || in ipath_send_complete() 713 wc.wr_id = wqe->wr.wr_id; in ipath_send_complete() [all …]
|
D | ipath_ud.c | 95 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) { in ipath_ud_loopback() 97 wc.ex.imm_data = swqe->wr.ex.imm_data; in ipath_ud_loopback() 197 if (--swqe->wr.num_sge) in ipath_ud_loopback() 227 swqe->ud_wr.wr.send_flags & IB_SEND_SOLICITED); in ipath_ud_loopback() 324 qp->s_sge.num_sge = wqe->ud_wr.wr.num_sge; in ipath_make_ud_req() 342 if (wqe->ud_wr.wr.opcode == IB_WR_SEND_WITH_IMM) { in ipath_make_ud_req() 344 ohdr->u.ud.imm_data = wqe->ud_wr.wr.ex.imm_data; in ipath_make_ud_req() 362 if (wqe->ud_wr.wr.send_flags & IB_SEND_SOLICITED) in ipath_make_ud_req()
|
D | ipath_verbs.h | 281 struct ib_send_wr wr; /* don't use wr.sg_list */ member 783 int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr); 799 int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
|
/linux-4.4.14/drivers/infiniband/hw/ocrdma/ |
D | ocrdma_verbs.c | 1997 struct ib_send_wr *wr) in ocrdma_build_ud_hdr() argument 2001 struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah); in ocrdma_build_ud_hdr() 2003 ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn; in ocrdma_build_ud_hdr() 2007 ud_hdr->qkey = ud_wr(wr)->remote_qkey; in ocrdma_build_ud_hdr() 2043 struct ib_send_wr *wr, u32 wqe_size) in ocrdma_build_inline_sges() argument 2048 if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) { in ocrdma_build_inline_sges() 2049 hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge); in ocrdma_build_inline_sges() 2057 for (i = 0; i < wr->num_sge; i++) { in ocrdma_build_inline_sges() 2059 (void *)(unsigned long)wr->sg_list[i].addr, in ocrdma_build_inline_sges() 2060 wr->sg_list[i].length); in ocrdma_build_inline_sges() [all …]
|
/linux-4.4.14/net/rds/ |
D | ib_send.c | 780 send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP; in rds_ib_xmit_atomic() 786 send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD; in rds_ib_xmit_atomic() 793 send->s_atomic_wr.wr.num_sge = 1; in rds_ib_xmit_atomic() 794 send->s_atomic_wr.wr.next = NULL; in rds_ib_xmit_atomic() 821 failed_wr = &send->s_atomic_wr.wr; in rds_ib_xmit_atomic() 822 ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr); in rds_ib_xmit_atomic() 825 BUG_ON(failed_wr != &send->s_atomic_wr.wr); in rds_ib_xmit_atomic() 834 if (unlikely(failed_wr != &send->s_atomic_wr.wr)) { in rds_ib_xmit_atomic() 836 BUG_ON(failed_wr != &send->s_atomic_wr.wr); in rds_ib_xmit_atomic() 911 send->s_rdma_wr.wr.num_sge = max_sge; in rds_ib_xmit_rdma() [all …]
|
D | iw_send.c | 774 send->s_reg_wr.wr.opcode = IB_WR_REG_MR; in rds_iw_build_send_reg() 775 send->s_reg_wr.wr.wr_id = 0; in rds_iw_build_send_reg() 776 send->s_reg_wr.wr.num_sge = 0; in rds_iw_build_send_reg() 861 send->s_rdma_wr.wr.send_flags = 0; in rds_iw_xmit_rdma() 870 send->s_rdma_wr.wr.send_flags = IB_SEND_SIGNALED; in rds_iw_xmit_rdma() 878 send->s_rdma_wr.wr.opcode = IB_WR_RDMA_WRITE; in rds_iw_xmit_rdma() 880 send->s_rdma_wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV; in rds_iw_xmit_rdma() 887 send->s_rdma_wr.wr.num_sge = rds_iwdev->max_sge; in rds_iw_xmit_rdma() 890 send->s_rdma_wr.wr.num_sge = num_sge; in rds_iw_xmit_rdma() 892 send->s_rdma_wr.wr.next = NULL; in rds_iw_xmit_rdma() [all …]
|
D | iw_rdma.c | 673 reg_wr.wr.next = NULL; 674 reg_wr.wr.opcode = IB_WR_REG_MR; 675 reg_wr.wr.wr_id = RDS_IW_REG_WR_ID; 676 reg_wr.wr.num_sge = 0; 692 failed_wr = ®_wr.wr; 693 ret = ib_post_send(ibmr->cm_id->qp, ®_wr.wr, &failed_wr); 694 BUG_ON(failed_wr != ®_wr.wr);
|
D | iw_recv.c | 349 struct ib_send_wr *wr = &ic->i_ack_wr; in rds_iw_recv_init_ack() local 356 wr->sg_list = sge; in rds_iw_recv_init_ack() 357 wr->num_sge = 1; in rds_iw_recv_init_ack() 358 wr->opcode = IB_WR_SEND; in rds_iw_recv_init_ack() 359 wr->wr_id = RDS_IW_ACK_WR_ID; in rds_iw_recv_init_ack() 360 wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED; in rds_iw_recv_init_ack()
|
D | ib_recv.c | 562 struct ib_send_wr *wr = &ic->i_ack_wr; in rds_ib_recv_init_ack() local 569 wr->sg_list = sge; in rds_ib_recv_init_ack() 570 wr->num_sge = 1; in rds_ib_recv_init_ack() 571 wr->opcode = IB_WR_SEND; in rds_ib_recv_init_ack() 572 wr->wr_id = RDS_IB_ACK_WR_ID; in rds_ib_recv_init_ack() 573 wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED; in rds_ib_recv_init_ack()
|
/linux-4.4.14/arch/mips/kernel/ |
D | r4k_fpu.S | 243 .macro save_msa_upper wr, off, base 247 copy_s_d \wr, 1 250 copy_s_w \wr, 2 252 copy_s_w \wr, 3 255 copy_s_w \wr, 2 257 copy_s_w \wr, 3 300 .macro restore_msa_upper wr, off, base 305 insert_d \wr, 1 308 insert_w \wr, 2 310 insert_w \wr, 3 [all …]
|
D | signal.c | 171 err |= _save_msa_all_upper(&msa->wr); in save_msa_extcontext() 181 err |= __put_user(val, &msa->wr[i]); in save_msa_extcontext() 221 err |= _restore_msa_all_upper(&msa->wr); in restore_msa_extcontext() 229 err |= __get_user(val, &msa->wr[i]); in restore_msa_extcontext()
|
/linux-4.4.14/Documentation/devicetree/bindings/mtd/ |
D | gpmc-nor.txt | 16 - gpmc,cs-wr-off-ns: Chip-select de-assertion time for writes 23 - gpmc,wr-cycle-ns: Total write cycle time 62 gpmc,cs-wr-off-ns = <186>; 65 gpmc,adv-wr-off-ns = <48>; 71 gpmc,wr-cycle-ns = <186>; 76 gpmc,wr-data-mux-bus-ns = <90>; 77 gpmc,wr-access-ns = <186>;
|
D | lpc32xx-mlc.txt | 38 nxp,wr-high = <40000000>; 39 nxp,wr-low = <83333333>;
|
D | gpmc-nand.txt | 76 gpmc,cs-wr-off-ns = <44>; 79 gpmc,adv-wr-off-ns = <44>; 84 gpmc,wr-cycle-ns = <82>; 85 gpmc,wr-access-ns = <40>; 86 gpmc,wr-data-mux-bus-ns = <0>;
|
/linux-4.4.14/net/9p/ |
D | trans_rdma.c | 402 struct ib_recv_wr wr, *bad_wr; in post_recv() local 415 wr.next = NULL; in post_recv() 417 wr.wr_id = (unsigned long) c; in post_recv() 418 wr.sg_list = &sge; in post_recv() 419 wr.num_sge = 1; in post_recv() 420 return ib_post_recv(rdma->qp, &wr, &bad_wr); in post_recv() 430 struct ib_send_wr wr, *bad_wr; in rdma_request() local 506 wr.next = NULL; in rdma_request() 508 wr.wr_id = (unsigned long) c; in rdma_request() 509 wr.opcode = IB_WR_SEND; in rdma_request() [all …]
|
D | trans_fd.c | 158 struct file *wr; member 246 if (!ts->wr->f_op->poll) in p9_fd_poll() 253 if (ts->rd != ts->wr) { in p9_fd_poll() 254 n = ts->wr->f_op->poll(ts->wr, pt); in p9_fd_poll() 425 if (!(ts->wr->f_flags & O_NONBLOCK)) in p9_fd_write() 431 ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos); in p9_fd_write() 794 ts->wr = fget(wfd); in p9_fd_open() 795 if (!ts->rd || !ts->wr) { in p9_fd_open() 798 if (ts->wr) in p9_fd_open() 799 fput(ts->wr); in p9_fd_open() [all …]
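The trans_fd.c hits show the 9p fd transport keeping two separate struct file pointers, rd and wr, polling each for its own direction (p9_fd_poll combines the two results) and insisting the wr side is non-blocking before writing. A user-space analogue of that split using a pair of pipes and poll(2); the pipe setup and fd roles are purely illustrative.

#include <poll.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
    int in[2], out[2];
    struct pollfd pfd[2];

    if (pipe(in) || pipe(out))
        return 1;

    fcntl(out[1], F_SETFL, O_NONBLOCK);            /* like the non-blocking check in p9_fd_write() */

    pfd[0].fd = in[0];   pfd[0].events = POLLIN;   /* "rd" side: wait for data to read  */
    pfd[1].fd = out[1];  pfd[1].events = POLLOUT;  /* "wr" side: wait for room to write */

    if (write(in[1], "x", 1) != 1)                 /* make the rd side readable */
        return 1;

    if (poll(pfd, 2, 0) > 0) {
        if (pfd[0].revents & POLLIN)
            printf("rd fd readable\n");
        if (pfd[1].revents & POLLOUT)
            printf("wr fd writable\n");
    }
    return 0;
}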
|
/linux-4.4.14/arch/mips/kvm/ |
D | msa.S | 93 .macro kvm_restore_msa_upper wr, off, base 98 insert_d \wr, 1 101 insert_w \wr, 2 103 insert_w \wr, 3 106 insert_w \wr, 2 108 insert_w \wr, 3
|
/linux-4.4.14/drivers/media/pci/solo6x10/ |
D | solo6x10-p2m.c | 37 int solo_p2m_dma(struct solo_dev *solo_dev, int wr, in solo_p2m_dma() argument 50 wr ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); in solo_p2m_dma() 54 ret = solo_p2m_dma_t(solo_dev, wr, dma_addr, ext_addr, size, in solo_p2m_dma() 58 wr ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); in solo_p2m_dma() 136 void solo_p2m_fill_desc(struct solo_p2m_desc *desc, int wr, in solo_p2m_fill_desc() argument 145 (wr ? SOLO_P2M_WRITE : 0) | SOLO_P2M_TRANS_ON; in solo_p2m_fill_desc() 157 int solo_p2m_dma_t(struct solo_dev *solo_dev, int wr, in solo_p2m_dma_t() argument 163 solo_p2m_fill_desc(&desc[1], wr, dma_addr, ext_addr, size, repeat, in solo_p2m_dma_t()
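solo_p2m_dma() uses its wr argument to pick the DMA mapping direction (PCI_DMA_TODEVICE for writes, PCI_DMA_FROMDEVICE for reads) for both the map and the matching unmap. The same decision in a stand-alone form, with a stand-in enum instead of the kernel constants:

#include <stdio.h>
#include <stddef.h>

/* Stand-in for the kernel's DMA direction constants. */
enum dma_dir { DMA_TO_DEVICE, DMA_FROM_DEVICE };

static const char *dir_name(enum dma_dir d)
{
    return d == DMA_TO_DEVICE ? "to device" : "from device";
}

static void p2m_dma(int wr, const void *buf, size_t size)
{
    enum dma_dir dir = wr ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

    printf("map %zu bytes %s\n", size, dir_name(dir));
    /* ... map buf, run the transfer, then unmap with the same direction ... */
    (void)buf;
}

int main(void)
{
    char frame[64];

    p2m_dma(1, frame, sizeof(frame));   /* write: CPU memory -> device */
    p2m_dma(0, frame, sizeof(frame));   /* read:  device -> CPU memory */
    return 0;
}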
|
D | solo6x10.h | 344 int solo_p2m_dma_t(struct solo_dev *solo_dev, int wr, 347 int solo_p2m_dma(struct solo_dev *solo_dev, int wr, 350 void solo_p2m_fill_desc(struct solo_p2m_desc *desc, int wr,
|
D | solo6x10-i2c.c | 69 static void solo_i2c_flush(struct solo_dev *solo_dev, int wr) in solo_i2c_flush() argument 78 if (wr) { in solo_i2c_flush()
|
/linux-4.4.14/arch/sparc/lib/ |
D | xor.S | 32 0: wr %g0, FPRS_FEF, %fprs 34 wr %g0, ASI_BLK_P, %asi 89 wr %g1, %g0, %asi 91 wr %g0, 0, %fprs 101 0: wr %g0, FPRS_FEF, %fprs 103 wr %g0, ASI_BLK_P, %asi 155 wr %g1, %g0, %asi 157 wr %g0, 0, %fprs 167 0: wr %g0, FPRS_FEF, %fprs 169 wr %g0, ASI_BLK_P, %asi [all …]
|
D | NGpage.S | 21 wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi 56 wr %g3, 0x0, %asi 67 wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi 105 wr %g3, 0x0, %asi
|
D | NGbzero.S | 44 wr %o4, 0x0, %asi 71 wr %g7, 0x0, %asi 88 wr %o4, 0x0, %asi 110 wr %o5, 0x0, %asi
|
D | GENbzero.S | 43 wr %o4, 0x0, %asi 86 wr %o4, 0x0, %asi 108 wr %o5, 0x0, %asi
|
D | copy_page.S | 174 wr %g0, ASI_BLK_P, %asi 175 wr %g0, ASI_BLK_COMMIT_P, %asi 225 wr %g3, 0x0, %asi
|
D | NG4memcpy.S | 21 wr %g0, FPRS_FEF, %fprs; \ 27 #define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs 30 #define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs 103 wr %g0, 0x80, %asi
|
D | U3memcpy.S | 14 #define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \ 16 #define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs 18 #define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs 19 #define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
|
D | udivdi3.S | 64 wr %g0, 0, %y 202 wr %g0,%o1,%y ! SPARC has 0-3 delay insn after a wr
|
D | divdi3.S | 86 wr %g0, 0, %y 214 wr %g0,%o1,%y ! SPARC has 0-3 delay insn after a wr
|
D | muldi3.S | 25 wr %g0, %i1, %y
|
D | NG2memcpy.S | 16 #define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \ 18 #define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs 20 #define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs 21 #define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
|
D | NGmemcpy.S | 12 wr TMP, 0x0, %asi; 16 wr %g0, ASI_PNF, %asi 120 wr %g0, STORE_ASI, %asi
|
/linux-4.4.14/drivers/staging/fbtft/ |
D | fbtft-io.c | 147 gpio_set_value(par->gpio.wr, 0); in fbtft_write_gpio8_wr() 152 gpio_set_value(par->gpio.wr, 0); /* used as delay */ in fbtft_write_gpio8_wr() 170 gpio_set_value(par->gpio.wr, 1); in fbtft_write_gpio8_wr() 197 gpio_set_value(par->gpio.wr, 0); in fbtft_write_gpio16_wr() 202 gpio_set_value(par->gpio.wr, 0); /* used as delay */ in fbtft_write_gpio16_wr() 220 gpio_set_value(par->gpio.wr, 1); in fbtft_write_gpio16_wr()
|
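fbtft_write_gpio8_wr() above drives an 8080-style parallel bus: the data lines are set up while WR is held low and the display controller latches them on the rising edge of WR. A stripped-down sketch of that strobe, with the helper name and GPIO numbering assumed for illustration:

#include <linux/gpio.h>

/* Sketch of the WR strobe used by the gpio8/gpio16 write paths above. */
static void demo_par8_write(unsigned int gpio_wr,
			    const unsigned int data_gpio[8], u8 byte)
{
	int i;

	gpio_set_value(gpio_wr, 0);		/* start the write cycle */
	for (i = 0; i < 8; i++)			/* put the byte on DB0..DB7 */
		gpio_set_value(data_gpio[i], (byte >> i) & 1);
	gpio_set_value(gpio_wr, 1);		/* rising edge latches the data */
}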
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb4/ |
D | cxgb4_uld.h | 56 (w)->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) | \ 57 FW_WR_IMMDLEN_V(sizeof(*w) - sizeof(w->wr))); \ 58 (w)->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*w), 16)) | \ 60 (w)->wr.wr_lo = cpu_to_be64(0); \ 69 (w)->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) | \ 71 (w)->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(wrlen, 16)) | \ 73 (w)->wr.wr_lo = cpu_to_be64(0); \
|
D | sge.c | 973 u64 *wr = (u64 *)&q->desc[index]; in ring_tx_db() local 977 wr); in ring_tx_db() 1147 struct fw_eth_tx_pkt_wr *wr; in t4_eth_xmit() local 1214 wr = (void *)&q->q.desc[q->q.pidx]; in t4_eth_xmit() 1215 wr->equiq_to_len16 = htonl(wr_mid); in t4_eth_xmit() 1216 wr->r3 = cpu_to_be64(0); in t4_eth_xmit() 1217 end = (u64 *)wr + flits; in t4_eth_xmit() 1222 struct cpl_tx_pkt_lso *lso = (void *)wr; in t4_eth_xmit() 1228 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | in t4_eth_xmit() 1257 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | in t4_eth_xmit() [all …]
|
/linux-4.4.14/Documentation/devicetree/bindings/net/ |
D | gpmc-eth.txt | 31 - gpmc,cs-wr-off-ns: Chip-select de-assertion time for writes 38 - gpmc,wr-cycle-ns: Total write cycle time 70 gpmc,cs-wr-off-ns = <186>; 73 gpmc,adv-wr-off-ns = <48>; 79 gpmc,wr-cycle-ns = <186>; 84 gpmc,wr-data-mux-bus-ns = <90>; 85 gpmc,wr-access-ns = <186>;
|
/linux-4.4.14/drivers/media/radio/ |
D | radio-tea5764.c | 175 struct tea5764_write_regs wr; in tea5764_i2c_write() local 180 .len = sizeof(wr), in tea5764_i2c_write() 181 .buf = (void *)&wr in tea5764_i2c_write() 184 wr.intreg = r->intreg & 0xff; in tea5764_i2c_write() 185 wr.frqset = __cpu_to_be16(r->frqset); in tea5764_i2c_write() 186 wr.tnctrl = __cpu_to_be16(r->tnctrl); in tea5764_i2c_write() 187 wr.testreg = __cpu_to_be16(r->testreg); in tea5764_i2c_write() 188 wr.rdsctrl = __cpu_to_be16(r->rdsctrl); in tea5764_i2c_write() 189 wr.rdsbbl = __cpu_to_be16(r->rdsbbl); in tea5764_i2c_write()
|
/linux-4.4.14/drivers/net/hamradio/ |
D | scc.c | 275 static inline void wr(struct scc_channel *scc, unsigned char reg, in wr() function 721 wr(scc,R12,tc & 255); /* brg rate LOW */ in set_brg() 722 wr(scc,R13,tc >> 8); /* brg rate HIGH */ in set_brg() 742 wr(scc, R14, BRSRC); /* BRG source = PCLK */ in init_brg() 799 wr(scc,R4,X1CLK|SDLC); /* *1 clock, SDLC mode */ in init_channel() 800 wr(scc,R1,0); /* no W/REQ operation */ in init_channel() 801 wr(scc,R3,Rx8|RxCRC_ENAB); /* RX 8 bits/char, CRC, disabled */ in init_channel() 802 wr(scc,R5,Tx8|DTR|TxCRC_ENAB); /* TX 8 bits/char, disabled, DTR */ in init_channel() 803 wr(scc,R6,0); /* SDLC address zero (not used) */ in init_channel() 804 wr(scc,R7,FLAG); /* SDLC flag value */ in init_channel() [all …]
|
D | hdlcdrv.c | 455 s->hdlcrx.hbuf.rd = s->hdlcrx.hbuf.wr = 0; in hdlcdrv_open() 459 s->hdlctx.hbuf.rd = s->hdlctx.hbuf.wr = 0; in hdlcdrv_open() 586 if (s->bitbuf_channel.rd == s->bitbuf_channel.wr) in hdlcdrv_ioctl() 599 if (s->bitbuf_hdlc.rd == s->bitbuf_hdlc.wr) in hdlcdrv_ioctl() 651 s->hdlcrx.hbuf.rd = s->hdlcrx.hbuf.wr = 0; in hdlcdrv_setup() 656 s->hdlctx.hbuf.rd = s->hdlctx.hbuf.wr = 0; in hdlcdrv_setup() 666 s->bitbuf_channel.rd = s->bitbuf_channel.wr = 0; in hdlcdrv_setup() 669 s->bitbuf_hdlc.rd = s->bitbuf_hdlc.wr = 0; in hdlcdrv_setup()
|
/linux-4.4.14/drivers/gpu/drm/udl/ |
D | udl_dmabuf.c | 79 struct scatterlist *rd, *wr; in udl_map_dma_buf() local 118 wr = sgt->sgl; in udl_map_dma_buf() 120 sg_set_page(wr, sg_page(rd), rd->length, rd->offset); in udl_map_dma_buf() 122 wr = sg_next(wr); in udl_map_dma_buf()
|
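udl_map_dma_buf() above (like the videobuf2 attach helpers later in this listing) copies the exporter's scatterlist entry by entry into a fresh sg_table that the importer can then map for its own device. A self-contained sketch of that clone loop; the helper name is an assumption:

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Sketch of the rd/wr scatterlist copy idiom from the dma-buf attach paths above. */
static struct sg_table *demo_clone_sgt(struct sg_table *src)
{
	struct sg_table *dst;
	struct scatterlist *rd, *wr;
	int i, ret;

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(dst, src->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(dst);
		return ERR_PTR(ret);
	}

	rd = src->sgl;
	wr = dst->sgl;
	for (i = 0; i < src->orig_nents; i++) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}
	return dst;
}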
/linux-4.4.14/Documentation/devicetree/bindings/display/exynos/ |
D | samsung-fimd.txt | 52 - wr-setup: clock cycles for the active period of CS signal is enabled until 55 - wr-active: clock cycles for the active period of CS is enabled. 57 - wr-hold: clock cycles for the active period of CS is disabled until write 69 | wr-setup+1 | | wr-hold+1 | 72 | wr-active+1|
|
/linux-4.4.14/fs/ |
D | mpage.c | 488 int wr = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE); in __mpage_writepage() local 597 bio = mpage_bio_submit(wr, bio); in __mpage_writepage() 624 bio = mpage_bio_submit(wr, bio); in __mpage_writepage() 634 bio = mpage_bio_submit(wr, bio); in __mpage_writepage() 646 bio = mpage_bio_submit(wr, bio); in __mpage_writepage() 703 int wr = (wbc->sync_mode == WB_SYNC_ALL ? in mpage_writepages() local 705 mpage_bio_submit(wr, mpd.bio); in mpage_writepages() 724 int wr = (wbc->sync_mode == WB_SYNC_ALL ? in mpage_writepage() local 726 mpage_bio_submit(wr, mpd.bio); in mpage_writepage()
|
/linux-4.4.14/drivers/infiniband/ulp/srpt/ |
D | ib_srpt.c | 778 struct ib_recv_wr wr, *bad_wr; in srpt_post_recv() local 781 wr.wr_id = encode_wr_id(SRPT_RECV, ioctx->ioctx.index); in srpt_post_recv() 787 wr.next = NULL; in srpt_post_recv() 788 wr.sg_list = &list; in srpt_post_recv() 789 wr.num_sge = 1; in srpt_post_recv() 791 return ib_post_srq_recv(sdev->srq, &wr, &bad_wr); in srpt_post_recv() 803 struct ib_send_wr wr, *bad_wr; in srpt_post_send() local 822 wr.next = NULL; in srpt_post_send() 823 wr.wr_id = encode_wr_id(SRPT_SEND, ioctx->ioctx.index); in srpt_post_send() 824 wr.sg_list = &list; in srpt_post_send() [all …]
|
/linux-4.4.14/include/rdma/ |
D | ib_verbs.h | 1096 struct ib_send_wr wr; member 1101 static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr) in rdma_wr() argument 1103 return container_of(wr, struct ib_rdma_wr, wr); in rdma_wr() 1107 struct ib_send_wr wr; member 1116 static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr) in atomic_wr() argument 1118 return container_of(wr, struct ib_atomic_wr, wr); in atomic_wr() 1122 struct ib_send_wr wr; member 1133 static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr) in ud_wr() argument 1135 return container_of(wr, struct ib_ud_wr, wr); in ud_wr() 1139 struct ib_send_wr wr; member [all …]
|
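These ib_verbs.h matches show the 4.4 layout where each extended work request (RDMA, atomic, UD, ...) embeds a plain ib_send_wr named wr and is recovered with container_of(). A short sketch of both sides of that pattern; the function names are illustrative only:

#include <rdma/ib_verbs.h>

/* ULP side: fill the outer ib_rdma_wr and post the embedded ib_send_wr. */
static int demo_post_rdma_write(struct ib_qp *qp, struct ib_sge *sge,
				u64 remote_addr, u32 rkey, u64 ctx)
{
	struct ib_rdma_wr rdma = {
		.wr = {
			.wr_id      = ctx,
			.opcode     = IB_WR_RDMA_WRITE,
			.send_flags = IB_SEND_SIGNALED,
			.sg_list    = sge,
			.num_sge    = 1,
		},
		.remote_addr = remote_addr,
		.rkey        = rkey,
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &rdma.wr, &bad_wr);
}

/* Provider side: recover the outer structure from the embedded ib_send_wr. */
static u64 demo_remote_addr(struct ib_send_wr *wr)
{
	return rdma_wr(wr)->remote_addr;  /* container_of(wr, struct ib_rdma_wr, wr) */
}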
/linux-4.4.14/drivers/net/ethernet/apm/xgene/ |
D | xgene_enet_xgmac.c | 49 static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr, in xgene_enet_wr_indirect() argument 57 iowrite32(wr_data, wr); in xgene_enet_wr_indirect() 75 void __iomem *addr, *wr, *cmd, *cmd_done; in xgene_enet_wr_mac() local 78 wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET; in xgene_enet_wr_mac() 82 if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data)) in xgene_enet_wr_mac()
|
D | xgene_enet_hw.c | 270 static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr, in xgene_enet_wr_indirect() argument 278 iowrite32(wr_data, wr); in xgene_enet_wr_indirect() 296 void __iomem *addr, *wr, *cmd, *cmd_done; in xgene_enet_wr_mcx_mac() local 299 wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET; in xgene_enet_wr_mcx_mac() 303 if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data)) in xgene_enet_wr_mcx_mac()
|
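Both xgene files implement the same indirect register access: latch the target address and data, kick a command register, then poll a done register before clearing the command. A hedged sketch of that write sequence; the command bit below is a placeholder, not the hardware's real encoding:

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_WR_CMD	0x80000000u	/* assumption: "write" command bit */

/* Sketch of the indirect write behind xgene_enet_wr_indirect() above. */
static bool demo_wr_indirect(void __iomem *addr, void __iomem *wr,
			     void __iomem *cmd, void __iomem *cmd_done,
			     u32 wr_addr, u32 wr_data)
{
	u32 done;
	int timeout = 10;

	iowrite32(wr_addr, addr);	/* which internal register to touch */
	iowrite32(wr_data, wr);		/* the value to write */
	iowrite32(DEMO_WR_CMD, cmd);	/* start the transaction */

	while (!(done = ioread32(cmd_done)) && timeout--)
		udelay(1);		/* hardware raises cmd_done when finished */

	iowrite32(0, cmd);		/* clear the command for the next access */
	return done != 0;
}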
/linux-4.4.14/Documentation/devicetree/bindings/memory-controllers/ |
D | mvebu-devbus.txt | 85 - devbus,ale-wr-ps: Defines the time delay from the ALE[0] negation cycle 89 - devbus,wr-low-ps: Defines the time during which DEV_WEn is active. 95 - devbus,wr-high-ps: Defines the time during which DEV_WEn is kept 98 <wr-high-ps> - <tick> ps. 151 devbus,wr-high-ps = <60000>; 152 devbus,wr-low-ps = <60000>; 153 devbus,ale-wr-ps = <60000>;
|
/linux-4.4.14/drivers/infiniband/core/ |
D | uverbs_cmd.c | 2446 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr; in ib_uverbs_post_send() local 2503 ud->ah = idr_read_ah(user_wr->wr.ud.ah, file->ucontext); in ib_uverbs_post_send() 2509 ud->remote_qpn = user_wr->wr.ud.remote_qpn; in ib_uverbs_post_send() 2510 ud->remote_qkey = user_wr->wr.ud.remote_qkey; in ib_uverbs_post_send() 2512 next = &ud->wr; in ib_uverbs_post_send() 2525 rdma->remote_addr = user_wr->wr.rdma.remote_addr; in ib_uverbs_post_send() 2526 rdma->rkey = user_wr->wr.rdma.rkey; in ib_uverbs_post_send() 2528 next = &rdma->wr; in ib_uverbs_post_send() 2540 atomic->remote_addr = user_wr->wr.atomic.remote_addr; in ib_uverbs_post_send() 2541 atomic->compare_add = user_wr->wr.atomic.compare_add; in ib_uverbs_post_send() [all …]
|
D | mad.c | 835 send_wr->wr.wr_id, drslid, in handle_outgoing_dr_smp() 1042 mad_send_wr->send_wr.wr.wr_id = (unsigned long) mad_send_wr; in ib_create_send_mad() 1043 mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list; in ib_create_send_mad() 1044 mad_send_wr->send_wr.wr.num_sge = 2; in ib_create_send_mad() 1045 mad_send_wr->send_wr.wr.opcode = IB_WR_SEND; in ib_create_send_mad() 1046 mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED; in ib_create_send_mad() 1154 mad_send_wr->send_wr.wr.wr_id = (unsigned long)&mad_send_wr->mad_list; in ib_send_mad() 1182 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr, in ib_send_mad() 1841 static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr, in rcv_has_same_class() argument 1844 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class == in rcv_has_same_class() [all …]
|
D | mad_rmpp.c | 634 static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr, in adjust_last_ack() argument 639 wr->last_ack = seg_num; in adjust_last_ack() 640 list = &wr->last_ack_seg->list; in adjust_last_ack() 641 list_for_each_entry(wr->last_ack_seg, list, list) in adjust_last_ack() 642 if (wr->last_ack_seg->num == seg_num) in adjust_last_ack()
|
/linux-4.4.14/arch/ia64/hp/sim/ |
D | simscsi.c | 71 static int rd, wr; variable 322 queue[wr].sc = sc; in simscsi_queuecommand_lck() 323 wr = (wr + 1) % SIMSCSI_REQ_QUEUE_LEN; in simscsi_queuecommand_lck()
|
/linux-4.4.14/drivers/infiniband/hw/usnic/ |
D | usnic_ib_verbs.h | 82 int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 84 int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvif/ |
D | object.c | 126 struct nvif_ioctl_wr_v0 wr; in nvif_object_wr() member 129 .wr.size = size, in nvif_object_wr() 130 .wr.addr = addr, in nvif_object_wr() 131 .wr.data = data, in nvif_object_wr()
|
/linux-4.4.14/arch/sparc/mm/ |
D | swift.S | 61 wr %g3, 0x0, %psr 99 wr %g1, 0x0, %psr 127 wr %g3, 0x0, %psr 165 wr %g1, 0x0, %psr
|
D | ultra.S | 780 wr %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint 785 wr %g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint 790 wr %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint 795 wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint 800 wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint 806 wr %g0, (1 << PIL_KGDB_CAPTURE), %set_softint
|
/linux-4.4.14/Documentation/devicetree/bindings/bus/ |
D | ti-gpmc.txt | 43 - gpmc,cs-wr-off-ns: Write deassertion time 48 - gpmc,adv-wr-off-ns: Write deassertion time 63 - gpmc,wr-cycle-ns: Total write cycle time 83 - gpmc,wr-access-ns: In synchronous write mode, for single or 88 - gpmc,wr-data-mux-bus-ns: In address-data multiplex mode, specifies
|
/linux-4.4.14/drivers/infiniband/ulp/srp/ |
D | ib_srp.c | 460 static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID }; in srp_destroy_qp() local 473 ret = ib_post_recv(ch->qp, &wr, &bad_wr); in srp_destroy_qp() 1047 struct ib_send_wr wr = { in srp_inv_rkey() local 1056 return ib_post_send(ch->qp, &wr, &bad_wr); in srp_inv_rkey() 1321 struct ib_reg_wr wr; in srp_map_finish_fr() local 1352 wr.wr.next = NULL; in srp_map_finish_fr() 1353 wr.wr.opcode = IB_WR_REG_MR; in srp_map_finish_fr() 1354 wr.wr.wr_id = FAST_REG_WR_ID_MASK; in srp_map_finish_fr() 1355 wr.wr.num_sge = 0; in srp_map_finish_fr() 1356 wr.wr.send_flags = 0; in srp_map_finish_fr() [all …]
|
/linux-4.4.14/drivers/mtd/nand/ |
D | mpc5121_nfc.c | 407 u8 *buffer, uint size, int wr) in mpc5121_nfc_copy_spare() argument 444 if (wr) in mpc5121_nfc_copy_spare() 459 int wr) in mpc5121_nfc_buf_copy() argument 473 mpc5121_nfc_copy_spare(mtd, c, buf, len, wr); in mpc5121_nfc_buf_copy() 484 if (wr) in mpc5121_nfc_buf_copy() 493 mpc5121_nfc_buf_copy(mtd, buf, len, wr); in mpc5121_nfc_buf_copy()
|
/linux-4.4.14/fs/autofs4/ |
D | waitq.c | 63 ssize_t wr = 0; in autofs4_write() local 73 (wr = __vfs_write(file,data,bytes,&file->f_pos)) > 0) { in autofs4_write() 74 data += wr; in autofs4_write() 75 bytes -= wr; in autofs4_write() 83 if (wr == -EPIPE && !sigpipe) { in autofs4_write()
|
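autofs4_write() above loops on __vfs_write() so that short writes and signals do not truncate the packet sent to the daemon. The same pattern in plain user-space C, using write(2) so the sketch stays self-contained; the helper name is illustrative:

#include <errno.h>
#include <unistd.h>

/* Sketch of the "keep writing until the whole buffer is gone" loop. */
static ssize_t write_all(int fd, const char *data, size_t bytes)
{
	size_t done = 0;

	while (done < bytes) {
		ssize_t wr = write(fd, data + done, bytes - done);

		if (wr <= 0) {
			if (wr < 0 && errno == EINTR)
				continue;	/* retry after a signal */
			return -1;		/* e.g. EPIPE: reader went away */
		}
		done += wr;			/* short write: advance and retry */
	}
	return done;
}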
/linux-4.4.14/arch/sparc/power/ |
D | hibernate_asm.S | 64 wr %g0, ASI_PHYS_USE_EC, %asi 119 wr %g1, %g0, %asi
|
/linux-4.4.14/arch/mips/include/uapi/asm/ |
D | ucontext.h | 38 unsigned long long wr[32]; member
|
/linux-4.4.14/Documentation/devicetree/bindings/mips/cavium/ |
D | bootbus.txt | 42 - cavium,t-wr-hld: A cell specifying the WR_HLD timing (in nS). 93 cavium,t-wr-hld = <45>; 113 cavium,t-wr-hld = <70>;
|
/linux-4.4.14/drivers/i2c/busses/ |
D | i2c-ocores.c | 321 u32 curr, wr; in oc_setreg_grlib() local 328 wr = (curr & 0xff00) | value; in oc_setreg_grlib() 330 wr = (((u32)value) << 8) | (curr & 0xff); in oc_setreg_grlib() 332 wr = value; in oc_setreg_grlib() 334 iowrite32be(wr, i2c->base + (rreg << i2c->reg_shift)); in oc_setreg_grlib()
|
/linux-4.4.14/drivers/staging/rdma/ehca/ |
D | ehca_reqs.c | 119 struct ib_sge *sge = ud_wr->wr.sg_list; in trace_ud_wr() 121 "send_flags=%x opcode=%x", idx, ud_wr->wr.wr_id, in trace_ud_wr() 122 ud_wr->wr.num_sge, ud_wr->wr.send_flags, in trace_ud_wr() 123 ud_wr->wr.opcode); in trace_ud_wr() 137 for (j = 0; j < ud_wr->wr.num_sge; j++) { in trace_ud_wr() 148 ud_wr = ud_wr(ud_wr->wr.next); in trace_ud_wr()
|
/linux-4.4.14/drivers/infiniband/ulp/ipoib/ |
D | ipoib_verbs.c | 224 priv->tx_wr.wr.opcode = IB_WR_SEND; in ipoib_transport_dev_init() 225 priv->tx_wr.wr.sg_list = priv->tx_sge; in ipoib_transport_dev_init() 226 priv->tx_wr.wr.send_flags = IB_SEND_SIGNALED; in ipoib_transport_dev_init()
|
D | ipoib_cm.c | 116 struct ib_recv_wr *wr, in ipoib_cm_post_receive_nonsrq() argument 123 wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV; in ipoib_cm_post_receive_nonsrq() 128 ret = ib_post_recv(rx->qp, wr, &bad_wr); in ipoib_cm_post_receive_nonsrq() 328 struct ib_recv_wr *wr, in ipoib_cm_init_rx_wr() argument 341 wr->next = NULL; in ipoib_cm_init_rx_wr() 342 wr->sg_list = sge; in ipoib_cm_init_rx_wr() 343 wr->num_sge = priv->cm.num_frags; in ipoib_cm_init_rx_wr() 351 struct ib_recv_wr wr; in ipoib_cm_nonsrq_init_rx() member 370 ipoib_cm_init_rx_wr(dev, &t->wr, t->sge); in ipoib_cm_nonsrq_init_rx() 392 ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i); in ipoib_cm_nonsrq_init_rx() [all …]
|
D | ipoib_ib.c | 521 priv->tx_wr.wr.wr_id = wr_id; in post_send() 529 priv->tx_wr.wr.opcode = IB_WR_LSO; in post_send() 531 priv->tx_wr.wr.opcode = IB_WR_SEND; in post_send() 533 return ib_post_send(priv->qp, &priv->tx_wr.wr, &bad_wr); in post_send() 586 priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM; in ipoib_send() 588 priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM; in ipoib_send()
|
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb4vf/ |
D | sge.c | 1171 struct fw_eth_tx_pkt_vm_wr *wr; in t4vf_eth_xmit() local 1175 const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) + in t4vf_eth_xmit() 1176 sizeof(wr->ethmacsrc) + in t4vf_eth_xmit() 1177 sizeof(wr->ethtype) + in t4vf_eth_xmit() 1178 sizeof(wr->vlantci)); in t4vf_eth_xmit() 1267 wr = (void *)&txq->q.desc[txq->q.pidx]; in t4vf_eth_xmit() 1268 wr->equiq_to_len16 = cpu_to_be32(wr_mid); in t4vf_eth_xmit() 1269 wr->r3[0] = cpu_to_be32(0); in t4vf_eth_xmit() 1270 wr->r3[1] = cpu_to_be32(0); in t4vf_eth_xmit() 1271 skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); in t4vf_eth_xmit() [all …]
|
/linux-4.4.14/drivers/media/v4l2-core/ |
D | videobuf2-dma-contig.c | 223 struct scatterlist *rd, *wr; in vb2_dc_dmabuf_ops_attach() local 243 wr = sgt->sgl; in vb2_dc_dmabuf_ops_attach() 245 sg_set_page(wr, sg_page(rd), rd->length, rd->offset); in vb2_dc_dmabuf_ops_attach() 247 wr = sg_next(wr); in vb2_dc_dmabuf_ops_attach()
|
D | videobuf2-dma-sg.c | 389 struct scatterlist *rd, *wr; in vb2_dma_sg_dmabuf_ops_attach() local 409 wr = sgt->sgl; in vb2_dma_sg_dmabuf_ops_attach() 411 sg_set_page(wr, sg_page(rd), rd->length, rd->offset); in vb2_dma_sg_dmabuf_ops_attach() 413 wr = sg_next(wr); in vb2_dma_sg_dmabuf_ops_attach()
|
/linux-4.4.14/drivers/scsi/cxgbi/cxgb3i/ |
D | cxgb3i.c | 164 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); in send_act_open_req() 209 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON)); in send_close_req() 210 req->wr.wr_lo = htonl(V_WR_TID(tid)); in send_close_req() 253 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ)); in send_abort_req() 254 req->wr.wr_lo = htonl(V_WR_TID(csk->tid)); in send_abort_req() 284 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL)); in send_abort_rpl() 285 rpl->wr.wr_lo = htonl(V_WR_TID(csk->tid)); in send_abort_rpl() 312 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); in send_rx_credits() 1076 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS)); in ulp_mem_io_set_hdr() 1155 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); in ddp_setup_conn_pgidx() [all …]
|
/linux-4.4.14/arch/cris/include/arch-v32/arch/hwregs/ |
D | marb_defs.h | 359 unsigned int wr : 1; member 407 unsigned int wr : 1; member
|
/linux-4.4.14/arch/cris/include/arch-v32/mach-fs/mach/hwregs/ |
D | marb_defs.h | 359 unsigned int wr : 1; member 407 unsigned int wr : 1; member
|
/linux-4.4.14/arch/cris/include/arch-v32/mach-a3/mach/hwregs/ |
D | marb_bar_defs.h | 381 unsigned int wr : 1; member 424 unsigned int wr : 1; member
|
D | marb_foo_defs.h | 507 unsigned int wr : 1; member 556 unsigned int wr : 1; member
|
/linux-4.4.14/drivers/pcmcia/ |
D | m32r_pcc.c | 84 void pcc_iorw(int sock, unsigned long port, void *buf, size_t size, size_t nmemb, int wr, int flag) in pcc_iorw() argument 160 if (wr) { in pcc_iorw() 180 if (wr) { in pcc_iorw()
|
/linux-4.4.14/Documentation/ia64/ |
D | err_inject.txt | 464 int wr(char *fn, unsigned long data) 572 wr(fn, err_type_info.err_type_info); 574 wr(fn, 0x0); 580 wr(fn, mode); 649 wr(fn, err_type_info.err_type_info); 651 wr(fn, err_struct_info.err_struct_info); 657 wr(fn,mode); 692 if (wr(fn,virt_addr)<0)
|
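The err_inject.txt sample tool funnels every sysfs store through a small wr() helper that prints the value as hex and writes it to the named attribute file. A sketch of what such a helper typically looks like; the exact code in the document may differ, and the buffer size is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Sketch of a sysfs write helper in the style of the err_inject sample's wr(). */
static int wr(const char *fn, unsigned long data)
{
	char buf[32];
	int fd, len, ret = 0;

	fd = open(fn, O_WRONLY);
	if (fd < 0) {
		perror(fn);
		return -1;
	}
	len = snprintf(buf, sizeof(buf), "0x%lx", data);
	if (write(fd, buf, len) != len)
		ret = -1;
	close(fd);
	return ret;
}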
/linux-4.4.14/arch/mips/boot/dts/cavium-octeon/ |
D | octeon_68xx.dts | 471 cavium,t-wr-hld = <35>; 488 cavium,t-wr-hld = <320>; 505 cavium,t-wr-hld = <300>; 522 cavium,t-wr-hld = <30>;
|
D | octeon_3xxx.dts | 435 cavium,t-wr-hld = <45>; 452 cavium,t-wr-hld = <320>; 469 cavium,t-wr-hld = <30>; 486 cavium,t-wr-hld = <70>;
|
/linux-4.4.14/drivers/staging/lustre/lnet/klnds/o2iblnd/ |
D | o2iblnd_cb.c | 841 rc = ib_post_send(conn->ibc_cmid->qp, &tx->tx_wrq->wr, &bad_wrq); in kiblnd_post_tx_locked() 1034 wrq->wr.next = NULL; in kiblnd_init_tx_msg() 1035 wrq->wr.wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX); in kiblnd_init_tx_msg() 1036 wrq->wr.sg_list = sge; in kiblnd_init_tx_msg() 1037 wrq->wr.num_sge = 1; in kiblnd_init_tx_msg() 1038 wrq->wr.opcode = IB_WR_SEND; in kiblnd_init_tx_msg() 1039 wrq->wr.send_flags = IB_SEND_SIGNALED; in kiblnd_init_tx_msg() 1099 wrq->wr.next = &next->wr; in kiblnd_init_rdma() 1100 wrq->wr.wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA); in kiblnd_init_rdma() 1101 wrq->wr.sg_list = sge; in kiblnd_init_rdma() [all …]
|
/linux-4.4.14/drivers/staging/lustre/lustre/llite/ |
D | vvp_io.c | 343 start = io->u.ci_wr.wr.crw_pos; in vvp_io_write_lock() 344 end = start + io->u.ci_wr.wr.crw_count - 1; in vvp_io_write_lock() 571 loff_t pos = io->u.ci_wr.wr.crw_pos; in vvp_io_write_start() 572 size_t cnt = io->u.ci_wr.wr.crw_count; in vvp_io_write_start() 582 pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode); in vvp_io_write_start()
|
/linux-4.4.14/arch/microblaze/boot/dts/ |
D | system.dts | 69 xlnx,allow-dcache-wr = <0x1>; 70 xlnx,allow-icache-wr = <0x1>; 110 xlnx,number-of-wr-addr-brk = <0x0>;
|