/linux-4.4.14/include/math-emu/ |
D | op-common.h | 27 #define _FP_DECL(wc, X) \ argument 29 _FP_FRAC_DECL_##wc(X) 36 #define _FP_UNPACK_CANONICAL(fs, wc, X) \ argument 42 _FP_FRAC_SLL_##wc(X, _FP_WORKBITS); \ 48 if (_FP_FRAC_ZEROP_##wc(X)) \ 54 _FP_FRAC_CLZ_##wc(_shift, X); \ 56 _FP_FRAC_SLL_##wc(X, (_shift+_FP_WORKBITS)); \ 69 if (_FP_FRAC_ZEROP_##wc(X)) \ 89 #define _FP_PACK_CANONICAL(fs, wc, X) \ argument 97 _FP_ROUND(wc, X); \ [all …]
|
D | soft-fp.h | 135 #define _FP_ROUND_NEAREST(wc, X) \ argument 137 if ((_FP_FRAC_LOW_##wc(X) & 15) != _FP_WORK_ROUND) \ 138 _FP_FRAC_ADDI_##wc(X, _FP_WORK_ROUND); \ 141 #define _FP_ROUND_ZERO(wc, X) 0 argument 143 #define _FP_ROUND_PINF(wc, X) \ argument 145 if (!X##_s && (_FP_FRAC_LOW_##wc(X) & 7)) \ 146 _FP_FRAC_ADDI_##wc(X, _FP_WORK_LSB); \ 149 #define _FP_ROUND_MINF(wc, X) \ argument 151 if (X##_s && (_FP_FRAC_LOW_##wc(X) & 7)) \ 152 _FP_FRAC_ADDI_##wc(X, _FP_WORK_LSB); \ [all …]
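The _FP_ROUND_NEAREST fragment above rounds to nearest with ties-to-even by adding half an LSB unless the low bits are exactly the tie pattern. A standalone sketch of that trick, assuming soft-fp's usual three work bits (so the round bit has value 4 and the result LSB has value 8); the helper name and test values are illustrative only:

```c
#include <assert.h>

/* Round a fraction carrying three extra low "work" bits to nearest, ties to
 * even.  Bit 2 is the round (guard) bit, bits 1..0 are sticky, bit 3 is the
 * LSB of the final result. */
static unsigned long round_nearest_even(unsigned long frac)
{
    const unsigned long work_round = 4;     /* half of the result LSB */

    /* Add half an LSB unless the low nibble is exactly 0b0100: that is the
     * halfway case with an already-even result, which rounds down. */
    if ((frac & 15) != work_round)
        frac += work_round;
    return frac >> 3;                       /* drop the work bits */
}

int main(void)
{
    assert(round_nearest_even(0x13) == 0x2);  /* below half: down */
    assert(round_nearest_even(0x15) == 0x3);  /* above half: up */
    assert(round_nearest_even(0x14) == 0x2);  /* tie, even result: down */
    assert(round_nearest_even(0x1c) == 0x4);  /* tie, odd result: up */
    return 0;
}
```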
|
/linux-4.4.14/drivers/infiniband/hw/cxgb3/ |
D | iwch_cq.c | 45 struct ib_wc *wc) in iwch_poll_cq_one() argument 81 wc->wr_id = cookie; in iwch_poll_cq_one() 82 wc->qp = &qhp->ibqp; in iwch_poll_cq_one() 83 wc->vendor_err = CQE_STATUS(cqe); in iwch_poll_cq_one() 84 wc->wc_flags = 0; in iwch_poll_cq_one() 94 wc->byte_len = CQE_LEN(cqe); in iwch_poll_cq_one() 96 wc->byte_len = 0; in iwch_poll_cq_one() 97 wc->opcode = IB_WC_RECV; in iwch_poll_cq_one() 100 wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe); in iwch_poll_cq_one() 101 wc->wc_flags |= IB_WC_WITH_INVALIDATE; in iwch_poll_cq_one() [all …]
|
D | iwch_provider.h | 336 int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
|
/linux-4.4.14/drivers/staging/rdma/ipath/ |
D | ipath_cq.c | 50 struct ipath_cq_wc *wc; in ipath_cq_enter() local 61 wc = cq->queue; in ipath_cq_enter() 62 head = wc->head; in ipath_cq_enter() 68 if (unlikely(next == wc->tail)) { in ipath_cq_enter() 81 wc->uqueue[head].wr_id = entry->wr_id; in ipath_cq_enter() 82 wc->uqueue[head].status = entry->status; in ipath_cq_enter() 83 wc->uqueue[head].opcode = entry->opcode; in ipath_cq_enter() 84 wc->uqueue[head].vendor_err = entry->vendor_err; in ipath_cq_enter() 85 wc->uqueue[head].byte_len = entry->byte_len; in ipath_cq_enter() 86 wc->uqueue[head].ex.imm_data = (__u32 __force) entry->ex.imm_data; in ipath_cq_enter() [all …]
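The ipath_cq_enter() fragments above use the classic single-producer ring test: the queue is full when advancing head would land on tail. A small userspace sketch of that head/tail check, with an illustrative entry type and queue size rather than the driver's structures:

```c
#include <stdbool.h>
#include <string.h>

#define CQ_ENTRIES 64   /* illustrative size, not the driver's */

struct cq_entry { unsigned long wr_id; int status; };

struct cq_ring {
    unsigned int head;              /* next slot the producer writes */
    unsigned int tail;              /* next slot the consumer reads */
    struct cq_entry queue[CQ_ENTRIES];
};

/* Returns false (dropping the entry) when the ring is full, mirroring the
 * "next == tail" test in the fragments above. */
static bool cq_enter(struct cq_ring *cq, const struct cq_entry *entry)
{
    unsigned int next = cq->head + 1;

    if (next == CQ_ENTRIES)
        next = 0;
    if (next == cq->tail)           /* full: one slot stays unused */
        return false;
    memcpy(&cq->queue[cq->head], entry, sizeof(*entry));
    cq->head = next;
    return true;
}

int main(void)
{
    struct cq_ring ring = { 0 };
    struct cq_entry e = { .wr_id = 1, .status = 0 };

    while (cq_enter(&ring, &e))     /* fills CQ_ENTRIES - 1 slots, then stops */
        e.wr_id++;
    return 0;
}
```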
|
D | ipath_ud.c | 62 struct ib_wc wc; in ipath_ud_loopback() local 92 memset(&wc, 0, sizeof wc); in ipath_ud_loopback() 93 wc.byte_len = length + sizeof(struct ib_grh); in ipath_ud_loopback() 96 wc.wc_flags = IB_WC_WITH_IMM; in ipath_ud_loopback() 97 wc.ex.imm_data = swqe->wr.ex.imm_data; in ipath_ud_loopback() 140 if (wc.byte_len > rlen) { in ipath_ud_loopback() 148 wc.wr_id = wqe->wr_id; in ipath_ud_loopback() 180 wc.wc_flags |= IB_WC_GRH; in ipath_ud_loopback() 212 wc.status = IB_WC_SUCCESS; in ipath_ud_loopback() 213 wc.opcode = IB_WC_RECV; in ipath_ud_loopback() [all …]
|
D | ipath_ruc.c | 126 struct ib_wc wc; in ipath_init_sge() local 144 memset(&wc, 0, sizeof(wc)); in ipath_init_sge() 145 wc.wr_id = wqe->wr_id; in ipath_init_sge() 146 wc.status = IB_WC_LOC_PROT_ERR; in ipath_init_sge() 147 wc.opcode = IB_WC_RECV; in ipath_init_sge() 148 wc.qp = &qp->ibqp; in ipath_init_sge() 150 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); in ipath_init_sge() 266 struct ib_wc wc; in ipath_ruc_loopback() local 324 memset(&wc, 0, sizeof wc); in ipath_ruc_loopback() 333 wc.wc_flags = IB_WC_WITH_IMM; in ipath_ruc_loopback() [all …]
|
D | ipath_uc.c | 248 struct ib_wc wc; in ipath_uc_rcv() local 285 memset(&wc, 0, sizeof wc); in ipath_uc_rcv() 382 wc.ex.imm_data = *(__be32 *) data; in ipath_uc_rcv() 386 wc.ex.imm_data = ohdr->u.imm_data; in ipath_uc_rcv() 389 wc.wc_flags = IB_WC_WITH_IMM; in ipath_uc_rcv() 404 wc.byte_len = tlen + qp->r_rcv_len; in ipath_uc_rcv() 405 if (unlikely(wc.byte_len > qp->r_len)) { in ipath_uc_rcv() 410 wc.opcode = IB_WC_RECV; in ipath_uc_rcv() 413 wc.wr_id = qp->r_wr_id; in ipath_uc_rcv() 414 wc.status = IB_WC_SUCCESS; in ipath_uc_rcv() [all …]
|
D | ipath_rc.c | 861 struct ib_wc wc; in do_rc_ack() local 951 memset(&wc, 0, sizeof wc); in do_rc_ack() 952 wc.wr_id = wqe->wr.wr_id; in do_rc_ack() 953 wc.status = IB_WC_SUCCESS; in do_rc_ack() 954 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; in do_rc_ack() 955 wc.byte_len = wqe->length; in do_rc_ack() 956 wc.qp = &qp->ibqp; in do_rc_ack() 957 wc.src_qp = qp->remote_qpn; in do_rc_ack() 958 wc.slid = qp->remote_ah_attr.dlid; in do_rc_ack() 959 wc.sl = qp->remote_ah_attr.sl; in do_rc_ack() [all …]
|
D | ipath_qp.c | 379 struct ib_wc wc; in ipath_error_qp() local 398 memset(&wc, 0, sizeof(wc)); in ipath_error_qp() 399 wc.qp = &qp->ibqp; in ipath_error_qp() 400 wc.opcode = IB_WC_RECV; in ipath_error_qp() 403 wc.wr_id = qp->r_wr_id; in ipath_error_qp() 404 wc.status = err; in ipath_error_qp() 405 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); in ipath_error_qp() 407 wc.status = IB_WC_WR_FLUSH_ERR; in ipath_error_qp() 425 wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; in ipath_error_qp() 428 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); in ipath_error_qp()
|
/linux-4.4.14/drivers/infiniband/hw/mlx4/ |
D | cq.c | 505 struct ib_wc *wc) in mlx4_ib_handle_error_cqe() argument 519 wc->status = IB_WC_LOC_LEN_ERR; in mlx4_ib_handle_error_cqe() 522 wc->status = IB_WC_LOC_QP_OP_ERR; in mlx4_ib_handle_error_cqe() 525 wc->status = IB_WC_LOC_PROT_ERR; in mlx4_ib_handle_error_cqe() 528 wc->status = IB_WC_WR_FLUSH_ERR; in mlx4_ib_handle_error_cqe() 531 wc->status = IB_WC_MW_BIND_ERR; in mlx4_ib_handle_error_cqe() 534 wc->status = IB_WC_BAD_RESP_ERR; in mlx4_ib_handle_error_cqe() 537 wc->status = IB_WC_LOC_ACCESS_ERR; in mlx4_ib_handle_error_cqe() 540 wc->status = IB_WC_REM_INV_REQ_ERR; in mlx4_ib_handle_error_cqe() 543 wc->status = IB_WC_REM_ACCESS_ERR; in mlx4_ib_handle_error_cqe() [all …]
|
D | mad.c | 456 enum ib_qp_type dest_qpt, struct ib_wc *wc, in mlx4_ib_send_to_slave() argument 492 ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey); in mlx4_ib_send_to_slave() 547 tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF); in mlx4_ib_send_to_slave() 548 tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0; in mlx4_ib_send_to_slave() 555 if (vlan != wc->vlan_id) in mlx4_ib_send_to_slave() 566 vlan = wc->vlan_id; in mlx4_ib_send_to_slave() 570 memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4); in mlx4_ib_send_to_slave() 571 memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2); in mlx4_ib_send_to_slave() 573 tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12); in mlx4_ib_send_to_slave() 574 tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid); in mlx4_ib_send_to_slave() [all …]
|
D | mcg.c | 233 struct ib_wc wc; in send_mad_to_slave() local 242 if (ib_find_cached_pkey(&dev->ib_dev, ctx->port, IB_DEFAULT_PKEY_FULL, &wc.pkey_index)) in send_mad_to_slave() 244 wc.sl = 0; in send_mad_to_slave() 245 wc.dlid_path_bits = 0; in send_mad_to_slave() 246 wc.port_num = ctx->port; in send_mad_to_slave() 247 wc.slid = ah_attr.dlid; /* opensm lid */ in send_mad_to_slave() 248 wc.src_qp = 1; in send_mad_to_slave() 249 return mlx4_ib_send_to_slave(dev, slave, ctx->port, IB_QPT_GSI, &wc, NULL, mad); in send_mad_to_slave()
|
D | mlx4_ib.h | 723 int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); 813 enum ib_qp_type qpt, struct ib_wc *wc,
|
/linux-4.4.14/drivers/infiniband/hw/qib/ |
D | qib_cq.c | 53 struct qib_cq_wc *wc; in qib_cq_enter() local 64 wc = cq->queue; in qib_cq_enter() 65 head = wc->head; in qib_cq_enter() 71 if (unlikely(next == wc->tail)) { in qib_cq_enter() 84 wc->uqueue[head].wr_id = entry->wr_id; in qib_cq_enter() 85 wc->uqueue[head].status = entry->status; in qib_cq_enter() 86 wc->uqueue[head].opcode = entry->opcode; in qib_cq_enter() 87 wc->uqueue[head].vendor_err = entry->vendor_err; in qib_cq_enter() 88 wc->uqueue[head].byte_len = entry->byte_len; in qib_cq_enter() 89 wc->uqueue[head].ex.imm_data = in qib_cq_enter() [all …]
|
D | qib_ud.c | 58 struct ib_wc wc; in qib_ud_loopback() local 130 memset(&wc, 0, sizeof(wc)); in qib_ud_loopback() 131 wc.byte_len = length + sizeof(struct ib_grh); in qib_ud_loopback() 134 wc.wc_flags = IB_WC_WITH_IMM; in qib_ud_loopback() 135 wc.ex.imm_data = swqe->wr.ex.imm_data; in qib_ud_loopback() 160 if (unlikely(wc.byte_len > qp->r_len)) { in qib_ud_loopback() 169 wc.wc_flags |= IB_WC_GRH; in qib_ud_loopback() 207 wc.wr_id = qp->r_wr_id; in qib_ud_loopback() 208 wc.status = IB_WC_SUCCESS; in qib_ud_loopback() 209 wc.opcode = IB_WC_RECV; in qib_ud_loopback() [all …]
|
D | qib_ruc.c | 85 struct ib_wc wc; in qib_init_sge() local 117 memset(&wc, 0, sizeof(wc)); in qib_init_sge() 118 wc.wr_id = wqe->wr_id; in qib_init_sge() 119 wc.status = IB_WC_LOC_PROT_ERR; in qib_init_sge() 120 wc.opcode = IB_WC_RECV; in qib_init_sge() 121 wc.qp = &qp->ibqp; in qib_init_sge() 123 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); in qib_init_sge() 363 struct ib_wc wc; in qib_ruc_loopback() local 424 memset(&wc, 0, sizeof(wc)); in qib_ruc_loopback() 434 wc.wc_flags = IB_WC_WITH_IMM; in qib_ruc_loopback() [all …]
|
D | qib_uc.c | 250 struct ib_wc wc; in qib_uc_rcv() local 381 wc.ex.imm_data = ohdr->u.imm_data; in qib_uc_rcv() 383 wc.wc_flags = IB_WC_WITH_IMM; in qib_uc_rcv() 387 wc.ex.imm_data = 0; in qib_uc_rcv() 388 wc.wc_flags = 0; in qib_uc_rcv() 398 wc.byte_len = tlen + qp->r_rcv_len; in qib_uc_rcv() 399 if (unlikely(wc.byte_len > qp->r_len)) in qib_uc_rcv() 401 wc.opcode = IB_WC_RECV; in qib_uc_rcv() 405 wc.wr_id = qp->r_wr_id; in qib_uc_rcv() 406 wc.status = IB_WC_SUCCESS; in qib_uc_rcv() [all …]
|
D | qib_rc.c | 975 struct ib_wc wc; in qib_rc_send_complete() local 1022 memset(&wc, 0, sizeof(wc)); in qib_rc_send_complete() 1023 wc.wr_id = wqe->wr.wr_id; in qib_rc_send_complete() 1024 wc.status = IB_WC_SUCCESS; in qib_rc_send_complete() 1025 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; in qib_rc_send_complete() 1026 wc.byte_len = wqe->length; in qib_rc_send_complete() 1027 wc.qp = &qp->ibqp; in qib_rc_send_complete() 1028 qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); in qib_rc_send_complete() 1060 struct ib_wc wc; in do_rc_completion() local 1078 memset(&wc, 0, sizeof(wc)); in do_rc_completion() [all …]
|
D | qib_qp.c | 477 struct ib_wc wc; in qib_error_qp() local 518 memset(&wc, 0, sizeof(wc)); in qib_error_qp() 519 wc.qp = &qp->ibqp; in qib_error_qp() 520 wc.opcode = IB_WC_RECV; in qib_error_qp() 523 wc.wr_id = qp->r_wr_id; in qib_error_qp() 524 wc.status = err; in qib_error_qp() 525 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); in qib_error_qp() 527 wc.status = IB_WC_WR_FLUSH_ERR; in qib_error_qp() 545 wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; in qib_error_qp() 548 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); in qib_error_qp()
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
D | cq.c | 69 struct hfi1_cq_wc *wc; in hfi1_cq_enter() local 80 wc = cq->queue; in hfi1_cq_enter() 81 head = wc->head; in hfi1_cq_enter() 87 if (unlikely(next == wc->tail)) { in hfi1_cq_enter() 100 wc->uqueue[head].wr_id = entry->wr_id; in hfi1_cq_enter() 101 wc->uqueue[head].status = entry->status; in hfi1_cq_enter() 102 wc->uqueue[head].opcode = entry->opcode; in hfi1_cq_enter() 103 wc->uqueue[head].vendor_err = entry->vendor_err; in hfi1_cq_enter() 104 wc->uqueue[head].byte_len = entry->byte_len; in hfi1_cq_enter() 105 wc->uqueue[head].ex.imm_data = in hfi1_cq_enter() [all …]
|
D | ud.c | 77 struct ib_wc wc; in ud_loopback() local 152 memset(&wc, 0, sizeof(wc)); in ud_loopback() 153 wc.byte_len = length + sizeof(struct ib_grh); in ud_loopback() 156 wc.wc_flags = IB_WC_WITH_IMM; in ud_loopback() 157 wc.ex.imm_data = swqe->wr.ex.imm_data; in ud_loopback() 182 if (unlikely(wc.byte_len > qp->r_len)) { in ud_loopback() 191 wc.wc_flags |= IB_WC_GRH; in ud_loopback() 229 wc.wr_id = qp->r_wr_id; in ud_loopback() 230 wc.status = IB_WC_SUCCESS; in ud_loopback() 231 wc.opcode = IB_WC_RECV; in ud_loopback() [all …]
|
D | ruc.c | 103 struct ib_wc wc; in init_sge() local 135 memset(&wc, 0, sizeof(wc)); in init_sge() 136 wc.wr_id = wqe->wr_id; in init_sge() 137 wc.status = IB_WC_LOC_PROT_ERR; in init_sge() 138 wc.opcode = IB_WC_RECV; in init_sge() 139 wc.qp = &qp->ibqp; in init_sge() 141 hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); in init_sge() 383 struct ib_wc wc; in ruc_loopback() local 446 memset(&wc, 0, sizeof(wc)); in ruc_loopback() 456 wc.wc_flags = IB_WC_WITH_IMM; in ruc_loopback() [all …]
|
D | uc.c | 275 struct ib_wc wc; in hfi1_uc_rcv() local 422 wc.ex.imm_data = ohdr->u.imm_data; in hfi1_uc_rcv() 423 wc.wc_flags = IB_WC_WITH_IMM; in hfi1_uc_rcv() 427 wc.ex.imm_data = 0; in hfi1_uc_rcv() 428 wc.wc_flags = 0; in hfi1_uc_rcv() 438 wc.byte_len = tlen + qp->r_rcv_len; in hfi1_uc_rcv() 439 if (unlikely(wc.byte_len > qp->r_len)) in hfi1_uc_rcv() 441 wc.opcode = IB_WC_RECV; in hfi1_uc_rcv() 445 wc.wr_id = qp->r_wr_id; in hfi1_uc_rcv() 446 wc.status = IB_WC_SUCCESS; in hfi1_uc_rcv() [all …]
|
D | rc.c | 988 struct ib_wc wc; in hfi1_rc_send_complete() local 1036 memset(&wc, 0, sizeof(wc)); in hfi1_rc_send_complete() 1037 wc.wr_id = wqe->wr.wr_id; in hfi1_rc_send_complete() 1038 wc.status = IB_WC_SUCCESS; in hfi1_rc_send_complete() 1039 wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode]; in hfi1_rc_send_complete() 1040 wc.byte_len = wqe->length; in hfi1_rc_send_complete() 1041 wc.qp = &qp->ibqp; in hfi1_rc_send_complete() 1042 hfi1_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); in hfi1_rc_send_complete() 1075 struct ib_wc wc; in do_rc_completion() local 1093 memset(&wc, 0, sizeof(wc)); in do_rc_completion() [all …]
|
D | qp.c | 462 struct ib_wc wc; in hfi1_error_qp() local 502 memset(&wc, 0, sizeof(wc)); in hfi1_error_qp() 503 wc.qp = &qp->ibqp; in hfi1_error_qp() 504 wc.opcode = IB_WC_RECV; in hfi1_error_qp() 507 wc.wr_id = qp->r_wr_id; in hfi1_error_qp() 508 wc.status = err; in hfi1_error_qp() 509 hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); in hfi1_error_qp() 511 wc.status = IB_WC_WR_FLUSH_ERR; in hfi1_error_qp() 529 wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; in hfi1_error_qp() 532 hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); in hfi1_error_qp()
|
D | pio.c | 196 static int wildcard_to_pool(int wc) in wildcard_to_pool() argument 198 if (wc >= 0) in wildcard_to_pool() 200 return -wc - 1; in wildcard_to_pool()
|
/linux-4.4.14/drivers/net/ethernet/brocade/bna/ |
D | bfa_cs.h | 81 bfa_wc_up(struct bfa_wc *wc) in bfa_wc_up() argument 83 wc->wc_count++; in bfa_wc_up() 87 bfa_wc_down(struct bfa_wc *wc) in bfa_wc_down() argument 89 wc->wc_count--; in bfa_wc_down() 90 if (wc->wc_count == 0) in bfa_wc_down() 91 wc->wc_resume(wc->wc_cbarg); in bfa_wc_down() 96 bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg) in bfa_wc_init() argument 98 wc->wc_resume = wc_resume; in bfa_wc_init() 99 wc->wc_cbarg = wc_cbarg; in bfa_wc_init() 100 wc->wc_count = 0; in bfa_wc_init() [all …]
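The bfa_wc_* fragments above are a small "waiting counter": callers bump the count once per outstanding operation, and the resume callback fires when the last one completes. A standalone sketch of the same idiom; the names and the callback here are illustrative, not the driver's:

```c
#include <stdio.h>

typedef void (*wc_resume_t)(void *cbarg);

struct wait_counter {
    wc_resume_t resume;     /* called when the count drops to zero */
    void *cbarg;
    int count;
};

static void wc_init(struct wait_counter *wc, wc_resume_t resume, void *cbarg)
{
    wc->resume = resume;
    wc->cbarg = cbarg;
    wc->count = 0;
}

static void wc_up(struct wait_counter *wc)
{
    wc->count++;
}

static void wc_down(struct wait_counter *wc)
{
    if (--wc->count == 0)
        wc->resume(wc->cbarg);
}

static void all_done(void *arg)
{
    printf("%s complete\n", (char *)arg);
}

int main(void)
{
    struct wait_counter wc;

    wc_init(&wc, all_done, "teardown");
    wc_up(&wc);
    wc_up(&wc);     /* two outstanding operations */
    wc_down(&wc);
    wc_down(&wc);   /* last completion resumes the waiter */
    return 0;
}
```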
|
/linux-4.4.14/fs/ocfs2/ |
D | aops.c | 1285 static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc) in ocfs2_unlock_pages() argument 1294 if (wc->w_target_locked) { in ocfs2_unlock_pages() 1295 BUG_ON(!wc->w_target_page); in ocfs2_unlock_pages() 1296 for (i = 0; i < wc->w_num_pages; i++) { in ocfs2_unlock_pages() 1297 if (wc->w_target_page == wc->w_pages[i]) { in ocfs2_unlock_pages() 1298 wc->w_pages[i] = NULL; in ocfs2_unlock_pages() 1302 mark_page_accessed(wc->w_target_page); in ocfs2_unlock_pages() 1303 page_cache_release(wc->w_target_page); in ocfs2_unlock_pages() 1305 ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); in ocfs2_unlock_pages() 1308 static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc) in ocfs2_free_write_ctxt() argument [all …]
|
/linux-4.4.14/drivers/infiniband/hw/mlx5/ |
D | cq.c | 121 static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_good_req() argument 124 wc->wc_flags = 0; in handle_good_req() 127 wc->wc_flags |= IB_WC_WITH_IMM; in handle_good_req() 129 wc->opcode = IB_WC_RDMA_WRITE; in handle_good_req() 132 wc->wc_flags |= IB_WC_WITH_IMM; in handle_good_req() 135 wc->opcode = IB_WC_SEND; in handle_good_req() 138 wc->opcode = IB_WC_RDMA_READ; in handle_good_req() 139 wc->byte_len = be32_to_cpu(cqe->byte_cnt); in handle_good_req() 142 wc->opcode = IB_WC_COMP_SWAP; in handle_good_req() 143 wc->byte_len = 8; in handle_good_req() [all …]
|
D | mr.c | 740 struct ib_wc wc; in mlx5_umr_cq_handler() local 744 err = ib_poll_cq(cq, 1, &wc); in mlx5_umr_cq_handler() 752 context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id; in mlx5_umr_cq_handler() 753 context->status = wc.status; in mlx5_umr_cq_handler()
|
D | mlx5_ib.h | 544 int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
|
/linux-4.4.14/fs/ntfs/ |
D | unistr.c | 264 wchar_t wc; in ntfs_nlstoucs() local 273 &wc); in ntfs_nlstoucs() 276 if (likely(wc)) { in ntfs_nlstoucs() 277 ucs[o++] = cpu_to_le16(wc); in ntfs_nlstoucs() 341 int i, o, ns_len, wc; in ntfs_ucstonls() local 348 wc = -ENAMETOOLONG; in ntfs_ucstonls() 358 retry: wc = nls->uni2char(le16_to_cpu(ins[i]), ns + o, in ntfs_ucstonls() 360 if (wc > 0) { in ntfs_ucstonls() 361 o += wc; in ntfs_ucstonls() 363 } else if (!wc) in ntfs_ucstonls() [all …]
|
/linux-4.4.14/arch/c6x/platforms/ |
D | cache.c | 141 unsigned int wc = 0; in cache_block_operation() local 143 for (; wcnt; wcnt -= wc, start += wc) { in cache_block_operation() 163 wc = 0xffff; in cache_block_operation() 165 wc = wcnt; in cache_block_operation() 168 imcr_set(wc_reg, wc & 0xffff); in cache_block_operation() 186 unsigned int wc = 0; in cache_block_operation_nowait() local 188 for (; wcnt; wcnt -= wc, start += wc) { in cache_block_operation_nowait() 195 wc = 0xffff; in cache_block_operation_nowait() 197 wc = wcnt; in cache_block_operation_nowait() 200 imcr_set(wc_reg, wc & 0xffff); in cache_block_operation_nowait()
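cache_block_operation() above walks a word count in chunks because the programmed count register only holds 16 bits. A standalone sketch of that chunking loop; the op() callback stands in for the IMCR register write and is illustrative:

```c
#include <stdio.h>

/* Walk wcnt words starting at word index start, at most 0xffff words per
 * step, the way the cache_block_operation() loop above clamps each
 * programmed count to the 16-bit hardware register. */
static void chunked_op(unsigned long start, unsigned int wcnt,
                       void (*op)(unsigned long start, unsigned int wc))
{
    unsigned int wc = 0;

    for (; wcnt; wcnt -= wc, start += wc) {
        wc = (wcnt > 0xffff) ? 0xffff : wcnt;   /* hardware limit */
        op(start, wc);
    }
}

static void show(unsigned long start, unsigned int wc)
{
    printf("op at %#lx, %u words\n", start, wc);
}

int main(void)
{
    chunked_op(0x1000, 0x2a000, show);  /* three chunks: 0xffff, 0xffff, 0xa002 */
    return 0;
}
```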
|
/linux-4.4.14/drivers/staging/rdma/ehca/ |
D | ehca_reqs.c | 626 static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc) in ehca_poll_cq_one() argument 706 wc->qp = &my_qp->ib_qp; in ehca_poll_cq_one() 744 wc->wr_id = replace_wr_id(cqe->work_request_id, qmap_entry->app_wr_id); in ehca_poll_cq_one() 759 wc->opcode = ib_wc_opcode[cqe->optype]-1; in ehca_poll_cq_one() 760 if (unlikely(wc->opcode == -1)) { in ehca_poll_cq_one() 774 map_ib_wc_status(cqe->status, &wc->status); in ehca_poll_cq_one() 775 wc->vendor_err = wc->status; in ehca_poll_cq_one() 777 wc->status = IB_WC_SUCCESS; in ehca_poll_cq_one() 779 wc->byte_len = cqe->nr_bytes_transferred; in ehca_poll_cq_one() 780 wc->pkey_index = cqe->pkey_index; in ehca_poll_cq_one() [all …]
|
D | ehca_iverbs.h | 142 int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
|
/linux-4.4.14/drivers/scsi/bfa/ |
D | bfa_cs.h | 293 bfa_wc_up(struct bfa_wc_s *wc) in bfa_wc_up() argument 295 wc->wc_count++; in bfa_wc_up() 299 bfa_wc_down(struct bfa_wc_s *wc) in bfa_wc_down() argument 301 wc->wc_count--; in bfa_wc_down() 302 if (wc->wc_count == 0) in bfa_wc_down() 303 wc->wc_resume(wc->wc_cbarg); in bfa_wc_down() 310 bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg) in bfa_wc_init() argument 312 wc->wc_resume = wc_resume; in bfa_wc_init() 313 wc->wc_cbarg = wc_cbarg; in bfa_wc_init() 314 wc->wc_count = 0; in bfa_wc_init() [all …]
|
D | bfa_fcs.c | 128 bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs); in bfa_fcs_stop() 129 bfa_wc_up(&fcs->wc); in bfa_fcs_stop() 131 bfa_wc_wait(&fcs->wc); in bfa_fcs_stop() 185 bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs); in bfa_fcs_exit() 193 bfa_wc_up(&fcs->wc); in bfa_fcs_exit() 198 bfa_wc_wait(&fcs->wc); in bfa_fcs_exit() 726 bfa_wc_down(&fabric->fcs->wc); in bfa_fcs_fabric_sm_deleting() 792 bfa_wc_down(&(fabric->fcs)->wc); in bfa_fcs_fabric_sm_cleanup() 1106 bfa_wc_wait(&fabric->wc); in bfa_fcs_fabric_delete() 1153 bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric); in bfa_fcs_fabric_attach() [all …]
|
D | bfa_fcpim.h | 213 struct bfa_wc_s wc; /* waiting counter */ member 239 struct bfa_wc_s wc; /* waiting counter */ member
|
D | bfa_fcs.h | 159 struct bfa_wc_s wc; /* waiting counter for events */ member 206 struct bfa_wc_s wc; /* wait counter for delete */ member 706 struct bfa_wc_s wc; /* waiting counter */ member
|
D | bfa_fcpim.c | 1025 bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim); in bfa_itnim_cleanup() 1037 bfa_wc_up(&itnim->wc); in bfa_itnim_cleanup() 1043 bfa_wc_up(&itnim->wc); in bfa_itnim_cleanup() 1047 bfa_wc_wait(&itnim->wc); in bfa_itnim_cleanup() 1095 bfa_wc_down(&itnim->wc); in bfa_itnim_iodone() 1101 bfa_wc_down(&itnim->wc); in bfa_itnim_tskdone() 2698 bfa_wc_down(&ioim->iosp->tskim->wc); in bfa_ioim_notify_cleanup() 3375 bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim); in bfa_tskim_cleanup_ios() 3379 bfa_wc_up(&tskim->wc); in bfa_tskim_cleanup_ios() 3383 bfa_wc_wait(&tskim->wc); in bfa_tskim_cleanup_ios() [all …]
|
D | bfa_fcs_lport.c | 757 bfa_wc_down(&port->fabric->wc); in bfa_fcs_lport_deleted()
|
/linux-4.4.14/sound/isa/wavefront/ |
D | wavefront_synth.c | 1495 wavefront_control *wc) in wavefront_synth_control() argument 1503 "cmd 0x%x\n", wc->cmd); in wavefront_synth_control() 1507 switch (wc->cmd) { in wavefront_synth_control() 1522 wc->rbuf[0] = dev->interrupts_are_midi; in wavefront_synth_control() 1526 dev->rom_samples_rdonly = wc->wbuf[0]; in wavefront_synth_control() 1527 wc->status = 0; in wavefront_synth_control() 1531 i = wc->wbuf[0] | (wc->wbuf[1] << 7); in wavefront_synth_control() 1535 wc->status = EINVAL; in wavefront_synth_control() 1538 wc->rbuf[0] = dev->sample_status[i]; in wavefront_synth_control() 1539 wc->status = 0; in wavefront_synth_control() [all …]
|
/linux-4.4.14/drivers/infiniband/hw/cxgb4/ |
D | cq.c | 682 static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) in c4iw_poll_cq_one() argument 708 wc->wr_id = cookie; in c4iw_poll_cq_one() 709 wc->qp = &qhp->ibqp; in c4iw_poll_cq_one() 710 wc->vendor_err = CQE_STATUS(&cqe); in c4iw_poll_cq_one() 711 wc->wc_flags = 0; in c4iw_poll_cq_one() 720 wc->byte_len = CQE_LEN(&cqe); in c4iw_poll_cq_one() 722 wc->byte_len = 0; in c4iw_poll_cq_one() 723 wc->opcode = IB_WC_RECV; in c4iw_poll_cq_one() 726 wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe); in c4iw_poll_cq_one() 727 wc->wc_flags |= IB_WC_WITH_INVALIDATE; in c4iw_poll_cq_one() [all …]
|
D | iw_cxgb4.h | 945 int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
|
/linux-4.4.14/tools/testing/selftests/ftrace/ |
D | ftracetest | 261 prlog "# of passed: " `echo $PASSED_CASES | wc -w` 262 prlog "# of failed: " `echo $FAILED_CASES | wc -w` 263 prlog "# of unresolved: " `echo $UNRESOLVED_CASES | wc -w` 264 prlog "# of untested: " `echo $UNTESTED_CASES | wc -w` 265 prlog "# of unsupported: " `echo $UNSUPPORTED_CASES | wc -w` 266 prlog "# of xfailed: " `echo $XFAILED_CASES | wc -w` 267 prlog "# of undefined(test bug): " `echo $UNDEFINED_CASES | wc -w`
|
/linux-4.4.14/tools/testing/selftests/ftrace/test.d/ftrace/ |
D | fgraph-filter-stack.tc | 61 count=`cat trace | grep '()' | grep -v schedule | wc -l` 68 count=`cat trace | grep 'schedule()' | wc -l` 78 count=`cat trace | grep '()' | grep -v schedule | wc -l` 84 count=`cat trace | grep 'schedule()' | wc -l`
|
D | fgraph-filter.tc | 39 count=`cat trace | grep '()' | grep -v schedule | wc -l` 45 count=`cat trace | grep 'schedule()' | wc -l`
|
/linux-4.4.14/drivers/infiniband/ulp/ipoib/ |
D | ipoib_ib.c | 176 static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) in ipoib_ib_handle_rx_wc() argument 179 unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV; in ipoib_ib_handle_rx_wc() 185 wr_id, wc->status); in ipoib_ib_handle_rx_wc() 195 if (unlikely(wc->status != IB_WC_SUCCESS)) { in ipoib_ib_handle_rx_wc() 196 if (wc->status != IB_WC_WR_FLUSH_ERR) in ipoib_ib_handle_rx_wc() 199 wc->status, wr_id, wc->vendor_err); in ipoib_ib_handle_rx_wc() 210 if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) in ipoib_ib_handle_rx_wc() 226 wc->byte_len, wc->slid); in ipoib_ib_handle_rx_wc() 230 skb_put(skb, wc->byte_len); in ipoib_ib_handle_rx_wc() 235 if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff) in ipoib_ib_handle_rx_wc() [all …]
|
D | ipoib_cm.c | 558 void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) in ipoib_cm_handle_rx_wc() argument 562 unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV); in ipoib_cm_handle_rx_wc() 572 wr_id, wc->status); in ipoib_cm_handle_rx_wc() 587 p = wc->qp->qp_context; in ipoib_cm_handle_rx_wc() 594 if (unlikely(wc->status != IB_WC_SUCCESS)) { in ipoib_cm_handle_rx_wc() 597 wc->status, wr_id, wc->vendor_err); in ipoib_cm_handle_rx_wc() 624 if (wc->byte_len < IPOIB_CM_COPYBREAK) { in ipoib_cm_handle_rx_wc() 625 int dlen = wc->byte_len; in ipoib_cm_handle_rx_wc() 641 frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len, in ipoib_cm_handle_rx_wc() 660 wc->byte_len, wc->slid); in ipoib_cm_handle_rx_wc() [all …]
|
D | ipoib.h | 645 void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc); 646 void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc); 743 static inline void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) in ipoib_cm_handle_rx_wc() argument 747 static inline void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) in ipoib_cm_handle_tx_wc() argument
|
/linux-4.4.14/drivers/infiniband/core/ |
D | mad.c | 694 struct ib_wc *wc) in build_smp_wc() argument 696 memset(wc, 0, sizeof *wc); in build_smp_wc() 697 wc->wr_id = wr_id; in build_smp_wc() 698 wc->status = IB_WC_SUCCESS; in build_smp_wc() 699 wc->opcode = IB_WC_RECV; in build_smp_wc() 700 wc->pkey_index = pkey_index; in build_smp_wc() 701 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh); in build_smp_wc() 702 wc->src_qp = IB_QP0; in build_smp_wc() 703 wc->qp = qp; in build_smp_wc() 704 wc->slid = slid; in build_smp_wc() [all …]
|
D | mad_rmpp.c | 142 msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp, in ack_recv() 143 recv_wc->wc->pkey_index, 1, hdr_len, in ack_recv() 163 ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc, in alloc_response_msg() 169 msg = ib_create_send_mad(agent, recv_wc->wc->src_qp, in alloc_response_msg() 170 recv_wc->wc->pkey_index, 1, in alloc_response_msg() 295 mad_recv_wc->wc, in create_rmpp_recv() 318 rmpp_recv->src_qp = mad_recv_wc->wc->src_qp; in create_rmpp_recv() 319 rmpp_recv->slid = mad_recv_wc->wc->slid; in create_rmpp_recv() 339 rmpp_recv->src_qp == mad_recv_wc->wc->src_qp && in find_rmpp_recv() 340 rmpp_recv->slid == mad_recv_wc->wc->slid && in find_rmpp_recv() [all …]
|
D | agent.c | 82 const struct ib_wc *wc, const struct ib_device *device, in agent_send_response() argument 102 ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num); in agent_send_response() 112 send_buf = ib_create_send_mad(agent, wc->src_qp, wc->pkey_index, 0, in agent_send_response()
|
D | verbs.c | 344 const struct ib_wc *wc, const struct ib_grh *grh, in ib_init_ah_from_wc() argument 353 u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ? in ib_init_ah_from_wc() 354 wc->vlan_id : 0xffff; in ib_init_ah_from_wc() 356 if (!(wc->wc_flags & IB_WC_GRH)) in ib_init_ah_from_wc() 359 if (!(wc->wc_flags & IB_WC_WITH_SMAC) || in ib_init_ah_from_wc() 360 !(wc->wc_flags & IB_WC_WITH_VLAN)) { in ib_init_ah_from_wc() 363 wc->wc_flags & IB_WC_WITH_VLAN ? in ib_init_ah_from_wc() 375 if (wc->wc_flags & IB_WC_WITH_SMAC) in ib_init_ah_from_wc() 376 memcpy(ah_attr->dmac, wc->smac, ETH_ALEN); in ib_init_ah_from_wc() 379 ah_attr->dlid = wc->slid; in ib_init_ah_from_wc() [all …]
|
D | agent.h | 48 const struct ib_wc *wc, const struct ib_device *device,
|
D | user_mad.c | 218 if (mad_recv_wc->wc->status != IB_WC_SUCCESS) in recv_handler() 230 packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp); in recv_handler() 231 packet->mad.hdr.lid = cpu_to_be16(mad_recv_wc->wc->slid); in recv_handler() 232 packet->mad.hdr.sl = mad_recv_wc->wc->sl; in recv_handler() 233 packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits; in recv_handler() 234 packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index; in recv_handler() 235 packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH); in recv_handler() 240 mad_recv_wc->wc, mad_recv_wc->recv_buf.grh, in recv_handler()
|
D | cm.c | 295 ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc, in cm_alloc_response_msg() 300 m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index, in cm_alloc_response_msg() 346 static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc, in cm_init_av_for_response() argument 350 av->pkey_index = wc->pkey_index; in cm_init_av_for_response() 351 ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc, in cm_init_av_for_response() 1345 u16 pkey_index = work->mad_recv_wc->wc->pkey_index; in cm_get_bth_pkey() 1575 static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc) in cm_process_routed_req() argument 1579 req_msg->primary_local_lid = cpu_to_be16(wc->slid); in cm_process_routed_req() 1580 cm_req_set_primary_sl(req_msg, wc->sl); in cm_process_routed_req() 1584 req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits); in cm_process_routed_req() [all …]
|
D | uverbs_cmd.c | 1592 static int copy_wc_to_user(void __user *dest, struct ib_wc *wc) in copy_wc_to_user() argument 1596 tmp.wr_id = wc->wr_id; in copy_wc_to_user() 1597 tmp.status = wc->status; in copy_wc_to_user() 1598 tmp.opcode = wc->opcode; in copy_wc_to_user() 1599 tmp.vendor_err = wc->vendor_err; in copy_wc_to_user() 1600 tmp.byte_len = wc->byte_len; in copy_wc_to_user() 1601 tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data; in copy_wc_to_user() 1602 tmp.qp_num = wc->qp->qp_num; in copy_wc_to_user() 1603 tmp.src_qp = wc->src_qp; in copy_wc_to_user() 1604 tmp.wc_flags = wc->wc_flags; in copy_wc_to_user() [all …]
|
D | mad_priv.h | 73 struct ib_wc wc; member
|
D | sa_query.c | 1617 mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id; in recv_handler() 1621 if (mad_recv_wc->wc->status == IB_WC_SUCCESS) in recv_handler()
|
/linux-4.4.14/drivers/net/wireless/hostap/ |
D | hostap_cs.c | 107 u8 *buf, int wc) in hfa384x_outsw_debug() argument 116 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTSW, a, wc); in hfa384x_outsw_debug() 117 outsw(dev->base_addr + a, buf, wc); in hfa384x_outsw_debug() 122 u8 *buf, int wc) in hfa384x_insw_debug() argument 131 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INSW, a, wc); in hfa384x_insw_debug() 132 insw(dev->base_addr + a, buf, wc); in hfa384x_insw_debug() 140 #define HFA384X_OUTSW(a, buf, wc) hfa384x_outsw_debug(dev, (a), (buf), (wc)) argument 141 #define HFA384X_INSW(a, buf, wc) hfa384x_insw_debug(dev, (a), (buf), (wc)) argument 149 #define HFA384X_INSW(a, buf, wc) insw(dev->base_addr + (a), buf, wc) argument 150 #define HFA384X_OUTSW(a, buf, wc) outsw(dev->base_addr + (a), buf, wc) argument
|
D | hostap_plx.c | 173 u8 *buf, int wc) in hfa384x_outsw_debug() argument 183 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTSW, a, wc); in hfa384x_outsw_debug() 184 outsw(dev->base_addr + a, buf, wc); in hfa384x_outsw_debug() 189 u8 *buf, int wc) in hfa384x_insw_debug() argument 199 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INSW, a, wc); in hfa384x_insw_debug() 200 insw(dev->base_addr + a, buf, wc); in hfa384x_insw_debug() 208 #define HFA384X_OUTSW(a, buf, wc) hfa384x_outsw_debug(dev, (a), (buf), (wc)) argument 209 #define HFA384X_INSW(a, buf, wc) hfa384x_insw_debug(dev, (a), (buf), (wc)) argument 217 #define HFA384X_INSW(a, buf, wc) insw(dev->base_addr + (a), buf, wc) argument 218 #define HFA384X_OUTSW(a, buf, wc) outsw(dev->base_addr + (a), buf, wc) argument
|
/linux-4.4.14/tools/testing/selftests/ftrace/test.d/event/ |
D | event-enable.tc | 31 count=`cat trace | grep sched_switch | wc -l` 42 count=`cat trace | grep sched_switch | wc -l` 53 count=`cat trace | grep sched_switch | wc -l`
|
/linux-4.4.14/net/rds/ |
D | iw_send.c | 192 struct ib_wc wc; in rds_iw_send_cq_comp_handler() local 205 while (ib_poll_cq(cq, 1, &wc) > 0) { in rds_iw_send_cq_comp_handler() 207 (unsigned long long)wc.wr_id, wc.status, wc.byte_len, in rds_iw_send_cq_comp_handler() 208 be32_to_cpu(wc.ex.imm_data)); in rds_iw_send_cq_comp_handler() 211 if (wc.status != IB_WC_SUCCESS) { in rds_iw_send_cq_comp_handler() 212 printk(KERN_ERR "WC Error: status = %d opcode = %d\n", wc.status, wc.opcode); in rds_iw_send_cq_comp_handler() 216 if (wc.opcode == IB_WC_LOCAL_INV && wc.wr_id == RDS_IW_LOCAL_INV_WR_ID) { in rds_iw_send_cq_comp_handler() 221 if (wc.opcode == IB_WC_REG_MR && wc.wr_id == RDS_IW_REG_WR_ID) { in rds_iw_send_cq_comp_handler() 226 if (wc.wr_id == RDS_IW_ACK_WR_ID) { in rds_iw_send_cq_comp_handler() 235 completed = rds_iw_ring_completed(&ic->i_send_ring, wc.wr_id, oldest); in rds_iw_send_cq_comp_handler() [all …]
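The send-completion handler above drains the CQ one entry at a time with ib_poll_cq(). Below is a sketch of that common shape, using the in-kernel ib_poll_cq()/ib_req_notify_cq() calls that appear elsewhere in this listing; the handle_one() callback and the exact re-arm/drain ordering are illustrative, not copied from the RDS code:

```c
/* Kernel-context sketch (not buildable standalone): re-arm the CQ for the
 * next event, then drain whatever completions are already queued. */
static void drain_cq(struct ib_cq *cq, void (*handle_one)(struct ib_wc *wc))
{
    struct ib_wc wc;

    ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); /* ask for the next interrupt */
    while (ib_poll_cq(cq, 1, &wc) > 0)
        handle_one(&wc);    /* inspect wc.status, wc.wr_id, wc.opcode, ... */
}
```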
|
D | iw_recv.c | 784 struct ib_wc wc; in rds_poll_cq() local 787 while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) { in rds_poll_cq() 789 (unsigned long long)wc.wr_id, wc.status, wc.byte_len, in rds_poll_cq() 790 be32_to_cpu(wc.ex.imm_data)); in rds_poll_cq() 804 if (wc.status == IB_WC_SUCCESS) { in rds_poll_cq() 805 rds_iw_process_recv(conn, recv, wc.byte_len, state); in rds_poll_cq() 810 wc.status); in rds_poll_cq()
|
D | ib_send.c | 240 void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc) in rds_ib_send_cqe_handler() argument 252 (unsigned long long)wc->wr_id, wc->status, in rds_ib_send_cqe_handler() 253 ib_wc_status_msg(wc->status), wc->byte_len, in rds_ib_send_cqe_handler() 254 be32_to_cpu(wc->ex.imm_data)); in rds_ib_send_cqe_handler() 257 if (wc->wr_id == RDS_IB_ACK_WR_ID) { in rds_ib_send_cqe_handler() 267 (wc->wr_id & ~RDS_IB_SEND_OP), in rds_ib_send_cqe_handler() 275 rm = rds_ib_send_unmap_op(ic, send, wc->status); in rds_ib_send_cqe_handler() 303 if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) { in rds_ib_send_cqe_handler() 305 &conn->c_faddr, wc->status, in rds_ib_send_cqe_handler() 306 ib_wc_status_msg(wc->status)); in rds_ib_send_cqe_handler()
|
D | ib_recv.c | 957 struct ib_wc *wc, in rds_ib_recv_cqe_handler() argument 964 (unsigned long long)wc->wr_id, wc->status, in rds_ib_recv_cqe_handler() 965 ib_wc_status_msg(wc->status), wc->byte_len, in rds_ib_recv_cqe_handler() 966 be32_to_cpu(wc->ex.imm_data)); in rds_ib_recv_cqe_handler() 977 if (wc->status == IB_WC_SUCCESS) { in rds_ib_recv_cqe_handler() 978 rds_ib_process_recv(conn, recv, wc->byte_len, state); in rds_ib_recv_cqe_handler() 984 wc->status, in rds_ib_recv_cqe_handler() 985 ib_wc_status_msg(wc->status)); in rds_ib_recv_cqe_handler()
|
D | ib_cm.c | 245 struct ib_wc *wc; in poll_cq() local 249 wc = wcs + i; in poll_cq() 251 (unsigned long long)wc->wr_id, wc->status, in poll_cq() 252 wc->byte_len, be32_to_cpu(wc->ex.imm_data)); in poll_cq() 254 if (wc->wr_id & RDS_IB_SEND_OP) in poll_cq() 255 rds_ib_send_cqe_handler(ic, wc); in poll_cq() 257 rds_ib_recv_cqe_handler(ic, wc, ack_state); in poll_cq()
|
D | ib.h | 372 void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc, 399 void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
|
/linux-4.4.14/net/sunrpc/xprtrdma/ |
D | verbs.c | 131 rpcrdma_sendcq_process_wc(struct ib_wc *wc) in rpcrdma_sendcq_process_wc() argument 134 if (wc->wr_id == RPCRDMA_IGNORE_COMPLETION) { in rpcrdma_sendcq_process_wc() 135 if (wc->status != IB_WC_SUCCESS && in rpcrdma_sendcq_process_wc() 136 wc->status != IB_WC_WR_FLUSH_ERR) in rpcrdma_sendcq_process_wc() 138 __func__, ib_wc_status_msg(wc->status)); in rpcrdma_sendcq_process_wc() 142 r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id; in rpcrdma_sendcq_process_wc() 143 r->mw_sendcompletion(wc); in rpcrdma_sendcq_process_wc() 194 rpcrdma_recvcq_process_wc(struct ib_wc *wc) in rpcrdma_recvcq_process_wc() argument 197 (struct rpcrdma_rep *)(unsigned long)wc->wr_id; in rpcrdma_recvcq_process_wc() 200 if (wc->status != IB_WC_SUCCESS) in rpcrdma_recvcq_process_wc() [all …]
|
D | frwr_ops.c | 250 frwr_sendcompletion(struct ib_wc *wc) in frwr_sendcompletion() argument 254 if (likely(wc->status == IB_WC_SUCCESS)) in frwr_sendcompletion() 258 r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id; in frwr_sendcompletion() 259 if (wc->status == IB_WC_WR_FLUSH_ERR) in frwr_sendcompletion() 263 __func__, r, ib_wc_status_msg(wc->status), wc->status); in frwr_sendcompletion()
|
D | svc_rdma_transport.c | 342 struct ib_wc wc; in rq_cq_reap() local 351 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) { in rq_cq_reap() 352 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id; in rq_cq_reap() 353 ctxt->wc_status = wc.status; in rq_cq_reap() 354 ctxt->byte_len = wc.byte_len; in rq_cq_reap() 356 if (wc.status != IB_WC_SUCCESS) { in rq_cq_reap() 440 struct ib_wc *wc; in sq_cq_reap() local 455 wc = &wc_a[i]; in sq_cq_reap() 456 if (wc->status != IB_WC_SUCCESS) { in sq_cq_reap() 458 ib_wc_status_msg(wc->status), in sq_cq_reap() [all …]
|
/linux-4.4.14/drivers/infiniband/ulp/iser/ |
D | iser_verbs.c | 1167 struct ib_wc *wc) in iser_handle_comp_error() argument 1169 void *wr_id = (void *)(uintptr_t)wc->wr_id; in iser_handle_comp_error() 1173 if (wc->status != IB_WC_WR_FLUSH_ERR) in iser_handle_comp_error() 1178 if (wc->wr_id == ISER_FASTREG_LI_WRID) in iser_handle_comp_error() 1199 static void iser_handle_wc(struct ib_wc *wc) in iser_handle_wc() argument 1205 ib_conn = wc->qp->qp_context; in iser_handle_wc() 1206 if (likely(wc->status == IB_WC_SUCCESS)) { in iser_handle_wc() 1207 if (wc->opcode == IB_WC_RECV) { in iser_handle_wc() 1208 rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id; in iser_handle_wc() 1209 iser_rcv_completion(rx_desc, wc->byte_len, in iser_handle_wc() [all …]
|
/linux-4.4.14/arch/sh/include/asm/ |
D | sfp-machine.h | 57 #define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ argument 63 _FP_FRAC_COPY_##wc(R,Y); \ 68 _FP_FRAC_COPY_##wc(R,X); \
|
/linux-4.4.14/arch/sparc/include/asm/ |
D | sfp-machine_64.h | 59 #define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ argument 65 _FP_FRAC_COPY_##wc(R,X); \ 70 _FP_FRAC_COPY_##wc(R,Y); \
|
D | sfp-machine_32.h | 61 #define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ argument 67 _FP_FRAC_COPY_##wc(R,X); \ 72 _FP_FRAC_COPY_##wc(R,Y); \
|
/linux-4.4.14/Documentation/DocBook/ |
D | deviceiobook.xml.db | 5 API-pci-iomap-wc-range 7 API-pci-iomap-wc
|
D | kernel-api.xml.db | 401 API-arch-phys-wc-add
|
/linux-4.4.14/sound/pci/ctxfi/ |
D | cttimer.c | 55 unsigned int wc; /* current wallclock */ member 188 unsigned int wc, diff; in ct_xfitimer_reprogram() local 196 wc = ct_xfitimer_get_wc(atimer); in ct_xfitimer_reprogram() 197 diff = wc - atimer->wc; in ct_xfitimer_reprogram() 198 atimer->wc = wc; in ct_xfitimer_reprogram() 300 atimer->wc = ct_xfitimer_get_wc(atimer); in ct_xfitimer_start()
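ct_xfitimer_reprogram() above computes elapsed wallclock ticks as an unsigned difference, which stays correct when the free-running counter wraps. A tiny standalone illustration of why that works; the 32-bit width and values here are only for the example:

```c
#include <assert.h>
#include <stdint.h>

/* Elapsed ticks between two samples of a free-running counter.  Unsigned
 * subtraction is correct across a wrap, as long as less than one full
 * period elapsed between the two samples. */
static uint32_t wc_elapsed(uint32_t prev, uint32_t now)
{
    return now - prev;      /* modulo 2^32 */
}

int main(void)
{
    assert(wc_elapsed(100, 250) == 150);
    /* counter wrapped: 0xFFFFFFF0 -> 0x00000010 is still 0x20 ticks */
    assert(wc_elapsed(0xFFFFFFF0u, 0x00000010u) == 0x20);
    return 0;
}
```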
|
/linux-4.4.14/fs/logfs/ |
D | readwrite.c | 1341 struct write_control *wc) in logfs_write_i0() argument 1349 if (wc->ofs == 0) in logfs_write_i0() 1353 shadow = alloc_shadow(inode, bix, level, wc->ofs); in logfs_write_i0() 1354 if (wc->flags & WF_WRITE) in logfs_write_i0() 1356 if (wc->flags & WF_DELETE) in logfs_write_i0() 1370 wc->ofs = shadow->new_ofs; in logfs_write_i0() 1371 if (wc->ofs && full) in logfs_write_i0() 1372 wc->ofs |= LOGFS_FULLY_POPULATED; in logfs_write_i0() 1380 struct write_control wc = { in logfs_write_direct() local 1388 err = logfs_write_i0(inode, page, &wc); in logfs_write_direct() [all …]
|
/linux-4.4.14/fs/btrfs/ |
D | extent-tree.c | 8034 struct walk_control *wc, in reada_walk_down() argument 8049 if (path->slots[wc->level] < wc->reada_slot) { in reada_walk_down() 8050 wc->reada_count = wc->reada_count * 2 / 3; in reada_walk_down() 8051 wc->reada_count = max(wc->reada_count, 2); in reada_walk_down() 8053 wc->reada_count = wc->reada_count * 3 / 2; in reada_walk_down() 8054 wc->reada_count = min_t(int, wc->reada_count, in reada_walk_down() 8058 eb = path->nodes[wc->level]; in reada_walk_down() 8062 for (slot = path->slots[wc->level]; slot < nritems; slot++) { in reada_walk_down() 8063 if (nread >= wc->reada_count) in reada_walk_down() 8070 if (slot == path->slots[wc->level]) in reada_walk_down() [all …]
|
D | tree-log.c | 283 struct walk_control *wc, u64 gen); 291 struct walk_control *wc, u64 gen) in process_one_buffer() argument 305 if (wc->pin) in process_one_buffer() 310 if (wc->pin && btrfs_header_level(eb) == 0) in process_one_buffer() 312 if (wc->write) in process_one_buffer() 314 if (wc->wait) in process_one_buffer() 2280 struct walk_control *wc, u64 gen) in replay_one_buffer() argument 2284 struct btrfs_root *root = wc->replay_dest; in replay_one_buffer() 2309 wc->stage == LOG_WALK_REPLAY_INODES) { in replay_one_buffer() 2315 ret = replay_xattr_deletes(wc->trans, root, log, in replay_one_buffer() [all …]
|
/linux-4.4.14/tools/lib/lockdep/ |
D | run_tests.sh | 9 if [ $(timeout 1 ./tests/$testname | wc -l) -gt 0 ]; then 21 if [ $(timeout 1 ./lockdep ./tests/$testname | wc -l) -gt 0 ]; then
|
/linux-4.4.14/arch/s390/include/asm/ |
D | sfp-machine.h | 58 #define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ argument 64 _FP_FRAC_COPY_##wc(R,Y); \ 69 _FP_FRAC_COPY_##wc(R,X); \
|
/linux-4.4.14/net/9p/ |
D | trans_rdma.c | 347 struct ib_wc wc; in cq_comp_handler() local 350 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) { in cq_comp_handler() 351 struct p9_rdma_context *c = (void *) (unsigned long) wc.wr_id; in cq_comp_handler() 355 handle_recv(client, rdma, c, wc.status, wc.byte_len); in cq_comp_handler() 360 handle_send(client, rdma, c, wc.status, wc.byte_len); in cq_comp_handler() 366 c->wc_op, wc.opcode, wc.status); in cq_comp_handler()
|
/linux-4.4.14/include/uapi/linux/netfilter/ |
D | xt_osf.h | 53 __u32 wc; member 63 struct xt_osf_wc wc; member
|
/linux-4.4.14/fs/ocfs2/cluster/ |
D | heartbeat.c | 348 static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc) in o2hb_bio_wait_init() argument 350 atomic_set(&wc->wc_num_reqs, 1); in o2hb_bio_wait_init() 351 init_completion(&wc->wc_io_complete); in o2hb_bio_wait_init() 352 wc->wc_error = 0; in o2hb_bio_wait_init() 356 static inline void o2hb_bio_wait_dec(struct o2hb_bio_wait_ctxt *wc, in o2hb_bio_wait_dec() argument 362 if (atomic_dec_and_test(&wc->wc_num_reqs)) { in o2hb_bio_wait_dec() 364 complete(&wc->wc_io_complete); in o2hb_bio_wait_dec() 370 struct o2hb_bio_wait_ctxt *wc) in o2hb_wait_on_io() argument 372 o2hb_bio_wait_dec(wc, 1); in o2hb_wait_on_io() 373 wait_for_completion(&wc->wc_io_complete); in o2hb_wait_on_io() [all …]
|
/linux-4.4.14/arch/alpha/include/asm/ |
D | sfp-machine.h | 55 #define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ argument 58 _FP_FRAC_COPY_##wc(R,X); \
|
/linux-4.4.14/scripts/ |
D | decodecode | 95 faultlinenum=$(( $(wc -l $T.oo | cut -d" " -f1) - \ 96 $(wc -l $T.aa | cut -d" " -f1) + 3))
|
/linux-4.4.14/arch/powerpc/include/asm/ |
D | sfp-machine.h | 144 #define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ argument 150 _FP_FRAC_COPY_##wc(R,X); \ 155 _FP_FRAC_COPY_##wc(R,Y); \
|
/linux-4.4.14/arch/powerpc/ |
D | relocs_check.sh | 51 num_bad=$(echo "$bad_relocs" | wc -l)
|
/linux-4.4.14/tools/testing/selftests/rcutorture/bin/ |
D | cpus2use.sh | 25 ncpus=`grep '^processor' /proc/cpuinfo | wc -l`
|
D | parse-torture.sh | 99 …echo $title no success message, `grep --binary-files=text 'ver:' $file | wc -l` successful version…
|
/linux-4.4.14/Documentation/x86/ |
D | pat.txt | 156 set_memory_[uc|wc|wt] and set_memory_wb should be used in pairs, where driver 157 will first make a region uc, wc or wt and switch it back to wb after use. 162 Drivers should use ioremap_[uc|wc] to access PCI BARs with [uc|wc] access 165 Drivers should use set_memory_[uc|wc|wt] to set access type for RAM ranges.
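The pat.txt excerpt above says drivers should switch a RAM range to uc/wc/wt and back to wb in pairs, and use ioremap_wc for write-combined MMIO. A kernel-style sketch of that pairing, assuming a driver that already knows the BAR address and the RAM range's virtual address and page count; error handling is trimmed and the function names are illustrative:

```c
/* Kernel-context sketch (not buildable standalone). */
#include <linux/io.h>
#include <linux/errno.h>
#include <asm/cacheflush.h>

static void __iomem *fb_regs;

static int map_frame_buffer(phys_addr_t bar, size_t len,
                            unsigned long ram_vaddr, int ram_pages)
{
    /* MMIO/BAR side: request a write-combined mapping directly. */
    fb_regs = ioremap_wc(bar, len);
    if (!fb_regs)
        return -ENOMEM;

    /* RAM side: set_memory_wc() ... use ... set_memory_wb(), in pairs,
     * as the text above requires. */
    set_memory_wc(ram_vaddr, ram_pages);
    return 0;
}

static void unmap_frame_buffer(unsigned long ram_vaddr, int ram_pages)
{
    set_memory_wb(ram_vaddr, ram_pages);    /* restore wb before freeing */
    iounmap(fb_regs);
}
```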
|
/linux-4.4.14/arch/x86/kernel/cpu/ |
D | mkcapflags.sh | 16 PFX_SZ=$(echo $PFX | wc -c)
|
/linux-4.4.14/drivers/infiniband/ulp/srpt/ |
D | ib_srpt.c | 466 ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc, in srpt_mad_recv_handler() 473 rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp, in srpt_mad_recv_handler() 474 mad_wc->wc->pkey_index, 0, in srpt_mad_recv_handler() 1874 struct ib_wc *wc) in srpt_process_rcv_completion() argument 1880 index = idx_from_wr_id(wc->wr_id); in srpt_process_rcv_completion() 1881 if (wc->status == IB_WC_SUCCESS) { in srpt_process_rcv_completion() 1891 index, wc->status); in srpt_process_rcv_completion() 1912 struct ib_wc *wc) in srpt_process_send_completion() argument 1918 index = idx_from_wr_id(wc->wr_id); in srpt_process_send_completion() 1919 opcode = opcode_from_wr_id(wc->wr_id); in srpt_process_send_completion() [all …]
|
D | ib_srpt.h | 320 struct ib_wc wc[16]; member
|
/linux-4.4.14/drivers/infiniband/ulp/isert/ |
D | ib_isert.c | 2048 isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc) in isert_cq_comp_err() argument 2050 if (wc->wr_id == ISER_BEACON_WRID) { in isert_cq_comp_err() 2054 } else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) { in isert_cq_comp_err() 2059 desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id; in isert_cq_comp_err() 2069 isert_handle_wc(struct ib_wc *wc) in isert_handle_wc() argument 2075 isert_conn = wc->qp->qp_context; in isert_handle_wc() 2076 if (likely(wc->status == IB_WC_SUCCESS)) { in isert_handle_wc() 2077 if (wc->opcode == IB_WC_RECV) { in isert_handle_wc() 2078 rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id; in isert_handle_wc() 2079 isert_rcv_completion(rx_desc, isert_conn, wc->byte_len); in isert_handle_wc() [all …]
|
/linux-4.4.14/drivers/infiniband/hw/usnic/ |
D | usnic_ib_verbs.h | 87 struct ib_wc *wc);
|
D | usnic_ib_verbs.c | 770 struct ib_wc *wc) in usnic_ib_poll_cq() argument
|
/linux-4.4.14/include/rdma/ |
D | ib_mad.h | 526 struct ib_wc *wc; member 713 struct ib_wc *wc);
|
D | ib_verbs.h | 1719 struct ib_wc *wc); 2254 const struct ib_wc *wc, const struct ib_grh *grh, 2269 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, 2513 struct ib_wc *wc) in ib_poll_cq() argument 2515 return cq->device->poll_cq(cq, num_entries, wc); in ib_poll_cq()
|
/linux-4.4.14/drivers/infiniband/hw/ocrdma/ |
D | ocrdma_verbs.h | 51 int ocrdma_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
|
D | ocrdma_verbs.c | 2951 int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) in ocrdma_poll_cq() argument 2962 num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc); in ocrdma_poll_cq() 2967 wc = wc + num_os_cqe; in ocrdma_poll_cq() 2977 err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc); in ocrdma_poll_cq() 2980 wc = wc + err_cqes; in ocrdma_poll_cq()
|
/linux-4.4.14/net/netfilter/ |
D | xt_osf.c | 244 if (f->wss.wc >= OSF_WSS_MAX) in xt_osf_match_packet() 258 check_WSS = f->wss.wc; in xt_osf_match_packet()
|
/linux-4.4.14/arch/arm/mach-ixp4xx/ |
D | ixp4xx_npe.c | 219 u32 wc; in npe_debug_instr() local 243 wc = __raw_readl(&npe->regs->watch_count); in npe_debug_instr() 250 if (wc != __raw_readl(&npe->regs->watch_count)) in npe_debug_instr()
|
/linux-4.4.14/drivers/infiniband/ulp/srp/ |
D | ib_srp.c | 1905 static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc) in srp_handle_recv() argument 1909 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id; in srp_handle_recv() 1922 iu->buf, wc->byte_len, true); in srp_handle_recv() 2009 struct ib_wc wc; in srp_recv_completion() local 2012 while (ib_poll_cq(cq, 1, &wc) > 0) { in srp_recv_completion() 2013 if (likely(wc.status == IB_WC_SUCCESS)) { in srp_recv_completion() 2014 srp_handle_recv(ch, &wc); in srp_recv_completion() 2016 srp_handle_qp_err(wc.wr_id, wc.status, false, ch); in srp_recv_completion() 2024 struct ib_wc wc; in srp_send_completion() local 2027 while (ib_poll_cq(cq, 1, &wc) > 0) { in srp_send_completion() [all …]
|
/linux-4.4.14/tools/testing/selftests/zram/ |
D | zram_lib.sh | 74 dev_num_created=$(ls /dev/zram* | wc -w)
|
/linux-4.4.14/tools/vm/ |
D | slabinfo-gnuplot.sh | 74 wc_lines=`cat "$file" | wc -l`
|
/linux-4.4.14/drivers/staging/lustre/lnet/klnds/o2iblnd/ |
D | o2iblnd_cb.c | 3202 kiblnd_complete(struct ib_wc *wc) in kiblnd_complete() argument 3204 switch (kiblnd_wreqid2type(wc->wr_id)) { in kiblnd_complete() 3216 kiblnd_wreqid2ptr(wc->wr_id), wc->status); in kiblnd_complete() 3220 kiblnd_tx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status); in kiblnd_complete() 3224 kiblnd_rx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status, in kiblnd_complete() 3225 wc->byte_len); in kiblnd_complete() 3279 struct ib_wc wc; in kiblnd_scheduler() local 3320 rc = ib_poll_cq(conn->ibc_cq, 1, &wc); in kiblnd_scheduler() 3334 rc = ib_poll_cq(conn->ibc_cq, 1, &wc); in kiblnd_scheduler() 3365 kiblnd_complete(&wc); in kiblnd_scheduler()
|
/linux-4.4.14/arch/arm/boot/dts/ |
D | stih415-clock.dtsi | 437 "", /* clk-m-mpestac-wc */ 451 clock-output-names = "", /* clk-m-mpevtacrx0-wc */ 452 "", /* clk-m-mpevtacrx1-wc */
|
D | stih416-clock.dtsi | 439 "", /* clk-m-mpestac-wc */ 453 clock-output-names = "", /* clk-m-mpevtacrx0-wc */ 454 "", /* clk-m-mpevtacrx1-wc */
|
/linux-4.4.14/include/xen/interface/ |
D | xen.h | 583 struct pvclock_wall_clock wc; member
|
/linux-4.4.14/arch/arm/mm/ |
D | proc-arm740.S | 57 bic ip, ip, #0x0000000c @ ............wc..
|
/linux-4.4.14/drivers/isdn/hardware/mISDN/ |
D | hfcmulti.c | 746 vpm_init(struct hfc_multi *wc) in vpm_init() argument 756 ver = vpm_in(wc, x, 0x1a0); in vpm_init() 761 vpm_out(wc, x, 0x1a8 + y, 0x00); /* GPIO out */ in vpm_init() 762 vpm_out(wc, x, 0x1ac + y, 0x00); /* GPIO dir */ in vpm_init() 763 vpm_out(wc, x, 0x1b0 + y, 0x00); /* GPIO sel */ in vpm_init() 767 reg = vpm_in(wc, x, 0x1a3); /* misc_con */ in vpm_init() 768 vpm_out(wc, x, 0x1a3, reg & ~2); in vpm_init() 771 vpm_out(wc, x, 0x022, 1); in vpm_init() 772 vpm_out(wc, x, 0x023, 0xff); in vpm_init() 775 vpm_out(wc, x, 0x02f, 0x00); in vpm_init() [all …]
|
/linux-4.4.14/drivers/net/ethernet/cavium/liquidio/ |
D | lio_ethtool.c | 45 wait_queue_head_t wc; member 271 wake_up_interruptible(&mdio_cmd_ctx->wc); in octnet_mdio_resp_callback() 316 init_waitqueue_head(&mdio_cmd_ctx->wc); in octnet_mdio45_access() 329 sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond); in octnet_mdio45_access()
|
D | lio_main.c | 102 wait_queue_head_t wc; member 1736 wake_up_interruptible(&ctx->wc); in if_cfg_callback() 3181 init_waitqueue_head(&ctx->wc); in setup_nic_devices() 3208 sleep_cond(&ctx->wc, &ctx->cond); in setup_nic_devices()
|
/linux-4.4.14/arch/x86/xen/ |
D | time.c | 175 struct pvclock_wall_clock *wall_clock = &(s->wc); in xen_read_wallclock()
|
/linux-4.4.14/kernel/debug/kdb/ |
D | kdb_main.c | 1518 } wc; in kdb_md_line() local 1521 cp = wc.c + 8 - bytesperword; in kdb_md_line() 1523 cp = wc.c; in kdb_md_line() 1525 wc.word = word; in kdb_md_line()
|
/linux-4.4.14/fs/fat/ |
D | dir.c | 210 wchar_t wc; in fat_short2lower_uni() local 212 charlen = t->char2uni(c, clen, &wc); in fat_short2lower_uni() 228 *uni = wc; in fat_short2lower_uni()
|
/linux-4.4.14/arch/arm/mach-sa1100/ |
D | Kconfig | 96 <http://h10025.www1.hp.com/ewfrf/wc/product?product=61677&cc=us&lc=en&dlc=en&product=61677#>
|
/linux-4.4.14/Documentation/ |
D | dynamic-debug-howto.txt | 72 nullarbor:~ # grep -i rdma <debugfs>/dynamic_debug/control | wc -l 75 nullarbor:~ # grep -i tcp <debugfs>/dynamic_debug/control | wc -l
|
/linux-4.4.14/drivers/gpu/drm/msm/dsi/ |
D | dsi_host.c | 722 u32 wc; in dsi_timing_setup() local 746 wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1; in dsi_timing_setup() 749 DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) | in dsi_timing_setup()
|
/linux-4.4.14/drivers/scsi/qla2xxx/ |
D | qla_gs.c | 718 uint16_t wc; in qla2x00_prep_sns_cmd() local 724 wc = data_size / 2; /* Size in 16bit words. */ in qla2x00_prep_sns_cmd() 725 sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc); in qla2x00_prep_sns_cmd() 730 wc = (data_size - 16) / 4; /* Size in 32bit words. */ in qla2x00_prep_sns_cmd() 731 sns_cmd->p.cmd.size = cpu_to_le16(wc); in qla2x00_prep_sns_cmd()
|
/linux-4.4.14/include/uapi/rdma/ |
D | ib_user_verbs.h | 424 struct ib_uverbs_wc wc[0]; member
|
/linux-4.4.14/arch/x86/kvm/ |
D | x86.c | 1154 struct pvclock_wall_clock wc; in kvm_write_wall_clock() local 1183 wc.sec = boot.tv_sec; in kvm_write_wall_clock() 1184 wc.nsec = boot.tv_nsec; in kvm_write_wall_clock() 1185 wc.version = version; in kvm_write_wall_clock() 1187 kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); in kvm_write_wall_clock()
|
/linux-4.4.14/ |
D | Makefile | 1000 if [ `echo -n "$(KERNELRELEASE)" | wc -c ` -gt $(uts_len) ]; then \
|
/linux-4.4.14/drivers/video/fbdev/omap2/dss/ |
D | dsi.c | 4341 int bl, wc, pps, tot; in print_dsi_vm() local 4343 wc = DIV_ROUND_UP(t->hact * t->bitspp, 8); in print_dsi_vm() 4344 pps = DIV_ROUND_UP(wc + 6, t->ndl); /* pixel packet size */ in print_dsi_vm()
|