rhp 84 drivers/infiniband/hw/cxgb3/iwch.c ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell,
rhp 126 drivers/infiniband/hw/cxgb3/iwch.h static inline int t3b_device(const struct iwch_dev *rhp)
rhp 128 drivers/infiniband/hw/cxgb3/iwch.h return rhp->rdev.t3cdev_p->type == T3B;
rhp 131 drivers/infiniband/hw/cxgb3/iwch.h static inline int t3a_device(const struct iwch_dev *rhp)
rhp 133 drivers/infiniband/hw/cxgb3/iwch.h return rhp->rdev.t3cdev_p->type == T3A;
rhp 136 drivers/infiniband/hw/cxgb3/iwch.h static inline struct iwch_cq *get_chp(struct iwch_dev *rhp, u32 cqid)
rhp 138 drivers/infiniband/hw/cxgb3/iwch.h return xa_load(&rhp->cqs, cqid);
rhp 141 drivers/infiniband/hw/cxgb3/iwch.h static inline struct iwch_qp *get_qhp(struct iwch_dev *rhp, u32 qpid)
rhp 143 drivers/infiniband/hw/cxgb3/iwch.h return xa_load(&rhp->qps, qpid);
rhp 146 drivers/infiniband/hw/cxgb3/iwch.h static inline struct iwch_mr *get_mhp(struct iwch_dev *rhp, u32 mmid)
rhp 148 drivers/infiniband/hw/cxgb3/iwch.h return xa_load(&rhp->mrs, mmid);
rhp 921 drivers/infiniband/hw/cxgb3/iwch_cm.c err = iwch_modify_qp(ep->com.qp->rhp,
rhp 1473 drivers/infiniband/hw/cxgb3/iwch_cm.c iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
rhp 1488 drivers/infiniband/hw/cxgb3/iwch_cm.c iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
rhp 1583 drivers/infiniband/hw/cxgb3/iwch_cm.c ret = iwch_modify_qp(ep->com.qp->rhp,
rhp 1647 drivers/infiniband/hw/cxgb3/iwch_cm.c iwch_modify_qp(ep->com.qp->rhp,
rhp 1712 drivers/infiniband/hw/cxgb3/iwch_cm.c iwch_modify_qp(ep->com.qp->rhp,
rhp 1742 drivers/infiniband/hw/cxgb3/iwch_cm.c iwch_modify_qp(ep->com.qp->rhp,
rhp 1798 drivers/infiniband/hw/cxgb3/iwch_cm.c if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
rhp 1799 drivers/infiniband/hw/cxgb3/iwch_cm.c (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
rhp 1831 drivers/infiniband/hw/cxgb3/iwch_cm.c err = iwch_modify_qp(ep->com.qp->rhp,
rhp 35 drivers/infiniband/hw/cxgb3/iwch_cq.c static int __iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
rhp 47 drivers/infiniband/hw/cxgb3/iwch_cq.c if (t3a_device(chp->rhp) && credit) {
rhp 50 drivers/infiniband/hw/cxgb3/iwch_cq.c cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
rhp 175 drivers/infiniband/hw/cxgb3/iwch_cq.c static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
rhp 187 drivers/infiniband/hw/cxgb3/iwch_cq.c qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
rhp 190 drivers/infiniband/hw/cxgb3/iwch_cq.c ret = __iwch_poll_cq_one(rhp, chp, qhp, wc);
rhp 193 drivers/infiniband/hw/cxgb3/iwch_cq.c ret = __iwch_poll_cq_one(rhp, chp, NULL, wc);
rhp 200 drivers/infiniband/hw/cxgb3/iwch_cq.c struct iwch_dev *rhp;
rhp 207 drivers/infiniband/hw/cxgb3/iwch_cq.c rhp = chp->rhp;
rhp 218 drivers/infiniband/hw/cxgb3/iwch_cq.c err = iwch_poll_cq_one(rhp, chp, wc + npolled);
rhp 83 drivers/infiniband/hw/cxgb3/iwch_ev.c iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
rhp 52 drivers/infiniband/hw/cxgb3/iwch_mem.c return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
rhp 55 drivers/infiniband/hw/cxgb3/iwch_mem.c int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
rhp 61 drivers/infiniband/hw/cxgb3/iwch_mem.c if (cxio_register_phys_mem(&rhp->rdev,
rhp 73 drivers/infiniband/hw/cxgb3/iwch_mem.c cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
rhp 80 drivers/infiniband/hw/cxgb3/iwch_mem.c mhp->attr.pbl_addr = cxio_hal_pblpool_alloc(&mhp->rhp->rdev,
rhp 93 drivers/infiniband/hw/cxgb3/iwch_mem.c cxio_hal_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
rhp 99 drivers/infiniband/hw/cxgb3/iwch_mem.c return cxio_write_pbl(&mhp->rhp->rdev, pages,
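A note on the pattern in the iwch.h entries above: get_chp(), get_qhp() and get_mhp() are thin wrappers that resolve a hardware ID to a driver object through a per-device xarray. A minimal sketch of that lookup shape, with illustrative names (demo_dev/demo_qp are not part of the driver):

        #include <linux/xarray.h>
        #include <linux/types.h>

        struct demo_qp;                 /* opaque object stored in the xarray */

        struct demo_dev {
                struct xarray qps;      /* keyed by hardware qpid */
        };

        /* Mirrors get_qhp() above: an RCU-safe, lock-free lookup by ID. */
        static inline struct demo_qp *demo_get_qhp(struct demo_dev *rhp, u32 qpid)
        {
                return xa_load(&rhp->qps, qpid);
        }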
rhp 67 drivers/infiniband/hw/cxgb3/iwch_provider.c struct iwch_dev *rhp = to_iwch_dev(context->device);
rhp 74 drivers/infiniband/hw/cxgb3/iwch_provider.c cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
rhp 82 drivers/infiniband/hw/cxgb3/iwch_provider.c struct iwch_dev *rhp = to_iwch_dev(ibdev);
rhp 85 drivers/infiniband/hw/cxgb3/iwch_provider.c cxio_init_ucontext(&rhp->rdev, &context->uctx);
rhp 98 drivers/infiniband/hw/cxgb3/iwch_provider.c xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
rhp 102 drivers/infiniband/hw/cxgb3/iwch_provider.c cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
rhp 111 drivers/infiniband/hw/cxgb3/iwch_provider.c struct iwch_dev *rhp = to_iwch_dev(ibcq->device);
rhp 123 drivers/infiniband/hw/cxgb3/iwch_provider.c if (!t3a_device(rhp)) {
rhp 131 drivers/infiniband/hw/cxgb3/iwch_provider.c if (t3a_device(rhp)) {
rhp 147 drivers/infiniband/hw/cxgb3/iwch_provider.c if (cxio_create_cq(&rhp->rdev, &chp->cq, !udata))
rhp 150 drivers/infiniband/hw/cxgb3/iwch_provider.c chp->rhp = rhp;
rhp 156 drivers/infiniband/hw/cxgb3/iwch_provider.c if (xa_store_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL)) {
rhp 157 drivers/infiniband/hw/cxgb3/iwch_provider.c cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
rhp 207 drivers/infiniband/hw/cxgb3/iwch_provider.c struct iwch_dev *rhp;
rhp 215 drivers/infiniband/hw/cxgb3/iwch_provider.c rhp = chp->rhp;
rhp 228 drivers/infiniband/hw/cxgb3/iwch_provider.c err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
rhp 295 drivers/infiniband/hw/cxgb3/iwch_provider.c struct iwch_dev *rhp;
rhp 299 drivers/infiniband/hw/cxgb3/iwch_provider.c rhp = php->rhp;
rhp 301 drivers/infiniband/hw/cxgb3/iwch_provider.c cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
rhp 309 drivers/infiniband/hw/cxgb3/iwch_provider.c struct iwch_dev *rhp;
rhp 312 drivers/infiniband/hw/cxgb3/iwch_provider.c rhp = (struct iwch_dev *) ibdev;
rhp 313 drivers/infiniband/hw/cxgb3/iwch_provider.c pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
rhp 318 drivers/infiniband/hw/cxgb3/iwch_provider.c php->rhp = rhp;
rhp 333 drivers/infiniband/hw/cxgb3/iwch_provider.c struct iwch_dev *rhp;
rhp 341 drivers/infiniband/hw/cxgb3/iwch_provider.c rhp = mhp->rhp;
rhp 343 drivers/infiniband/hw/cxgb3/iwch_provider.c cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
rhp 346 drivers/infiniband/hw/cxgb3/iwch_provider.c xa_erase_irq(&rhp->mrs, mmid);
rhp 360 drivers/infiniband/hw/cxgb3/iwch_provider.c struct iwch_dev *rhp = php->rhp;
rhp 379 drivers/infiniband/hw/cxgb3/iwch_provider.c mhp->rhp = rhp;
rhp 419 drivers/infiniband/hw/cxgb3/iwch_provider.c ret = iwch_register_mem(rhp, php, mhp, shift);
rhp 439 drivers/infiniband/hw/cxgb3/iwch_provider.c struct iwch_dev *rhp;
rhp 447 drivers/infiniband/hw/cxgb3/iwch_provider.c rhp = php->rhp;
rhp 452 drivers/infiniband/hw/cxgb3/iwch_provider.c mhp->rhp = rhp;
rhp 503 drivers/infiniband/hw/cxgb3/iwch_provider.c err = iwch_register_mem(rhp, php, mhp, shift);
rhp 507 drivers/infiniband/hw/cxgb3/iwch_provider.c if (udata && !t3a_device(rhp)) {
rhp 509 drivers/infiniband/hw/cxgb3/iwch_provider.c rhp->rdev.rnic_info.pbl_base) >> 3;
rhp 534 drivers/infiniband/hw/cxgb3/iwch_provider.c struct iwch_dev *rhp;
rhp 545 drivers/infiniband/hw/cxgb3/iwch_provider.c rhp = php->rhp;
rhp 549 drivers/infiniband/hw/cxgb3/iwch_provider.c ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
rhp 554 drivers/infiniband/hw/cxgb3/iwch_provider.c mhp->rhp = rhp;
rhp 560 drivers/infiniband/hw/cxgb3/iwch_provider.c if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
rhp 561 drivers/infiniband/hw/cxgb3/iwch_provider.c cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
rhp 571 drivers/infiniband/hw/cxgb3/iwch_provider.c struct iwch_dev *rhp;
rhp 576 drivers/infiniband/hw/cxgb3/iwch_provider.c rhp = mhp->rhp;
rhp 578 drivers/infiniband/hw/cxgb3/iwch_provider.c cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
rhp 579 drivers/infiniband/hw/cxgb3/iwch_provider.c xa_erase_irq(&rhp->mrs, mmid);
rhp 588 drivers/infiniband/hw/cxgb3/iwch_provider.c struct iwch_dev *rhp;
rhp 600 drivers/infiniband/hw/cxgb3/iwch_provider.c rhp = php->rhp;
rhp 609 drivers/infiniband/hw/cxgb3/iwch_provider.c mhp->rhp = rhp;
rhp 614 drivers/infiniband/hw/cxgb3/iwch_provider.c ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
rhp 624 drivers/infiniband/hw/cxgb3/iwch_provider.c ret = xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL);
rhp 631 drivers/infiniband/hw/cxgb3/iwch_provider.c cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
rhp 667 drivers/infiniband/hw/cxgb3/iwch_provider.c struct iwch_dev *rhp;
rhp 673 drivers/infiniband/hw/cxgb3/iwch_provider.c rhp = qhp->rhp;
rhp 676 drivers/infiniband/hw/cxgb3/iwch_provider.c iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
rhp 679 drivers/infiniband/hw/cxgb3/iwch_provider.c xa_erase_irq(&rhp->qps, qhp->wq.qpid);
rhp 686 drivers/infiniband/hw/cxgb3/iwch_provider.c cxio_destroy_qp(&rhp->rdev, &qhp->wq,
rhp 687 drivers/infiniband/hw/cxgb3/iwch_provider.c ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
rhp 699 drivers/infiniband/hw/cxgb3/iwch_provider.c struct iwch_dev *rhp;
rhp 712 drivers/infiniband/hw/cxgb3/iwch_provider.c rhp = php->rhp;
rhp 713 drivers/infiniband/hw/cxgb3/iwch_provider.c schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
rhp 714 drivers/infiniband/hw/cxgb3/iwch_provider.c rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
rhp 758 drivers/infiniband/hw/cxgb3/iwch_provider.c if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
rhp 759 drivers/infiniband/hw/cxgb3/iwch_provider.c ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
rhp 768 drivers/infiniband/hw/cxgb3/iwch_provider.c qhp->rhp = rhp;
rhp 795 drivers/infiniband/hw/cxgb3/iwch_provider.c if (xa_store_irq(&rhp->qps, qhp->wq.qpid, qhp, GFP_KERNEL)) {
rhp 796 drivers/infiniband/hw/cxgb3/iwch_provider.c cxio_destroy_qp(&rhp->rdev, &qhp->wq,
rhp 797 drivers/infiniband/hw/cxgb3/iwch_provider.c ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
rhp 856 drivers/infiniband/hw/cxgb3/iwch_provider.c struct iwch_dev *rhp;
rhp 872 drivers/infiniband/hw/cxgb3/iwch_provider.c rhp = qhp->rhp;
rhp 888 drivers/infiniband/hw/cxgb3/iwch_provider.c return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
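The iwch_provider.c entries around lines 560-561 and 795-797 repeat one error-unwind shape: the object is published in the xarray last, and a failed insert rolls back the hardware-side allocation. A hedged sketch of that shape (demo_hw_destroy() is an illustrative stand-in, not a cxio call):

        #include <linux/xarray.h>
        #include <linux/errno.h>
        #include <linux/gfp.h>
        #include <linux/types.h>

        void demo_hw_destroy(void *obj);        /* illustrative rollback hook */

        static int demo_publish(struct xarray *xa, u32 id, void *obj)
        {
                /* Publish last, so lookups never see a half-built object. */
                if (xa_insert_irq(xa, id, obj, GFP_KERNEL)) {
                        demo_hw_destroy(obj);
                        return -ENOMEM;
                }
                return 0;
        }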
rhp 47 drivers/infiniband/hw/cxgb3/iwch_provider.h struct iwch_dev *rhp;
rhp 77 drivers/infiniband/hw/cxgb3/iwch_provider.h struct iwch_dev *rhp;
rhp 93 drivers/infiniband/hw/cxgb3/iwch_provider.h struct iwch_dev *rhp;
rhp 105 drivers/infiniband/hw/cxgb3/iwch_provider.h struct iwch_dev *rhp;
rhp 163 drivers/infiniband/hw/cxgb3/iwch_provider.h struct iwch_dev *rhp;
rhp 260 drivers/infiniband/hw/cxgb3/iwch_provider.h int iwch_modify_qp(struct iwch_dev *rhp,
rhp 339 drivers/infiniband/hw/cxgb3/iwch_provider.h int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
rhp 201 drivers/infiniband/hw/cxgb3/iwch_qp.c static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
rhp 209 drivers/infiniband/hw/cxgb3/iwch_qp.c mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
rhp 241 drivers/infiniband/hw/cxgb3/iwch_qp.c rhp->rdev.rnic_info.pbl_base) >> 3) +
rhp 255 drivers/infiniband/hw/cxgb3/iwch_qp.c err = iwch_sgl2pbl_map(qhp->rhp, wr->sg_list, wr->num_sge, pbl_addr,
rhp 302 drivers/infiniband/hw/cxgb3/iwch_qp.c pbl_addr = cxio_hal_pblpool_alloc(&qhp->rhp->rdev, T3_STAG0_PBL_SIZE);
rhp 309 drivers/infiniband/hw/cxgb3/iwch_qp.c pbl_offset = (pbl_addr - qhp->rhp->rdev.rnic_info.pbl_base) >> 3;
rhp 686 drivers/infiniband/hw/cxgb3/iwch_qp.c return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb);
rhp 717 drivers/infiniband/hw/cxgb3/iwch_qp.c return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
rhp 777 drivers/infiniband/hw/cxgb3/iwch_qp.c rchp = get_chp(qhp->rhp, qhp->attr.rcq);
rhp 778 drivers/infiniband/hw/cxgb3/iwch_qp.c schp = get_chp(qhp->rhp, qhp->attr.scq);
rhp 815 drivers/infiniband/hw/cxgb3/iwch_qp.c static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
rhp 862 drivers/infiniband/hw/cxgb3/iwch_qp.c ret = cxio_rdma_init(&rhp->rdev, &init_attr);
rhp 867 drivers/infiniband/hw/cxgb3/iwch_qp.c int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
rhp 901 drivers/infiniband/hw/cxgb3/iwch_qp.c rhp->attr.max_rdma_read_qp_depth) {
rhp 909 drivers/infiniband/hw/cxgb3/iwch_qp.c rhp->attr.max_rdma_reads_per_qp) {
rhp 948 drivers/infiniband/hw/cxgb3/iwch_qp.c ret = rdma_init(rhp, qhp, mask, attrs);
rhp 1643 drivers/infiniband/hw/cxgb4/cm.c err = c4iw_modify_qp(ep->com.qp->rhp,
rhp 1658 drivers/infiniband/hw/cxgb4/cm.c err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
rhp 1677 drivers/infiniband/hw/cxgb4/cm.c err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
rhp 1882 drivers/infiniband/hw/cxgb4/cm.c c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
rhp 2756 drivers/infiniband/hw/cxgb4/cm.c ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
rhp 2774 drivers/infiniband/hw/cxgb4/cm.c c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
rhp 2804 drivers/infiniband/hw/cxgb4/cm.c c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
rhp 2908 drivers/infiniband/hw/cxgb4/cm.c ret = c4iw_modify_qp(ep->com.qp->rhp,
rhp 2997 drivers/infiniband/hw/cxgb4/cm.c c4iw_modify_qp(ep->com.qp->rhp,
rhp 3034 drivers/infiniband/hw/cxgb4/cm.c c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
rhp 3203 drivers/infiniband/hw/cxgb4/cm.c err = c4iw_modify_qp(ep->com.qp->rhp,
rhp 3726 drivers/infiniband/hw/cxgb4/cm.c ret = c4iw_modify_qp(ep->com.qp->rhp,
rhp 4243 drivers/infiniband/hw/cxgb4/cm.c c4iw_modify_qp(ep->com.qp->rhp,
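The reason ep->com.qp->rhp recurs throughout both cm.c files is that every provider object records an rhp back-pointer to its owning device at create time ("chp->rhp = rhp;", "qhp->rhp = rhp;" above), so connection-management code can reach the device from the QP alone. A sketch of the idiom with illustrative names, not the drivers' actual types:

        #include <linux/types.h>

        struct demo_dev;                        /* the device an object belongs to */

        struct demo_qp {
                struct demo_dev *rhp;           /* back-pointer, set once at create */
                u32 qpid;
        };

        static void demo_qp_init(struct demo_qp *qhp, struct demo_dev *rhp, u32 qpid)
        {
                qhp->rhp = rhp;                 /* mirrors "qhp->rhp = rhp;" above */
                qhp->qpid = qpid;
        }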
rhp 351 drivers/infiniband/hw/cxgb4/cq.c qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));
rhp 803 drivers/infiniband/hw/cxgb4/cq.c c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
rhp 844 drivers/infiniband/hw/cxgb4/cq.c c4iw_invalidate_mr(qhp->rhp,
rhp 933 drivers/infiniband/hw/cxgb4/cq.c qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
rhp 978 drivers/infiniband/hw/cxgb4/cq.c xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
rhp 984 drivers/infiniband/hw/cxgb4/cq.c destroy_cq(&chp->rhp->rdev, &chp->cq,
rhp 996 drivers/infiniband/hw/cxgb4/cq.c struct c4iw_dev *rhp = to_c4iw_dev(ibcq->device);
rhp 1010 drivers/infiniband/hw/cxgb4/cq.c if (vector >= rhp->rdev.lldi.nciq)
rhp 1046 drivers/infiniband/hw/cxgb4/cq.c hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);
rhp 1068 drivers/infiniband/hw/cxgb4/cq.c ret = create_cq(&rhp->rdev, &chp->cq,
rhp 1069 drivers/infiniband/hw/cxgb4/cq.c ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
rhp 1074 drivers/infiniband/hw/cxgb4/cq.c chp->rhp = rhp;
rhp 1081 drivers/infiniband/hw/cxgb4/cq.c ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
rhp 1095 drivers/infiniband/hw/cxgb4/cq.c uresp.qid_mask = rhp->rdev.cqmask;
rhp 1137 drivers/infiniband/hw/cxgb4/cq.c xa_erase_irq(&rhp->cqs, chp->cq.cqid);
rhp 1139 drivers/infiniband/hw/cxgb4/cq.c destroy_cq(&chp->rhp->rdev, &chp->cq,
rhp 1140 drivers/infiniband/hw/cxgb4/cq.c ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
rhp 1375 drivers/infiniband/hw/cxgb4/device.c xa_lock_irq(&qp->rhp->qps);
rhp 1377 drivers/infiniband/hw/cxgb4/device.c ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
rhp 1385 drivers/infiniband/hw/cxgb4/device.c xa_unlock_irq(&qp->rhp->qps);
rhp 1390 drivers/infiniband/hw/cxgb4/device.c ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
rhp 1399 drivers/infiniband/hw/cxgb4/device.c xa_unlock_irq(&qp->rhp->qps);
rhp 1404 drivers/infiniband/hw/cxgb4/device.c xa_unlock_irq(&qp->rhp->qps);
rhp 1407 drivers/infiniband/hw/cxgb4/device.c while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
rhp 100 drivers/infiniband/hw/cxgb4/ev.c c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
rhp 349 drivers/infiniband/hw/cxgb4/iw_cxgb4.h static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
rhp 351 drivers/infiniband/hw/cxgb4/iw_cxgb4.h return xa_load(&rhp->cqs, cqid);
rhp 354 drivers/infiniband/hw/cxgb4/iw_cxgb4.h static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
rhp 356 drivers/infiniband/hw/cxgb4/iw_cxgb4.h return xa_load(&rhp->qps, qpid);
rhp 369 drivers/infiniband/hw/cxgb4/iw_cxgb4.h struct c4iw_dev *rhp;
rhp 398 drivers/infiniband/hw/cxgb4/iw_cxgb4.h struct c4iw_dev *rhp;
rhp 416 drivers/infiniband/hw/cxgb4/iw_cxgb4.h struct c4iw_dev *rhp;
rhp 430 drivers/infiniband/hw/cxgb4/iw_cxgb4.h struct c4iw_dev *rhp;
rhp 487 drivers/infiniband/hw/cxgb4/iw_cxgb4.h struct c4iw_dev *rhp;
rhp 510 drivers/infiniband/hw/cxgb4/iw_cxgb4.h struct c4iw_dev *rhp;
rhp 603 drivers/infiniband/hw/cxgb4/iw_cxgb4.h int c4iw_modify_qp(struct c4iw_dev *rhp,
rhp 1048 drivers/infiniband/hw/cxgb4/iw_cxgb4.h void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
rhp 405 drivers/infiniband/hw/cxgb4/mem.c return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
rhp 408 drivers/infiniband/hw/cxgb4/mem.c static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
rhp 414 drivers/infiniband/hw/cxgb4/mem.c ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
rhp 427 drivers/infiniband/hw/cxgb4/mem.c dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
rhp 436 drivers/infiniband/hw/cxgb4/mem.c mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
rhp 449 drivers/infiniband/hw/cxgb4/mem.c struct c4iw_dev *rhp;
rhp 457 drivers/infiniband/hw/cxgb4/mem.c rhp = php->rhp;
rhp 475 drivers/infiniband/hw/cxgb4/mem.c mhp->rhp = rhp;
rhp 485 drivers/infiniband/hw/cxgb4/mem.c ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
rhp 497 drivers/infiniband/hw/cxgb4/mem.c dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
rhp 515 drivers/infiniband/hw/cxgb4/mem.c struct c4iw_dev *rhp;
rhp 528 drivers/infiniband/hw/cxgb4/mem.c rhp = php->rhp;
rhp 530 drivers/infiniband/hw/cxgb4/mem.c if (mr_exceeds_hw_limits(rhp, length))
rhp 544 drivers/infiniband/hw/cxgb4/mem.c mhp->rhp = rhp;
rhp 568 drivers/infiniband/hw/cxgb4/mem.c err = write_pbl(&mhp->rhp->rdev, pages,
rhp 579 drivers/infiniband/hw/cxgb4/mem.c err = write_pbl(&mhp->rhp->rdev, pages,
rhp 595 drivers/infiniband/hw/cxgb4/mem.c err = register_mem(rhp, php, mhp, shift);
rhp 602 drivers/infiniband/hw/cxgb4/mem.c c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
rhp 618 drivers/infiniband/hw/cxgb4/mem.c struct c4iw_dev *rhp;
rhp 629 drivers/infiniband/hw/cxgb4/mem.c rhp = php->rhp;
rhp 646 drivers/infiniband/hw/cxgb4/mem.c ret = allocate_window(&rhp->rdev, &stag, php->pdid, mhp->wr_waitp);
rhp 649 drivers/infiniband/hw/cxgb4/mem.c mhp->rhp = rhp;
rhp 655 drivers/infiniband/hw/cxgb4/mem.c if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
rhp 663 drivers/infiniband/hw/cxgb4/mem.c deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
rhp 676 drivers/infiniband/hw/cxgb4/mem.c struct c4iw_dev *rhp;
rhp 681 drivers/infiniband/hw/cxgb4/mem.c rhp = mhp->rhp;
rhp 683 drivers/infiniband/hw/cxgb4/mem.c xa_erase_irq(&rhp->mrs, mmid);
rhp 684 drivers/infiniband/hw/cxgb4/mem.c deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
rhp 696 drivers/infiniband/hw/cxgb4/mem.c struct c4iw_dev *rhp;
rhp 705 drivers/infiniband/hw/cxgb4/mem.c rhp = php->rhp;
rhp 708 drivers/infiniband/hw/cxgb4/mem.c max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl &&
rhp 725 drivers/infiniband/hw/cxgb4/mem.c mhp->mpl = dma_alloc_coherent(&rhp->rdev.lldi.pdev->dev,
rhp 733 drivers/infiniband/hw/cxgb4/mem.c mhp->rhp = rhp;
rhp 738 drivers/infiniband/hw/cxgb4/mem.c ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
rhp 749 drivers/infiniband/hw/cxgb4/mem.c if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
rhp 757 drivers/infiniband/hw/cxgb4/mem.c dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
rhp 760 drivers/infiniband/hw/cxgb4/mem.c c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
rhp 763 drivers/infiniband/hw/cxgb4/mem.c dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
rhp 797 drivers/infiniband/hw/cxgb4/mem.c struct c4iw_dev *rhp;
rhp 804 drivers/infiniband/hw/cxgb4/mem.c rhp = mhp->rhp;
rhp 806 drivers/infiniband/hw/cxgb4/mem.c xa_erase_irq(&rhp->mrs, mmid);
rhp 808 drivers/infiniband/hw/cxgb4/mem.c dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
rhp 810 drivers/infiniband/hw/cxgb4/mem.c dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
rhp 813 drivers/infiniband/hw/cxgb4/mem.c c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
rhp 824 drivers/infiniband/hw/cxgb4/mem.c void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
rhp 829 drivers/infiniband/hw/cxgb4/mem.c xa_lock_irqsave(&rhp->mrs, flags);
rhp 830 drivers/infiniband/hw/cxgb4/mem.c mhp = xa_load(&rhp->mrs, rkey >> 8);
rhp 833 drivers/infiniband/hw/cxgb4/mem.c xa_unlock_irqrestore(&rhp->mrs, flags);
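The c4iw_invalidate_mr() entries that close the mem.c listing (lines 824-833) pair the xarray lookup with the xarray's own IRQ-safe lock. A minimal sketch of that shape, assuming an illustrative MR type with an invalidated flag (not the driver's actual c4iw_mr):

        #include <linux/xarray.h>
        #include <linux/types.h>

        struct demo_mr {
                bool invalidated;
        };

        static void demo_invalidate_mr(struct xarray *mrs, u32 rkey)
        {
                struct demo_mr *mhp;
                unsigned long flags;

                xa_lock_irqsave(mrs, flags);
                mhp = xa_load(mrs, rkey >> 8);  /* stag index is rkey >> 8 */
                if (mhp)
                        mhp->invalidated = true;
                xa_unlock_irqrestore(mrs, flags);
        }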
rhp 64 drivers/infiniband/hw/cxgb4/provider.c struct c4iw_dev *rhp;
rhp 68 drivers/infiniband/hw/cxgb4/provider.c rhp = to_c4iw_dev(ucontext->ibucontext.device);
rhp 72 drivers/infiniband/hw/cxgb4/provider.c c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
rhp 80 drivers/infiniband/hw/cxgb4/provider.c struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
rhp 86 drivers/infiniband/hw/cxgb4/provider.c c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
rhp 92 drivers/infiniband/hw/cxgb4/provider.c rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
rhp 113 drivers/infiniband/hw/cxgb4/provider.c mm->addr = virt_to_phys(rhp->rdev.status_page);
rhp 195 drivers/infiniband/hw/cxgb4/provider.c struct c4iw_dev *rhp;
rhp 199 drivers/infiniband/hw/cxgb4/provider.c rhp = php->rhp;
rhp 201 drivers/infiniband/hw/cxgb4/provider.c c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
rhp 202 drivers/infiniband/hw/cxgb4/provider.c mutex_lock(&rhp->rdev.stats.lock);
rhp 203 drivers/infiniband/hw/cxgb4/provider.c rhp->rdev.stats.pd.cur--;
rhp 204 drivers/infiniband/hw/cxgb4/provider.c mutex_unlock(&rhp->rdev.stats.lock);
rhp 212 drivers/infiniband/hw/cxgb4/provider.c struct c4iw_dev *rhp;
rhp 215 drivers/infiniband/hw/cxgb4/provider.c rhp = (struct c4iw_dev *) ibdev;
rhp 216 drivers/infiniband/hw/cxgb4/provider.c pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
rhp 221 drivers/infiniband/hw/cxgb4/provider.c php->rhp = rhp;
rhp 230 drivers/infiniband/hw/cxgb4/provider.c mutex_lock(&rhp->rdev.stats.lock);
rhp 231 drivers/infiniband/hw/cxgb4/provider.c rhp->rdev.stats.pd.cur++;
rhp 232 drivers/infiniband/hw/cxgb4/provider.c if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
rhp 233 drivers/infiniband/hw/cxgb4/provider.c rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
rhp 234 drivers/infiniband/hw/cxgb4/provider.c mutex_unlock(&rhp->rdev.stats.lock);
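The provider.c PD entries (lines 199-234) show a mutex-protected counter with a running high-water mark, incremented on allocation and decremented on free. A sketch of that accounting shape (the demo_stats type is illustrative):

        #include <linux/mutex.h>
        #include <linux/types.h>

        struct demo_stats {
                struct mutex lock;
                u64 cur;
                u64 max;
        };

        static void demo_stats_inc(struct demo_stats *s)
        {
                mutex_lock(&s->lock);
                s->cur++;
                if (s->cur > s->max)            /* track the high-water mark */
                        s->max = s->cur;
                mutex_unlock(&s->lock);
        }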
rhp 719 drivers/infiniband/hw/cxgb4/qp.c cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
rhp 743 drivers/infiniband/hw/cxgb4/qp.c cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
rhp 806 drivers/infiniband/hw/cxgb4/qp.c PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3));
rhp 915 drivers/infiniband/hw/cxgb4/qp.c xa_lock_irqsave(&qhp->rhp->qps, flags);
rhp 917 drivers/infiniband/hw/cxgb4/qp.c if (qhp->rhp->db_state == NORMAL)
rhp 920 drivers/infiniband/hw/cxgb4/qp.c add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
rhp 924 drivers/infiniband/hw/cxgb4/qp.c xa_unlock_irqrestore(&qhp->rhp->qps, flags);
rhp 932 drivers/infiniband/hw/cxgb4/qp.c xa_lock_irqsave(&qhp->rhp->qps, flags);
rhp 934 drivers/infiniband/hw/cxgb4/qp.c if (qhp->rhp->db_state == NORMAL)
rhp 937 drivers/infiniband/hw/cxgb4/qp.c add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
rhp 941 drivers/infiniband/hw/cxgb4/qp.c xa_unlock_irqrestore(&qhp->rhp->qps, flags);
rhp 1084 drivers/infiniband/hw/cxgb4/qp.c struct c4iw_dev *rhp;
rhp 1092 drivers/infiniband/hw/cxgb4/qp.c rhp = qhp->rhp;
rhp 1120 drivers/infiniband/hw/cxgb4/qp.c if (qhp->rhp->rdev.lldi.write_cmpl_support &&
rhp 1121 drivers/infiniband/hw/cxgb4/qp.c CHELSIO_CHIP_VERSION(qhp->rhp->rdev.lldi.adapter_type) >=
rhp 1163 drivers/infiniband/hw/cxgb4/qp.c if (unlikely(!rhp->rdev.lldi.write_w_imm_support)) {
rhp 1179 drivers/infiniband/hw/cxgb4/qp.c c4iw_invalidate_mr(rhp, wr->sg_list[0].lkey);
rhp 1195 drivers/infiniband/hw/cxgb4/qp.c if (rhp->rdev.lldi.fr_nsmr_tpte_wr_support &&
rhp 1204 drivers/infiniband/hw/cxgb4/qp.c rhp->rdev.lldi.ulptx_memwrite_dsgl);
rhp 1217 drivers/infiniband/hw/cxgb4/qp.c c4iw_invalidate_mr(rhp, wr->ex.invalidate_rkey);
rhp 1236 drivers/infiniband/hw/cxgb4/qp.c rhp->rdev.lldi.ports[0]);
rhp 1250 drivers/infiniband/hw/cxgb4/qp.c if (!rhp->rdev.status_page->db_off) {
rhp 1311 drivers/infiniband/hw/cxgb4/qp.c qhp->rhp->rdev.lldi.ports[0]);
rhp 1330 drivers/infiniband/hw/cxgb4/qp.c if (!qhp->rhp->rdev.status_page->db_off) {
rhp 1589 drivers/infiniband/hw/cxgb4/qp.c c4iw_ofld_send(&qhp->rhp->rdev, skb);
rhp 1691 drivers/infiniband/hw/cxgb4/qp.c static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
rhp 1717 drivers/infiniband/hw/cxgb4/qp.c ret = c4iw_ref_send_wait(&rhp->rdev, skb, ep->com.wr_waitp,
rhp 1748 drivers/infiniband/hw/cxgb4/qp.c static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
rhp 1762 drivers/infiniband/hw/cxgb4/qp.c ret = alloc_ird(rhp, qhp->attr.max_ird);
rhp 1809 drivers/infiniband/hw/cxgb4/qp.c rhp->rdev.lldi.vr->rq.start);
rhp 1820 drivers/infiniband/hw/cxgb4/qp.c ret = c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp,
rhp 1825 drivers/infiniband/hw/cxgb4/qp.c free_ird(rhp, qhp->attr.max_ird);
rhp 1831 drivers/infiniband/hw/cxgb4/qp.c int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
rhp 1870 drivers/infiniband/hw/cxgb4/qp.c if (attrs->max_ird > cur_max_read_depth(rhp)) {
rhp 1917 drivers/infiniband/hw/cxgb4/qp.c ret = rdma_init(rhp, qhp);
rhp 1941 drivers/infiniband/hw/cxgb4/qp.c ret = rdma_fini(rhp, qhp, ep);
rhp 1957 drivers/infiniband/hw/cxgb4/qp.c ret = rdma_fini(rhp, qhp, ep);
rhp 2072 drivers/infiniband/hw/cxgb4/qp.c struct c4iw_dev *rhp;
rhp 2078 drivers/infiniband/hw/cxgb4/qp.c rhp = qhp->rhp;
rhp 2083 drivers/infiniband/hw/cxgb4/qp.c c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
rhp 2085 drivers/infiniband/hw/cxgb4/qp.c c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
rhp 2088 drivers/infiniband/hw/cxgb4/qp.c xa_lock_irq(&rhp->qps);
rhp 2089 drivers/infiniband/hw/cxgb4/qp.c __xa_erase(&rhp->qps, qhp->wq.sq.qid);
rhp 2092 drivers/infiniband/hw/cxgb4/qp.c xa_unlock_irq(&rhp->qps);
rhp 2093 drivers/infiniband/hw/cxgb4/qp.c free_ird(rhp, qhp->attr.max_ird);
rhp 2102 drivers/infiniband/hw/cxgb4/qp.c destroy_qp(&rhp->rdev, &qhp->wq,
rhp 2103 drivers/infiniband/hw/cxgb4/qp.c ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
rhp 2114 drivers/infiniband/hw/cxgb4/qp.c struct c4iw_dev *rhp;
rhp 2133 drivers/infiniband/hw/cxgb4/qp.c rhp = php->rhp;
rhp 2134 drivers/infiniband/hw/cxgb4/qp.c schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
rhp 2135 drivers/infiniband/hw/cxgb4/qp.c rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
rhp 2143 drivers/infiniband/hw/cxgb4/qp.c if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
rhp 2150 drivers/infiniband/hw/cxgb4/qp.c if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
rhp 2168 drivers/infiniband/hw/cxgb4/qp.c (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
rhp 2174 drivers/infiniband/hw/cxgb4/qp.c (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
rhp 2185 drivers/infiniband/hw/cxgb4/qp.c ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
rhp 2186 drivers/infiniband/hw/cxgb4/qp.c ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
rhp 2195 drivers/infiniband/hw/cxgb4/qp.c qhp->rhp = rhp;
rhp 2220 drivers/infiniband/hw/cxgb4/qp.c ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL);
rhp 2260 drivers/infiniband/hw/cxgb4/qp.c if (rhp->rdev.lldi.write_w_imm_support)
rhp 2262 drivers/infiniband/hw/cxgb4/qp.c uresp.qid_mask = rhp->rdev.qpmask;
rhp 2316 drivers/infiniband/hw/cxgb4/qp.c (pci_resource_start(rhp->rdev.lldi.pdev, 0) +
rhp 2356 drivers/infiniband/hw/cxgb4/qp.c xa_erase_irq(&rhp->qps, qhp->wq.sq.qid);
rhp 2358 drivers/infiniband/hw/cxgb4/qp.c destroy_qp(&rhp->rdev, &qhp->wq,
rhp 2359 drivers/infiniband/hw/cxgb4/qp.c ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq);
rhp 2370 drivers/infiniband/hw/cxgb4/qp.c struct c4iw_dev *rhp;
rhp 2386 drivers/infiniband/hw/cxgb4/qp.c rhp = qhp->rhp;
rhp 2411 drivers/infiniband/hw/cxgb4/qp.c if (!is_t4(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
rhp 2415 drivers/infiniband/hw/cxgb4/qp.c return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
rhp 2428 drivers/infiniband/hw/cxgb4/qp.c event.device = &srq->rhp->ibdev;
rhp 2483 drivers/infiniband/hw/cxgb4/qp.c struct c4iw_rdev *rdev = &srq->rhp->rdev;
rhp 2520 drivers/infiniband/hw/cxgb4/qp.c struct c4iw_rdev *rdev = &srq->rhp->rdev;
rhp 2673 drivers/infiniband/hw/cxgb4/qp.c struct c4iw_dev *rhp;
rhp 2686 drivers/infiniband/hw/cxgb4/qp.c rhp = php->rhp;
rhp 2688 drivers/infiniband/hw/cxgb4/qp.c if (!rhp->rdev.lldi.vr->srq.size)
rhp 2690 drivers/infiniband/hw/cxgb4/qp.c if (attrs->attr.max_wr > rhp->rdev.hw_queue.t4_max_rq_size)
rhp 2708 drivers/infiniband/hw/cxgb4/qp.c srq->idx = c4iw_alloc_srq_idx(&rhp->rdev);
rhp 2721 drivers/infiniband/hw/cxgb4/qp.c srq->rhp = rhp;
rhp 2726 drivers/infiniband/hw/cxgb4/qp.c (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
rhp 2732 drivers/infiniband/hw/cxgb4/qp.c &rhp->rdev.uctx, srq->wr_waitp);
rhp 2737 drivers/infiniband/hw/cxgb4/qp.c if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6)
rhp 2753 drivers/infiniband/hw/cxgb4/qp.c uresp.qid_mask = rhp->rdev.qpmask;
rhp 2789 drivers/infiniband/hw/cxgb4/qp.c free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
rhp 2794 drivers/infiniband/hw/cxgb4/qp.c c4iw_free_srq_idx(&rhp->rdev, srq->idx);
rhp 2802 drivers/infiniband/hw/cxgb4/qp.c struct c4iw_dev *rhp;
rhp 2807 drivers/infiniband/hw/cxgb4/qp.c rhp = srq->rhp;
rhp 2812 drivers/infiniband/hw/cxgb4/qp.c free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
rhp 2814 drivers/infiniband/hw/cxgb4/qp.c c4iw_free_srq_idx(&rhp->rdev, srq->idx);
rhp 441 drivers/infiniband/hw/cxgb4/restrack.c struct c4iw_dev *dev = mhp->rhp;
rhp 42 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c struct pvr2_ioread *rhp;
rhp 919 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c if (fhp->rhp) {
rhp 922 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c sp = pvr2_ioread_get_stream(fhp->rhp);
rhp 924 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c pvr2_ioread_destroy(fhp->rhp);
rhp 925 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c fhp->rhp = NULL;
rhp 1050 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c if (fh->rhp) return 0;
rhp 1066 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c fh->rhp = pvr2_channel_create_mpeg_stream(fh->pdi->stream);
rhp 1067 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c if (!fh->rhp) {
rhp 1077 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c return pvr2_ioread_set_enabled(fh->rhp,!0);
rhp 1120 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c if (!fh->rhp) {
rhp 1128 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c ret = pvr2_ioread_read(fh->rhp,buff,count);
rhp 1135 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c pvr2_ioread_avail(fh->rhp) >= 0);
rhp 1154 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c if (!fh->rhp) {
rhp 1161 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c if (pvr2_ioread_avail(fh->rhp) >= 0) {
rhp 4406 fs/xfs/xfs_log_recover.c struct hlist_head *rhp;
rhp 4409 fs/xfs/xfs_log_recover.c rhp = &rhash[XLOG_RHASH(tid)];
rhp 4410 fs/xfs/xfs_log_recover.c hlist_for_each_entry(trans, rhp, r_list) {
rhp 4433 fs/xfs/xfs_log_recover.c hlist_add_head(&trans->r_list, rhp);
rhp 868 include/linux/rcupdate.h static inline void rcu_head_init(struct rcu_head *rhp)
rhp 870 include/linux/rcupdate.h rhp->func = (rcu_callback_t)~0L;
rhp 887 include/linux/rcupdate.h rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
rhp 889 include/linux/rcupdate.h rcu_callback_t func = READ_ONCE(rhp->func);
rhp 477 include/trace/events/rcu.h TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
rhp 480 include/trace/events/rcu.h TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
rhp 484 include/trace/events/rcu.h __field(void *, rhp)
rhp 492 include/trace/events/rcu.h __entry->rhp = rhp;
rhp 493 include/trace/events/rcu.h __entry->func = rhp->func;
rhp 499 include/trace/events/rcu.h __entry->rcuname, __entry->rhp, __entry->func,
rhp 513 include/trace/events/rcu.h TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
rhp 516 include/trace/events/rcu.h TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
rhp 520 include/trace/events/rcu.h __field(void *, rhp)
rhp 528 include/trace/events/rcu.h __entry->rhp = rhp;
rhp 535 include/trace/events/rcu.h __entry->rcuname, __entry->rhp, __entry->offset,
rhp 578 include/trace/events/rcu.h TP_PROTO(const char *rcuname, struct rcu_head *rhp),
rhp 580 include/trace/events/rcu.h TP_ARGS(rcuname, rhp),
rhp 584 include/trace/events/rcu.h __field(void *, rhp)
rhp 590 include/trace/events/rcu.h __entry->rhp = rhp;
rhp 591 include/trace/events/rcu.h __entry->func = rhp->func;
rhp 595 include/trace/events/rcu.h __entry->rcuname, __entry->rhp, __entry->func)
rhp 607 include/trace/events/rcu.h TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset),
rhp 609 include/trace/events/rcu.h TP_ARGS(rcuname, rhp, offset),
rhp 613 include/trace/events/rcu.h __field(void *, rhp)
rhp 619 include/trace/events/rcu.h __entry->rhp = rhp;
rhp 624 include/trace/events/rcu.h __entry->rcuname, __entry->rhp, __entry->offset)
rhp 682 include/trace/events/rcu.h TP_PROTO(const char *rcutorturename, struct rcu_head *rhp,
rhp 685 include/trace/events/rcu.h TP_ARGS(rcutorturename, rhp, secs, c_old, c),
rhp 689 include/trace/events/rcu.h __field(struct rcu_head *, rhp)
rhp 699 include/trace/events/rcu.h __entry->rhp = rhp;
rhp 706 include/trace/events/rcu.h __entry->rcutorturename, __entry->rhp,
rhp 176 kernel/exit.c static void delayed_put_task_struct(struct rcu_head *rhp)
rhp 178 kernel/exit.c struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
rhp 1737 kernel/fork.c static void __delayed_free_task(struct rcu_head *rhp)
rhp 1739 kernel/fork.c struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
rhp 431 kernel/irq/irqdesc.c static void delayed_free_desc(struct rcu_head *rhp)
rhp 433 kernel/irq/irqdesc.c struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);
rhp 118 kernel/pid.c static void delayed_put_pid(struct rcu_head *rhp)
rhp 120 kernel/pid.c struct pid *pid = container_of(rhp, struct pid, rcu);
rhp 460 kernel/rcu/rcu.h struct rcu_head *rhp,
rhp 474 kernel/rcu/rcu.h struct rcu_head *rhp,
rhp 479 kernel/rcu/rcu.h #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
rhp 32 kernel/rcu/rcu_segcblist.c void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp)
rhp 34 kernel/rcu/rcu_segcblist.c *rclp->tail = rhp;
rhp 35 kernel/rcu/rcu_segcblist.c rclp->tail = &rhp->next;
rhp 49 kernel/rcu/rcu_segcblist.c struct rcu_head *rhp)
rhp 58 kernel/rcu/rcu_segcblist.c if (!rhp) {
rhp 61 kernel/rcu/rcu_segcblist.c rhp->next = NULL;
rhp 62 kernel/rcu/rcu_segcblist.c srclp->head = rhp;
rhp 63 kernel/rcu/rcu_segcblist.c srclp->tail = &rhp->next;
rhp 78 kernel/rcu/rcu_segcblist.c struct rcu_head *rhp;
rhp 80 kernel/rcu/rcu_segcblist.c rhp = rclp->head;
rhp 81 kernel/rcu/rcu_segcblist.c if (!rhp)
rhp 84 kernel/rcu/rcu_segcblist.c rclp->head = rhp->next;
rhp 87 kernel/rcu/rcu_segcblist.c return rhp;
rhp 256 kernel/rcu/rcu_segcblist.c struct rcu_head *rhp, bool lazy)
rhp 262 kernel/rcu/rcu_segcblist.c rhp->next = NULL;
rhp 263 kernel/rcu/rcu_segcblist.c WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rhp);
rhp 264 kernel/rcu/rcu_segcblist.c WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], &rhp->next);
rhp 278 kernel/rcu/rcu_segcblist.c struct rcu_head *rhp, bool lazy)
rhp 288 kernel/rcu/rcu_segcblist.c rhp->next = NULL;
rhp 292 kernel/rcu/rcu_segcblist.c WRITE_ONCE(*rsclp->tails[i], rhp);
rhp 294 kernel/rcu/rcu_segcblist.c WRITE_ONCE(rsclp->tails[i], &rhp->next);
rhp 28 kernel/rcu/rcu_segcblist.h void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp);
rhp 31 kernel/rcu/rcu_segcblist.h struct rcu_head *rhp);
rhp 109 kernel/rcu/rcu_segcblist.h struct rcu_head *rhp, bool lazy);
rhp 111 kernel/rcu/rcu_segcblist.h struct rcu_head *rhp, bool lazy);
rhp 347 kernel/rcu/rcuperf.c static void rcu_perf_async_cb(struct rcu_head *rhp)
rhp 350 kernel/rcu/rcuperf.c kfree(rhp);
rhp 362 kernel/rcu/rcuperf.c struct rcu_head *rhp = NULL;
rhp 404 kernel/rcu/rcuperf.c if (!rhp)
rhp 405 kernel/rcu/rcuperf.c rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
rhp 406 kernel/rcu/rcuperf.c if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
rhp 409 kernel/rcu/rcuperf.c cur_ops->async(rhp, rcu_perf_async_cb);
rhp 410 kernel/rcu/rcuperf.c rhp = NULL;
rhp 416 kernel/rcu/rcuperf.c kfree(rhp); /* Because we are stopping. */
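The exit.c, fork.c, irqdesc.c and pid.c entries above all use the canonical call_rcu() shape: embed a struct rcu_head in the object, and recover the object in the callback with container_of() once the grace period ends. A self-contained sketch (demo_obj is an illustrative type, not from any of those files):

        #include <linux/rcupdate.h>
        #include <linux/slab.h>

        struct demo_obj {
                int data;
                struct rcu_head rcu;            /* embedded callback head */
        };

        static void demo_free_rcu(struct rcu_head *rhp)
        {
                struct demo_obj *obj = container_of(rhp, struct demo_obj, rcu);

                kfree(obj);
        }

        /* Caller, after unpublishing obj from all RCU-protected structures: */
        static void demo_release(struct demo_obj *obj)
        {
                call_rcu(&obj->rcu, demo_free_rcu);
        }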
rhp 1128 kernel/rcu/rcutorture.c static void rcu_torture_timer_cb(struct rcu_head *rhp)
rhp 1130 kernel/rcu/rcutorture.c kfree(rhp);
rhp 1340 kernel/rcu/rcutorture.c struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
rhp 1342 kernel/rcu/rcutorture.c if (rhp)
rhp 1343 kernel/rcu/rcutorture.c cur_ops->call(rhp, rcu_torture_timer_cb);
rhp 1644 kernel/rcu/rcutorture.c static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
rhp 1646 kernel/rcu/rcutorture.c struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
rhp 1703 kernel/rcu/rcutorture.c static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
rhp 1707 kernel/rcu/rcutorture.c struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
rhp 2260 kernel/rcu/rcutorture.c static void rcu_torture_leak_cb(struct rcu_head *rhp)
rhp 2264 kernel/rcu/rcutorture.c static void rcu_torture_err_cb(struct rcu_head *rhp)
rhp 113 kernel/rcu/srcutiny.c struct rcu_head *rhp;
rhp 135 kernel/rcu/srcutiny.c rhp = lh;
rhp 138 kernel/rcu/srcutiny.c rhp->func(rhp);
rhp 158 kernel/rcu/srcutiny.c void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
rhp 163 kernel/rcu/srcutiny.c rhp->func = func;
rhp 164 kernel/rcu/srcutiny.c rhp->next = NULL;
rhp 166 kernel/rcu/srcutiny.c *ssp->srcu_cb_tail = rhp;
rhp 167 kernel/rcu/srcutiny.c ssp->srcu_cb_tail = &rhp->next;
rhp 803 kernel/rcu/srcutree.c static void srcu_leak_callback(struct rcu_head *rhp)
rhp 835 kernel/rcu/srcutree.c static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
rhp 846 kernel/rcu/srcutree.c if (debug_rcu_head_queue(rhp)) {
rhp 848 kernel/rcu/srcutree.c WRITE_ONCE(rhp->func, srcu_leak_callback);
rhp 852 kernel/rcu/srcutree.c rhp->func = func;
rhp 857 kernel/rcu/srcutree.c rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
rhp 895 kernel/rcu/srcutree.c void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
rhp 898 kernel/rcu/srcutree.c __call_srcu(ssp, rhp, func, true);
rhp 1007 kernel/rcu/srcutree.c static void srcu_barrier_cb(struct rcu_head *rhp)
rhp 1012 kernel/rcu/srcutree.c sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
rhp 1161 kernel/rcu/srcutree.c struct rcu_head *rhp;
rhp 1182 kernel/rcu/srcutree.c rhp = rcu_cblist_dequeue(&ready_cbs);
rhp 1183 kernel/rcu/srcutree.c for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
rhp 1184 kernel/rcu/srcutree.c debug_rcu_head_unqueue(rhp);
rhp 1186 kernel/rcu/srcutree.c rhp->func(rhp);
rhp 43 kernel/rcu/sync.c static void rcu_sync_func(struct rcu_head *rhp);
rhp 73 kernel/rcu/sync.c static void rcu_sync_func(struct rcu_head *rhp)
rhp 75 kernel/rcu/sync.c struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
rhp 2117 kernel/rcu/tree.c struct rcu_head *rhp;
rhp 2155 kernel/rcu/tree.c rhp = rcu_cblist_dequeue(&rcl);
rhp 2156 kernel/rcu/tree.c for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
rhp 2157 kernel/rcu/tree.c debug_rcu_head_unqueue(rhp);
rhp 2158 kernel/rcu/tree.c if (__rcu_reclaim(rcu_state.name, rhp))
rhp 2545 kernel/rcu/tree.c static void rcu_leak_callback(struct rcu_head *rhp)
rhp 2845 kernel/rcu/tree.c static void rcu_barrier_callback(struct rcu_head *rhp)
rhp 434 kernel/rcu/tree.h static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
rhp 436 kernel/rcu/tree.h static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
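srcutree.c (lines 1182-1186) and tree.c (lines 2155-2158) above drain ready callbacks with the same dequeue-and-invoke loop. A sketch of that loop over an illustrative singly linked callback list (struct rcu_head itself comes from linux/types.h; demo_cblist is not the kernel's rcu_cblist):

        #include <linux/types.h>

        struct demo_cblist {
                struct rcu_head *head;
                struct rcu_head **tail;
        };

        static struct rcu_head *demo_dequeue(struct demo_cblist *rclp)
        {
                struct rcu_head *rhp = rclp->head;

                if (!rhp)
                        return NULL;
                rclp->head = rhp->next;
                if (!rclp->head)
                        rclp->tail = &rclp->head;       /* list is now empty */
                return rhp;
        }

        static void demo_drain(struct demo_cblist *rclp)
        {
                struct rcu_head *rhp;

                for (rhp = demo_dequeue(rclp); rhp; rhp = demo_dequeue(rclp))
                        rhp->func(rhp);         /* mirrors "rhp->func(rhp);" above */
        }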
rhp 1691 kernel/rcu/tree_plugin.h static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
rhp 1699 kernel/rcu/tree_plugin.h if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
rhp 1704 kernel/rcu/tree_plugin.h if (rhp)
rhp 1706 kernel/rcu/tree_plugin.h rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
rhp 1721 kernel/rcu/tree_plugin.h static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
rhp 1728 kernel/rcu/tree_plugin.h return rcu_nocb_do_flush_bypass(rdp, rhp, j);
rhp 1762 kernel/rcu/tree_plugin.h static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
rhp 1818 kernel/rcu/tree_plugin.h if (!rcu_nocb_flush_bypass(rdp, rhp, j)) {
rhp 1841 kernel/rcu/tree_plugin.h rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
rhp 2498 kernel/rcu/tree_plugin.h static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
rhp 2504 kernel/rcu/tree_plugin.h static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
rhp 439 kernel/rcu/update.c void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
rhp 443 kernel/rcu/update.c trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
rhp 447 kernel/rcu/update.c #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
rhp 522 kernel/rcu/update.c void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
rhp 527 kernel/rcu/update.c rhp->next = NULL;
rhp 528 kernel/rcu/update.c rhp->func = func;
rhp 531 kernel/rcu/update.c *rcu_tasks_cbs_tail = rhp;
rhp 532 kernel/rcu/update.c rcu_tasks_cbs_tail = &rhp->next;
rhp 6979 kernel/sched/core.c static void sched_free_group_rcu(struct rcu_head *rhp)
rhp 6982 kernel/sched/core.c sched_free_group(container_of(rhp, struct task_group, rcu));
rhp 33 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.h #define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
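Finally, the srcutiny.c call_srcu() and update.c call_rcu_tasks() entries show why these callback lists carry a tail pointer to a ->next field: enqueue is O(1) with no empty-list special case. A stand-alone sketch of that idiom (demo_* names are illustrative, not kernel API):

        #include <linux/types.h>

        static struct rcu_head *demo_cbs_head;
        static struct rcu_head **demo_cbs_tail = &demo_cbs_head;

        static void demo_enqueue(struct rcu_head *rhp)
        {
                rhp->next = NULL;
                *demo_cbs_tail = rhp;           /* mirrors "*rcu_tasks_cbs_tail = rhp;" */
                demo_cbs_tail = &rhp->next;     /* mirrors "rcu_tasks_cbs_tail = &rhp->next;" */
        }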