Searched refs:rhp (Results 1 – 31 of 31) sorted by relevance

/linux-4.4.14/drivers/infiniband/hw/cxgb3/
iwch.h
127 static inline int t3b_device(const struct iwch_dev *rhp) in t3b_device() argument
129 return rhp->rdev.t3cdev_p->type == T3B; in t3b_device()
132 static inline int t3a_device(const struct iwch_dev *rhp) in t3a_device() argument
134 return rhp->rdev.t3cdev_p->type == T3A; in t3a_device()
137 static inline struct iwch_cq *get_chp(struct iwch_dev *rhp, u32 cqid) in get_chp() argument
139 return idr_find(&rhp->cqidr, cqid); in get_chp()
142 static inline struct iwch_qp *get_qhp(struct iwch_dev *rhp, u32 qpid) in get_qhp() argument
144 return idr_find(&rhp->qpidr, qpid); in get_qhp()
147 static inline struct iwch_mr *get_mhp(struct iwch_dev *rhp, u32 mmid) in get_mhp() argument
149 return idr_find(&rhp->mmidr, mmid); in get_mhp()
[all …]
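
Here rhp is the per-device struct iwch_dev back-pointer, and the inline helpers above resolve CQ, QP and MR IDs through per-device idr trees. A minimal usage sketch, mirroring the poll and SGL-mapping call sites further down in this listing; the wrapper name my_resolve() and its error handling are illustrative only:

    /* Sketch only: resolve the QP and MR referenced by a completion via the
     * idr-backed helpers above; a NULL return means the ID is stale. */
    static int my_resolve(struct iwch_dev *rhp, u32 qpid, u32 lkey)
    {
            struct iwch_qp *qhp = get_qhp(rhp, qpid);
            struct iwch_mr *mhp = get_mhp(rhp, lkey >> 8);  /* stag index = lkey >> 8 */

            if (!qhp || !mhp)
                    return -EINVAL;         /* already freed or never registered */
            return 0;
    }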
iwch_provider.c
101 struct iwch_dev *rhp = to_iwch_dev(context->device); in iwch_dealloc_ucontext() local
108 cxio_release_ucontext(&rhp->rdev, &ucontext->uctx); in iwch_dealloc_ucontext()
117 struct iwch_dev *rhp = to_iwch_dev(ibdev); in iwch_alloc_ucontext() local
123 cxio_init_ucontext(&rhp->rdev, &context->uctx); in iwch_alloc_ucontext()
136 remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid); in iwch_destroy_cq()
140 cxio_destroy_cq(&chp->rhp->rdev, &chp->cq); in iwch_destroy_cq()
151 struct iwch_dev *rhp; in iwch_create_cq() local
163 rhp = to_iwch_dev(ibdev); in iwch_create_cq()
170 if (!t3a_device(rhp)) { in iwch_create_cq()
179 if (t3a_device(rhp)) { in iwch_create_cq()
[all …]
iwch_mem.c
52 return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid); in iwch_finish_mem_reg()
55 int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php, in iwch_register_mem() argument
61 if (cxio_register_phys_mem(&rhp->rdev, in iwch_register_mem()
73 cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, in iwch_register_mem()
78 int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php, in iwch_reregister_mem() argument
91 if (cxio_reregister_phys_mem(&rhp->rdev, in iwch_reregister_mem()
103 cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, in iwch_reregister_mem()
111 mhp->attr.pbl_addr = cxio_hal_pblpool_alloc(&mhp->rhp->rdev, in iwch_alloc_pbl()
124 cxio_hal_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, in iwch_free_pbl()
130 return cxio_write_pbl(&mhp->rhp->rdev, pages, in iwch_write_pbl()
iwch_cq.c
44 static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp, in iwch_poll_cq_one() argument
60 qhp = get_qhp(rhp, CQE_QPID(*rd_cqe)); in iwch_poll_cq_one()
69 if (t3a_device(chp->rhp) && credit) { in iwch_poll_cq_one()
72 cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit); in iwch_poll_cq_one()
197 struct iwch_dev *rhp; in iwch_poll_cq() local
204 rhp = chp->rhp; in iwch_poll_cq()
218 err = iwch_poll_cq_one(rhp, chp, wc + npolled); in iwch_poll_cq()
iwch_provider.h
47 struct iwch_dev *rhp; member
77 struct iwch_dev *rhp; member
93 struct iwch_dev *rhp; member
105 struct iwch_dev *rhp; member
163 struct iwch_dev *rhp; member
260 int iwch_modify_qp(struct iwch_dev *rhp,
342 int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
344 int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
iwch_qp.c
201 static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list, in iwch_sgl2pbl_map() argument
209 mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8); in iwch_sgl2pbl_map()
241 rhp->rdev.rnic_info.pbl_base) >> 3) + in iwch_sgl2pbl_map()
255 err = iwch_sgl2pbl_map(qhp->rhp, wr->sg_list, wr->num_sge, pbl_addr, in build_rdma_recv()
302 pbl_addr = cxio_hal_pblpool_alloc(&qhp->rhp->rdev, T3_STAG0_PBL_SIZE); in build_zero_stag_recv()
309 pbl_offset = (pbl_addr - qhp->rhp->rdev.rnic_info.pbl_base) >> 3; in build_zero_stag_recv()
533 struct iwch_dev *rhp; in iwch_bind_mw() local
549 rhp = qhp->rhp; in iwch_bind_mw()
584 err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size); in iwch_bind_mw()
769 return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb); in iwch_post_zb_read()
[all …]
iwch_cm.c
925 err = iwch_modify_qp(ep->com.qp->rhp, in process_mpa_reply()
1484 iwch_modify_qp(ep->com.qp->rhp, ep->com.qp, in peer_close()
1499 iwch_modify_qp(ep->com.qp->rhp, ep->com.qp, in peer_close()
1594 ret = iwch_modify_qp(ep->com.qp->rhp, in peer_abort()
1661 iwch_modify_qp(ep->com.qp->rhp, in close_con_rpl()
1726 iwch_modify_qp(ep->com.qp->rhp, in ec_status()
1756 iwch_modify_qp(ep->com.qp->rhp, in ep_timeout()
1812 if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) || in iwch_accept_cr()
1813 (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) { in iwch_accept_cr()
1845 err = iwch_modify_qp(ep->com.qp->rhp, in iwch_accept_cr()
iwch_ev.c
82 iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, in post_qp_event()
iwch.c
79 ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid); in enable_qp_db()
/linux-4.4.14/drivers/infiniband/hw/cxgb4/
mem.c
369 return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid); in finish_mem_reg()
372 static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php, in register_mem() argument
378 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid, in register_mem()
390 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, in register_mem()
395 static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php, in reregister_mem() argument
405 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid, in reregister_mem()
415 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, in reregister_mem()
423 mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev, in alloc_pbl()
509 struct c4iw_dev *rhp; in c4iw_reregister_phys_mem() local
523 rhp = mhp->rhp; in c4iw_reregister_phys_mem()
[all …]
provider.c
96 struct c4iw_dev *rhp = to_c4iw_dev(context->device); in c4iw_dealloc_ucontext() local
103 c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx); in c4iw_dealloc_ucontext()
112 struct c4iw_dev *rhp = to_c4iw_dev(ibdev); in c4iw_alloc_ucontext() local
125 c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx); in c4iw_alloc_ucontext()
132 rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED; in c4iw_alloc_ucontext()
153 mm->addr = virt_to_phys(rhp->rdev.status_page); in c4iw_alloc_ucontext()
237 struct c4iw_dev *rhp; in c4iw_deallocate_pd() local
241 rhp = php->rhp; in c4iw_deallocate_pd()
243 c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid); in c4iw_deallocate_pd()
244 mutex_lock(&rhp->rdev.stats.lock); in c4iw_deallocate_pd()
[all …]
qp.c
709 spin_lock_irqsave(&qhp->rhp->lock, flags); in ring_kernel_sq_db()
711 if (qhp->rhp->db_state == NORMAL) in ring_kernel_sq_db()
714 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); in ring_kernel_sq_db()
718 spin_unlock_irqrestore(&qhp->rhp->lock, flags); in ring_kernel_sq_db()
726 spin_lock_irqsave(&qhp->rhp->lock, flags); in ring_kernel_rq_db()
728 if (qhp->rhp->db_state == NORMAL) in ring_kernel_rq_db()
731 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); in ring_kernel_rq_db()
735 spin_unlock_irqrestore(&qhp->rhp->lock, flags); in ring_kernel_rq_db()
816 qhp->rhp->rdev.lldi.adapter_type) ? in c4iw_post_send()
843 qhp->rhp->rdev.lldi.ports[0]); in c4iw_post_send()
[all …]
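
qp.c shows the cxgb4 doorbell flow-control pattern: the doorbell is rung only while rhp->db_state is NORMAL; otherwise the QP is queued on rhp->db_fc_list to be rung later once the hardware doorbell FIFO has drained. A sketch of that decision, assuming a hypothetical ring_hw_db() stand-in for the driver's real doorbell write and omitting any additional per-QP locking:

    static void my_ring_db(struct c4iw_qp *qhp, u16 inc)
    {
            unsigned long flags;

            spin_lock_irqsave(&qhp->rhp->lock, flags);
            if (qhp->rhp->db_state == NORMAL)
                    ring_hw_db(qhp, inc);                  /* device accepting doorbells */
            else
                    add_to_fc_list(&qhp->rhp->db_fc_list,  /* defer until recovery */
                                   &qhp->db_fc_entry);
            spin_unlock_irqrestore(&qhp->rhp->lock, flags);
    }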
iw_cxgb4.h
278 static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid) in get_chp() argument
280 return idr_find(&rhp->cqidr, cqid); in get_chp()
283 static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid) in get_qhp() argument
285 return idr_find(&rhp->qpidr, qpid); in get_qhp()
288 static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid) in get_mhp() argument
290 return idr_find(&rhp->mmidr, mmid); in get_mhp()
293 static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr, in _insert_handle() argument
300 spin_lock_irq(&rhp->lock); in _insert_handle()
306 spin_unlock_irq(&rhp->lock); in _insert_handle()
314 static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr, in insert_handle() argument
[all …]
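
The truncated _insert_handle()/insert_handle() pair above is the counterpart of the get_*() lookups: the handle is stored in the per-device idr under rhp->lock. A minimal sketch, assuming idr_preload() around the locked idr_alloc() (only the locking is visible in the lines above); my_insert_handle() and my_remove_handle() are illustrative names, not the driver's exact code:

    static int my_insert_handle(struct c4iw_dev *rhp, struct idr *idr,
                                void *handle, u32 id)
    {
            int ret;

            idr_preload(GFP_KERNEL);                /* preallocate outside the lock */
            spin_lock_irq(&rhp->lock);
            ret = idr_alloc(idr, handle, id, id + 1, GFP_NOWAIT);
            spin_unlock_irq(&rhp->lock);
            idr_preload_end();
            return ret < 0 ? ret : 0;
    }

    static void my_remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
    {
            spin_lock_irq(&rhp->lock);
            idr_remove(idr, id);
            spin_unlock_irq(&rhp->lock);
    }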
cq.c
355 qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe)); in c4iw_flush_hw_cq()
697 qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe)); in c4iw_poll_cq_one()
855 remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid); in c4iw_destroy_cq()
861 destroy_cq(&chp->rhp->rdev, &chp->cq, in c4iw_destroy_cq()
874 struct c4iw_dev *rhp; in c4iw_create_cq() local
886 rhp = to_c4iw_dev(ibdev); in c4iw_create_cq()
888 if (vector >= rhp->rdev.lldi.nciq) in c4iw_create_cq()
912 hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size); in c4iw_create_cq()
932 ret = create_cq(&rhp->rdev, &chp->cq, in c4iw_create_cq()
933 ucontext ? &ucontext->uctx : &rhp->rdev.uctx); in c4iw_create_cq()
[all …]
device.c
1372 spin_lock_irq(&qp->rhp->lock); in recover_lost_dbs()
1374 ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0], in recover_lost_dbs()
1384 spin_unlock_irq(&qp->rhp->lock); in recover_lost_dbs()
1389 ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0], in recover_lost_dbs()
1400 spin_unlock_irq(&qp->rhp->lock); in recover_lost_dbs()
1405 spin_unlock_irq(&qp->rhp->lock); in recover_lost_dbs()
1408 while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) { in recover_lost_dbs()
ev.c
100 c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, in post_qp_event()
cm.c
1529 err = c4iw_modify_qp(ep->com.qp->rhp, in process_mpa_reply()
1544 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in process_mpa_reply()
1564 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in process_mpa_reply()
1768 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in rx_data()
2608 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in peer_close()
2626 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in peer_close()
2719 ret = c4iw_modify_qp(ep->com.qp->rhp, in peer_abort()
2808 c4iw_modify_qp(ep->com.qp->rhp, in close_con_rpl()
2845 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in terminate()
3001 err = c4iw_modify_qp(ep->com.qp->rhp, in c4iw_accept_cr()
[all …]
/linux-4.4.14/include/trace/events/
rcu.h
433 TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
436 TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
440 __field(void *, rhp)
448 __entry->rhp = rhp;
449 __entry->func = rhp->func;
455 __entry->rcuname, __entry->rhp, __entry->func,
469 TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
472 TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
476 __field(void *, rhp)
484 __entry->rhp = rhp;
[all …]
/linux-4.4.14/kernel/rcu/
update.c
430 void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp, in do_trace_rcu_torture_read() argument
434 trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c); in do_trace_rcu_torture_read()
438 #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ argument
537 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func) in call_rcu_tasks() argument
542 rhp->next = NULL; in call_rcu_tasks()
543 rhp->func = func; in call_rcu_tasks()
546 *rcu_tasks_cbs_tail = rhp; in call_rcu_tasks()
547 rcu_tasks_cbs_tail = &rhp->next; in call_rcu_tasks()
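
call_rcu_tasks() above appends rhp to a singly linked callback list through a tail pointer: the new element is linked through *rcu_tasks_cbs_tail, then the tail is advanced to the new element's ->next field. A stripped-down sketch of that enqueue idiom (the list names are hypothetical; the lock and kthread wakeup of the real function are omitted):

    #include <linux/rcupdate.h>

    static struct rcu_head *my_cbs_head;
    static struct rcu_head **my_cbs_tail = &my_cbs_head;

    static void my_enqueue_cb(struct rcu_head *rhp, rcu_callback_t func)
    {
            rhp->next = NULL;
            rhp->func = func;
            *my_cbs_tail = rhp;           /* link after the current last element */
            my_cbs_tail = &rhp->next;     /* tail now addresses the new ->next */
    }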
tree_plugin.h
1616 static void rcu_oom_callback(struct rcu_head *rhp) in rcu_oom_callback() argument
1884 struct rcu_head *rhp; in rcu_nocb_cpu_needs_barrier() local
1902 rhp = READ_ONCE(rdp->nocb_head); in rcu_nocb_cpu_needs_barrier()
1903 if (!rhp) in rcu_nocb_cpu_needs_barrier()
1904 rhp = READ_ONCE(rdp->nocb_gp_head); in rcu_nocb_cpu_needs_barrier()
1905 if (!rhp) in rcu_nocb_cpu_needs_barrier()
1906 rhp = READ_ONCE(rdp->nocb_follower_head); in rcu_nocb_cpu_needs_barrier()
1909 if (!READ_ONCE(rdp->nocb_kthread) && rhp && in rcu_nocb_cpu_needs_barrier()
1913 cpu, rhp->func); in rcu_nocb_cpu_needs_barrier()
1930 struct rcu_head *rhp, in __call_rcu_nocb_enqueue() argument
[all …]
rcutorture.c
809 static void rcu_torture_cbflood_cb(struct rcu_head *rhp) in rcu_torture_cbflood_cb() argument
824 struct rcu_head *rhp; in rcu_torture_cbflood() local
831 rhp = vmalloc(sizeof(*rhp) * in rcu_torture_cbflood()
833 err = !rhp; in rcu_torture_cbflood()
846 cur_ops->call(&rhp[i * cbflood_n_per_burst + j], in rcu_torture_cbflood()
855 vfree(rhp); in rcu_torture_cbflood()
1661 static void rcu_torture_leak_cb(struct rcu_head *rhp) in rcu_torture_leak_cb() argument
1665 static void rcu_torture_err_cb(struct rcu_head *rhp) in rcu_torture_err_cb() argument
tree.h
612 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
tree.c
3029 static void rcu_leak_callback(struct rcu_head *rhp) in rcu_leak_callback() argument
3991 static void rcu_barrier_callback(struct rcu_head *rhp) in rcu_barrier_callback() argument
3993 struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head); in rcu_barrier_callback()
/linux-4.4.14/drivers/media/usb/pvrusb2/
pvrusb2-v4l2.c
56 struct pvr2_ioread *rhp; member
941 if (fhp->rhp) { in pvr2_v4l2_release()
944 sp = pvr2_ioread_get_stream(fhp->rhp); in pvr2_v4l2_release()
946 pvr2_ioread_destroy(fhp->rhp); in pvr2_v4l2_release()
947 fhp->rhp = NULL; in pvr2_v4l2_release()
1070 if (fh->rhp) return 0; in pvr2_v4l2_iosetup()
1086 fh->rhp = pvr2_channel_create_mpeg_stream(fh->pdi->stream); in pvr2_v4l2_iosetup()
1087 if (!fh->rhp) { in pvr2_v4l2_iosetup()
1097 return pvr2_ioread_set_enabled(fh->rhp,!0); in pvr2_v4l2_iosetup()
1140 if (!fh->rhp) { in pvr2_v4l2_read()
[all …]
/linux-4.4.14/include/linux/
rcupdate.h
88 struct rcu_head *rhp,
110 struct rcu_head *rhp,
115 #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ argument
/linux-4.4.14/kernel/
pid.c
253 static void delayed_put_pid(struct rcu_head *rhp) in delayed_put_pid() argument
255 struct pid *pid = container_of(rhp, struct pid, rcu); in delayed_put_pid()
exit.c
159 static void delayed_put_task_struct(struct rcu_head *rhp) in delayed_put_task_struct() argument
161 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); in delayed_put_task_struct()
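
delayed_put_pid() and delayed_put_task_struct() both follow the standard call_rcu() shape: an rcu_head embedded in the object is handed to call_rcu(), and the callback recovers the enclosing object with container_of() once a grace period has elapsed. A generic sketch, with struct foo and its helpers as hypothetical stand-ins:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo {
            int data;
            struct rcu_head rcu;    /* embedded head, passed to the callback as rhp */
    };

    static void foo_free_rcu(struct rcu_head *rhp)
    {
            struct foo *f = container_of(rhp, struct foo, rcu);

            kfree(f);               /* safe: all pre-existing readers have finished */
    }

    static void foo_release(struct foo *f)
    {
            /* caller has already unlinked f from every RCU-protected structure */
            call_rcu(&f->rcu, foo_free_rcu);
    }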
/linux-4.4.14/Documentation/RCU/
rcuref.txt
103 void el_free(struct rcu_head *rhp)
/linux-4.4.14/fs/xfs/
xfs_log_recover.c
3654 struct hlist_head *rhp; in xlog_recover_ophdr_to_trans() local
3657 rhp = &rhash[XLOG_RHASH(tid)]; in xlog_recover_ophdr_to_trans()
3658 hlist_for_each_entry(trans, rhp, r_list) { in xlog_recover_ophdr_to_trans()
3681 hlist_add_head(&trans->r_list, rhp); in xlog_recover_ophdr_to_trans()
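
xlog_recover_ophdr_to_trans() uses rhp as a hash-bucket head: it walks the bucket with hlist_for_each_entry() looking for a transaction with a matching tid and, on a miss, allocates a new one and hlist_add_head()s it into the same bucket. A minimal sketch of the lookup half (struct my_trans and the match field are hypothetical):

    #include <linux/types.h>
    #include <linux/list.h>

    struct my_trans {
            struct hlist_node r_list;
            u32 r_tid;
    };

    static struct my_trans *my_lookup(struct hlist_head *rhp, u32 tid)
    {
            struct my_trans *trans;

            hlist_for_each_entry(trans, rhp, r_list)   /* walk one hash bucket */
                    if (trans->r_tid == tid)
                            return trans;
            return NULL;    /* caller allocates and hlist_add_head()s a new entry */
    }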
/linux-4.4.14/kernel/sched/
core.c
7743 static void sched_free_group_rcu(struct rcu_head *rhp) in sched_free_group_rcu() argument
7746 sched_free_group(container_of(rhp, struct task_group, rcu)); in sched_free_group_rcu()
/linux-4.4.14/
CREDITS
2872 E: rhp@draper.net