/linux-4.4.14/drivers/infiniband/hw/mthca/ |
D | mthca_provider.c |
     66  struct mthca_dev *mdev = to_mdev(ibdev);  in mthca_query_device()
    155  err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,  in mthca_query_port()
    167  props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len;  in mthca_query_port()
    169  props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len;  in mthca_query_port()
    194  if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))  in mthca_modify_device()
    197  mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);  in mthca_modify_device()
    211  if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))  in mthca_modify_port()
    224  err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port);  in mthca_modify_port()
    228  mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);  in mthca_modify_port()
    248  err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,  in mthca_query_pkey()
    [all …]
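A minimal sketch (not taken from the kernel source) of the call pattern these mthca_provider.c fragments trace: a verbs entry point receives the generic struct ib_device * and converts it to the driver-private struct mthca_dev before touching hardware-specific state. The function name example_query_port and its body are hypothetical; the two limits accesses mirror lines 167 and 169 above.

    static int example_query_port(struct ib_device *ibdev, u8 port,
                                  struct ib_port_attr *props)
    {
            struct mthca_dev *mdev = to_mdev(ibdev);  /* generic -> private */

            /* Per-device limits live in the private structure (cf. lines 167/169). */
            props->gid_tbl_len  = mdev->limits.gid_table_len;
            props->pkey_tbl_len = mdev->limits.pkey_table_len;
            return 0;
    }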
|
D | mthca_mad.c |
    120  mthca_update_rate(to_mdev(ibdev), port_num);  in smp_snoop()
    121  update_sm_ah(to_mdev(ibdev), port_num,  in smp_snoop()
    155  mutex_lock(&to_mdev(dev)->cap_mask_mutex);  in node_desc_override()
    157  mutex_unlock(&to_mdev(dev)->cap_mask_mutex);  in node_desc_override()
    219  forward_trap(to_mdev(ibdev), port_num, in_mad);  in mthca_process_mad()
    259  err = mthca_MAD_IFC(to_mdev(ibdev),  in mthca_process_mad()
    266  mthca_err(to_mdev(ibdev), "MAD_IFC returned %d\n", err);  in mthca_process_mad()
|
D | mthca_cq.c |
    340  if (!mthca_is_memfree(to_mdev(cq->ibcq.device)) &&  in mthca_cq_resize_copy_cqes()
    665  struct mthca_dev *dev = to_mdev(ibcq->device);  in mthca_poll_cq()
    738  mthca_write64(dbhi, 0xffffffff, to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,  in mthca_tavor_arm_cq()
    739  MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock));  in mthca_tavor_arm_cq()
    770  to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL,  in mthca_arbel_arm_cq()
    771  MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock));  in mthca_arbel_arm_cq()
|
D | mthca_srq.c |
    367  struct mthca_dev *dev = to_mdev(ibsrq->device);  in mthca_modify_srq()
    390  struct mthca_dev *dev = to_mdev(ibsrq->device);  in mthca_query_srq()
    478  struct mthca_dev *dev = to_mdev(ibsrq->device);  in mthca_tavor_post_srq_recv()
    578  struct mthca_dev *dev = to_mdev(ibsrq->device);  in mthca_arbel_post_srq_recv()
|
D | mthca_mcg.c |
    122  struct mthca_dev *dev = to_mdev(ibqp->device);  in mthca_multicast_attach()
    216  struct mthca_dev *dev = to_mdev(ibqp->device);  in mthca_multicast_detach()
|
D | mthca_qp.c |
     427  struct mthca_dev *dev = to_mdev(ibqp->device);  in mthca_query_qp()
     547  struct mthca_dev *dev = to_mdev(ibqp->device);  in __mthca_modify_qp()
     845  struct mthca_dev *dev = to_mdev(ibqp->device);  in mthca_modify_qp()
    1605  struct mthca_dev *dev = to_mdev(ibqp->device);  in mthca_tavor_post_send()
    1808  struct mthca_dev *dev = to_mdev(ibqp->device);  in mthca_tavor_post_receive()
    1919  struct mthca_dev *dev = to_mdev(ibqp->device);  in mthca_arbel_post_send()
    2159  struct mthca_dev *dev = to_mdev(ibqp->device);  in mthca_arbel_post_receive()
|
D | mthca_av.c |
    295  struct mthca_dev *dev = to_mdev(ibah->device);  in mthca_ah_query()
|
D | mthca_mr.c |
    721  struct mthca_dev *dev = to_mdev(ibfmr->device);  in mthca_tavor_map_phys_fmr()
    762  struct mthca_dev *dev = to_mdev(ibfmr->device);  in mthca_arbel_map_phys_fmr()
|
D | mthca_dev.h |
    587  static inline struct mthca_dev *to_mdev(struct ib_device *ibdev)  in to_mdev() function
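The hit above shows only the signature. A minimal sketch of the likely body, assuming the conventional container_of() idiom and assuming struct mthca_dev embeds its struct ib_device in a member named ib_dev (neither detail appears in the listing):

    static inline struct mthca_dev *to_mdev(struct ib_device *ibdev)
    {
            /* Recover the driver-private device that embeds the generic one. */
            return container_of(ibdev, struct mthca_dev, ib_dev);
    }

This is also why each driver below can define its own to_mdev(): the helper is static inline in a private header (or a file-local macro), so the identical name never clashes across drivers.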
|
/linux-4.4.14/drivers/infiniband/hw/mlx4/ |
D | mr.c |
     66  err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,  in mlx4_ib_get_dma_mr()
     71  err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);  in mlx4_ib_get_dma_mr()
     81  (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);  in mlx4_ib_get_dma_mr()
    137  struct mlx4_ib_dev *dev = to_mdev(pd->device);  in mlx4_ib_reg_user_mr()
    177  (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);  in mlx4_ib_reg_user_mr()
    193  struct mlx4_ib_dev *dev = to_mdev(mr->device);  in mlx4_ib_rereg_user_mr()
    327  ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);  in mlx4_ib_dereg_mr()
    339  struct mlx4_ib_dev *dev = to_mdev(pd->device);  in mlx4_ib_alloc_mw()
    395  mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);  in mlx4_ib_dealloc_mw()
    405  struct mlx4_ib_dev *dev = to_mdev(pd->device);  in mlx4_ib_alloc_mr()
    [all …]
|
D | main.c |
    130  struct mlx4_ib_dev *ibdev = to_mdev(device);  in mlx4_ib_get_netdev()
    197  struct mlx4_ib_dev *ibdev = to_mdev(device);  in mlx4_ib_add_gid()
    271  struct mlx4_ib_dev *ibdev = to_mdev(device);  in mlx4_ib_del_gid()
    363  struct mlx4_ib_dev *dev = to_mdev(ibdev);  in mlx4_ib_query_device()
    397  err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,  in mlx4_ib_query_device()
    510  struct mlx4_dev *dev = to_mdev(device)->dev;  in mlx4_ib_port_link_layer()
    534  if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)  in ib_link_query_port()
    537  err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,  in ib_link_query_port()
    553  props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];  in ib_link_query_port()
    554  props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;  in ib_link_query_port()
    [all …]
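Note the two-level dereference running through the mlx4 fragments: to_mdev() yields the IB-layer wrapper (struct mlx4_ib_dev), and the wrapper's dev member is the mlx4_core handle that the low-level API consumes. A sketch built from the main.c:553 fragment above; the helper name is hypothetical:

    static int example_gid_table_len(struct ib_device *ibdev, int port)
    {
            struct mlx4_ib_dev *dev = to_mdev(ibdev);

            /* dev->dev is the mlx4_core device; capabilities hang off it. */
            return dev->dev->caps.gid_table_len[port];
    }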
|
D | cm.c |
    146  struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;  in id_map_find_by_sl_id()
    194  struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;  in id_map_find_del()
    212  struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;  in sl_id_map_add()
    247  struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;  in id_map_alloc()
    258  ent->dev = to_mdev(ibdev);  in id_map_alloc()
    262  spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);  in id_map_alloc()
    287  struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;  in id_map_get()
    303  struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;  in schedule_delayed()
|
D | cq.c |
     95  struct mlx4_ib_dev *dev = to_mdev(cq->device);  in mlx4_ib_modify_cq()
    177  struct mlx4_ib_dev *dev = to_mdev(ibdev);  in mlx4_ib_create_cq()
    374  struct mlx4_ib_dev *dev = to_mdev(ibcq->device);  in mlx4_ib_resize_cq()
    475  struct mlx4_ib_dev *dev = to_mdev(cq->device);  in mlx4_ib_destroy_cq()
    701  struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);  in mlx4_ib_poll_one()
    721  mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,  in mlx4_ib_poll_one()
    739  msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,  in mlx4_ib_poll_one()
    855  if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {  in mlx4_ib_poll_one()
    898  struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);  in mlx4_ib_poll_cq()
    928  to_mdev(ibcq->device)->uar_map,  in mlx4_ib_arm_cq()
    [all …]
|
D | srq.c |
     76  struct mlx4_ib_dev *dev = to_mdev(pd->device);  in mlx4_ib_create_srq()
    236  struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);  in mlx4_ib_modify_srq()
    261  struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);  in mlx4_ib_query_srq()
    279  struct mlx4_ib_dev *dev = to_mdev(srq->device);  in mlx4_ib_destroy_srq()
    324  struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device);  in mlx4_ib_post_srq_recv()
|
D | mad.c |
    226  struct mlx4_ib_dev *dev = to_mdev(ibdev);  in smp_snoop()
    346  spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);  in node_desc_override()
    348  spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);  in node_desc_override()
    403  struct mlx4_ib_dev *dev = to_mdev(ibdev);  in mlx4_ib_find_real_gid()
    608  struct mlx4_ib_dev *dev = to_mdev(ibdev);  in mlx4_ib_demux_mad()
    749  forward_trap(to_mdev(ibdev), port_num, in_mad);  in ib_process_mad()
    782  err = mlx4_MAD_IFC(to_mdev(ibdev),  in ib_process_mad()
    791  if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))  in ib_process_mad()
    794  if (!mlx4_is_slave(to_mdev(ibdev)->dev))  in ib_process_mad()
    827  struct mlx4_ib_dev *dev = to_mdev(ibdev);  in iboe_process_mad()
    [all …]
|
D | ah.c |
    46  struct mlx4_dev *dev = to_mdev(pd->device)->dev;  in create_ib_ah()
    75  struct mlx4_ib_dev *ibdev = to_mdev(pd->device);  in create_iboe_ah()
|
D | qp.c |
    1148  if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))  in mlx4_ib_create_qp()
    1163  err = create_qp_common(to_mdev(pd->device), pd, init_attr,  in mlx4_ib_create_qp()
    1180  err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,  in mlx4_ib_create_qp()
    1181  get_sqp_num(to_mdev(pd->device), init_attr),  in mlx4_ib_create_qp()
    1201  struct mlx4_ib_dev *dev = to_mdev(qp->device);  in mlx4_ib_destroy_qp()
    1514  struct mlx4_ib_dev *dev = to_mdev(ibqp->device);  in __mlx4_ib_modify_qp()
    1646  u8 port_num = mlx4_is_bonded(to_mdev(ibqp->device)->dev) ? 1 :  in __mlx4_ib_modify_qp()
    2028  struct mlx4_ib_dev *dev = to_mdev(ibqp->device);  in mlx4_ib_modify_qp()
    2146  struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);  in build_sriov_qp0_header()
    2292  if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {  in build_mlx_header()
    [all …]
|
D | mcg.c |
    889  struct mlx4_ib_dev *dev = to_mdev(ibdev);  in mlx4_ib_mcg_demux_handler()
    938  struct mlx4_ib_dev *dev = to_mdev(ibdev);  in mlx4_ib_mcg_multiplex_handler()
|
D | mlx4_ib.h |
    610  static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)  in to_mdev() function
|
D | alias_GUID.c |
    490  struct mlx4_ib_dev *dev = to_mdev(ibdev);  in set_guid_rec()
|
/linux-4.4.14/drivers/infiniband/hw/mlx5/ |
D | main.c |
     69  struct mlx5_ib_dev *dev = to_mdev(device);  in mlx5_ib_port_link_layer()
     94  if (mlx5_use_mad_ifc(to_mdev(ibdev)))  in mlx5_get_vport_access_method()
    107  struct mlx5_ib_dev *dev = to_mdev(ibdev);  in mlx5_query_system_image_guid()
    131  struct mlx5_ib_dev *dev = to_mdev(ibdev);  in mlx5_query_max_pkeys()
    152  struct mlx5_ib_dev *dev = to_mdev(ibdev);  in mlx5_query_vendor_id()
    210  struct mlx5_ib_dev *dev = to_mdev(ibdev);  in mlx5_ib_query_device()
    317  struct mlx5_ib_dev *dev = to_mdev(ibdev);  in translate_active_width()
    405  struct mlx5_ib_dev *dev = to_mdev(ibdev);  in mlx5_query_hca_port()
    491  struct mlx5_ib_dev *dev = to_mdev(ibdev);  in mlx5_ib_query_gid()
    510  struct mlx5_ib_dev *dev = to_mdev(ibdev);  in mlx5_ib_query_pkey()
    [all …]
|
D | mad.c |
    102  err = mlx5_MAD_IFC(to_mdev(ibdev),  in mlx5_ib_process_mad()
    162  err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,  in mlx5_query_mad_ifc_smp_attr_node_info()
    301  err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,  in mlx5_query_mad_ifc_pkey()
    330  err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,  in mlx5_query_mad_ifc_gids()
    341  err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,  in mlx5_query_mad_ifc_gids()
    357  struct mlx5_ib_dev *dev = to_mdev(ibdev);  in mlx5_query_mad_ifc_port()
|
D | srq.c |
     80  struct mlx5_ib_dev *dev = to_mdev(pd->device);  in create_srq_user()
    238  struct mlx5_ib_dev *dev = to_mdev(pd->device);  in mlx5_ib_create_srq()
    346  struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);  in mlx5_ib_modify_srq()
    371  struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);  in mlx5_ib_query_srq()
    395  struct mlx5_ib_dev *dev = to_mdev(srq->device);  in mlx5_ib_destroy_srq()
|
D | odp.c |
    157  struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);  in mlx5_ib_page_fault_resume()
    183  struct mlx5_ib_dev *mib_dev = to_mdev(qp->ibqp.pd->device);  in pagefault_single_data_segment()
    387  struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);  in mlx5_ib_mr_initiator_pfault_handler()
    492  struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);  in mlx5_ib_mr_responder_pfault_handler()
    532  struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);  in mlx5_ib_mr_wqe_pfault_handler()
|
D | cq.c |
      50  struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);  in mlx5_ib_cq_event()
     174  struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);  in handle_responder()
     414  struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);  in mlx5_poll_one()
     583  struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;  in mlx5_ib_arm_cq()
     754  struct mlx5_ib_dev *dev = to_mdev(ibdev);  in mlx5_ib_create_cq()
     845  struct mlx5_ib_dev *dev = to_mdev(cq->device);  in mlx5_ib_destroy_cq()
     932  struct mlx5_ib_dev *dev = to_mdev(cq->device);  in mlx5_ib_modify_cq()
    1026  struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);  in copy_resize_cqes()
    1082  struct mlx5_ib_dev *dev = to_mdev(ibcq->device);  in mlx5_ib_resize_cq()
|
D | mr.c |
     637  struct mlx5_ib_dev *dev = to_mdev(pd->device);  in mlx5_ib_get_dma_mr()
     701  struct mlx5_ib_dev *dev = to_mdev(pd->device);  in prep_umr_reg_wqe()
     763  struct mlx5_ib_dev *dev = to_mdev(pd->device);  in reg_umr()
     982  struct mlx5_ib_dev *dev = to_mdev(pd->device);  in reg_create()
    1044  struct mlx5_ib_dev *dev = to_mdev(pd->device);  in mlx5_ib_reg_user_mr()
    1216  struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);  in clean_mr()
    1259  struct mlx5_ib_dev *dev = to_mdev(ibmr->device);  in mlx5_ib_dereg_mr()
    1300  struct mlx5_ib_dev *dev = to_mdev(pd->device);  in mlx5_ib_alloc_mr()
|
D | qp.c |
     120  struct mlx5_ib_dev *dev = to_mdev(ibdev);  in mlx5_ib_read_user_wqe()
    1227  dev = to_mdev(pd->device);  in mlx5_ib_create_qp()
    1236  dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);  in mlx5_ib_create_qp()
    1301  struct mlx5_ib_dev *dev = to_mdev(qp->device);  in mlx5_ib_destroy_qp()
    1556  struct mlx5_ib_dev *dev = to_mdev(ibqp->device);  in __mlx5_ib_modify_qp()
    1763  struct mlx5_ib_dev *dev = to_mdev(ibqp->device);  in mlx5_ib_modify_qp()
    2441  mlx5_ib_warn(to_mdev(qp->ibqp.device),  in set_reg_wr()
    2591  struct mlx5_ib_dev *dev = to_mdev(ibqp->device);  in mlx5_ib_post_send()
    3027  struct mlx5_ib_dev *dev = to_mdev(ibqp->device);  in mlx5_ib_query_qp()
    3128  struct mlx5_ib_dev *dev = to_mdev(ibdev);  in mlx5_ib_alloc_xrcd()
    [all …]
|
D | mlx5_ib.h |
    446  static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)  in to_mdev() function
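mlx5 repeats the mthca/mlx4 pattern, and here the core handle is reached through the wrapper's mdev member, as the cq.c:583 fragment above shows. A hedged sketch; the ib_dev member name is an assumption, and example_core_dev() is hypothetical:

    static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
    {
            return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
    }

    /* Mirrors cq.c:583 above: unwrap twice to reach the mlx5_core device. */
    static struct mlx5_core_dev *example_core_dev(struct ib_cq *ibcq)
    {
            return to_mdev(ibcq->device)->mdev;
    }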
|
/linux-4.4.14/drivers/staging/most/hdm-usb/ |
D | hdm_usb.c |
    137  #define to_mdev(d) container_of(d, struct most_dev, iface)  macro
    280  mdev = to_mdev(iface);  in hdm_poison_channel()
    393  mdev = to_mdev(mbo->ifp);  in hdm_write_completion()
    558  mdev = to_mdev(mbo->ifp);  in hdm_read_completion()
    646  mdev = to_mdev(iface);  in hdm_enqueue()
    738  mdev = to_mdev(iface);  in hdm_configure_channel()
    859  mdev = to_mdev(iface);  in hdm_request_netinfo()
    931  mdev = to_mdev(mbo->ifp);  in wq_clear_halt()
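Unlike the InfiniBand drivers, the MOST USB HDM spells the conversion as a macro, and line 137 shows it in full: struct most_dev embeds its struct most_interface in a member named iface, so container_of() walks back from the interface pointer to the enclosing device. A sketch of that layout (other members elided):

    struct most_dev {
            /* ... driver-private state elided ... */
            struct most_interface iface;    /* embedded generic interface */
    };

    #define to_mdev(d) container_of(d, struct most_dev, iface)

Callers such as hdm_poison_channel() and hdm_enqueue() above then recover the private device with mdev = to_mdev(iface).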
|
/linux-4.4.14/drivers/media/platform/s5p-tv/ |
D | mixer_drv.c |
    355  struct mxr_device *mdev = to_mdev(dev);  in mxr_runtime_resume()
    396  struct mxr_device *mdev = to_mdev(dev);  in mxr_runtime_suspend()
    478  struct mxr_device *mdev = to_mdev(dev);  in mxr_remove()
|
D | mixer.h |
    271  static inline struct mxr_device *to_mdev(struct device *dev)  in to_mdev() function
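Here the conversion starts from a bare struct device * rather than an embedded subsystem object, so a direct container_of() on the argument would only work if struct mxr_device embedded the struct device itself. A plausible sketch, assuming the driver reaches its private structure through the device's drvdata; the v4l2_dev member and the dev_get_drvdata() route are assumptions, not shown in the listing:

    static inline struct mxr_device *to_mdev(struct device *dev)
    {
            struct v4l2_device *vdev = dev_get_drvdata(dev);

            /* Assumed layout: struct mxr_device embeds a struct v4l2_device. */
            return container_of(vdev, struct mxr_device, v4l2_dev);
    }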
|