Lines Matching refs:inbox
673 struct mlx4_cmd_mailbox *inbox) in update_pkey_index() argument
675 u8 sched = *(u8 *)(inbox->buf + 64); in update_pkey_index()
676 u8 orig_index = *(u8 *)(inbox->buf + 35); in update_pkey_index()
684 *(u8 *)(inbox->buf + 35) = new_index; in update_pkey_index()
687 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, in update_gid() argument
690 struct mlx4_qp_context *qp_ctx = inbox->buf + 8; in update_gid()
691 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf); in update_gid()
731 struct mlx4_cmd_mailbox *inbox, in update_vport_qp_param() argument
734 struct mlx4_qp_context *qpc = inbox->buf + 8; in update_vport_qp_param()
760 *(__be32 *)inbox->buf = in update_vport_qp_param()
761 cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) | in update_vport_qp_param()
2218 struct mlx4_cmd_mailbox *inbox, in mlx4_ALLOC_RES_wrapper() argument
2533 struct mlx4_cmd_mailbox *inbox, in mlx4_FREE_RES_wrapper() argument
2679 struct mlx4_cmd_mailbox *inbox, in mlx4_SW2HW_MPT_wrapper() argument
2687 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz; in mlx4_SW2HW_MPT_wrapper()
2699 if (!mr_is_region(inbox->buf)) { in mlx4_SW2HW_MPT_wrapper()
2705 pd = mr_get_pd(inbox->buf); in mlx4_SW2HW_MPT_wrapper()
2712 if (mr_is_fmr(inbox->buf)) { in mlx4_SW2HW_MPT_wrapper()
2714 if (mr_is_bind_enabled(inbox->buf)) { in mlx4_SW2HW_MPT_wrapper()
2719 if (!mr_is_region(inbox->buf)) { in mlx4_SW2HW_MPT_wrapper()
2725 phys = mr_phys_mpt(inbox->buf); in mlx4_SW2HW_MPT_wrapper()
2732 mr_get_mtt_size(inbox->buf), mtt); in mlx4_SW2HW_MPT_wrapper()
2739 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SW2HW_MPT_wrapper()
2762 struct mlx4_cmd_mailbox *inbox, in mlx4_HW2SW_MPT_wrapper() argument
2776 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_HW2SW_MPT_wrapper()
2794 struct mlx4_cmd_mailbox *inbox, in mlx4_QUERY_MPT_wrapper() argument
2830 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_QUERY_MPT_wrapper()
2872 struct mlx4_cmd_mailbox *inbox);
2876 struct mlx4_cmd_mailbox *inbox, in mlx4_RST2INIT_QP_wrapper() argument
2884 struct mlx4_qp_context *qpc = inbox->buf + 8; in mlx4_RST2INIT_QP_wrapper()
2896 err = adjust_qp_sched_queue(dev, slave, qpc, inbox); in mlx4_RST2INIT_QP_wrapper()
2939 update_pkey_index(dev, slave, inbox); in mlx4_RST2INIT_QP_wrapper()
2940 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_RST2INIT_QP_wrapper()
3014 struct mlx4_cmd_mailbox *inbox, in mlx4_SW2HW_EQ_wrapper() argument
3021 struct mlx4_eq_context *eqc = inbox->buf; in mlx4_SW2HW_EQ_wrapper()
3042 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SW2HW_EQ_wrapper()
3063 struct mlx4_cmd_mailbox *inbox, in mlx4_CONFIG_DEV_wrapper() argument
3073 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_CONFIG_DEV_wrapper()
3104 struct mlx4_cmd_mailbox *inbox, in verify_qp_parameters() argument
3114 qp_ctx = inbox->buf + 8; in verify_qp_parameters()
3116 optpar = be32_to_cpu(*(__be32 *) inbox->buf); in verify_qp_parameters()
3184 struct mlx4_cmd_mailbox *inbox, in mlx4_WRITE_MTT_wrapper() argument
3189 __be64 *page_list = inbox->buf; in mlx4_WRITE_MTT_wrapper()
3222 struct mlx4_cmd_mailbox *inbox, in mlx4_HW2SW_EQ_wrapper() argument
3239 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_HW2SW_EQ_wrapper()
3328 struct mlx4_cmd_mailbox *inbox, in mlx4_QUERY_EQ_wrapper() argument
3346 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_QUERY_EQ_wrapper()
3355 struct mlx4_cmd_mailbox *inbox, in mlx4_SW2HW_CQ_wrapper() argument
3361 struct mlx4_cq_context *cqc = inbox->buf; in mlx4_SW2HW_CQ_wrapper()
3375 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SW2HW_CQ_wrapper()
3393 struct mlx4_cmd_mailbox *inbox, in mlx4_HW2SW_CQ_wrapper() argument
3404 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_HW2SW_CQ_wrapper()
3418 struct mlx4_cmd_mailbox *inbox, in mlx4_QUERY_CQ_wrapper() argument
3433 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_QUERY_CQ_wrapper()
3442 struct mlx4_cmd_mailbox *inbox, in handle_resize() argument
3450 struct mlx4_cq_context *cqc = inbox->buf; in handle_resize()
3469 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in handle_resize()
3490 struct mlx4_cmd_mailbox *inbox, in mlx4_MODIFY_CQ_wrapper() argument
3506 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq); in mlx4_MODIFY_CQ_wrapper()
3510 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_MODIFY_CQ_wrapper()
3531 struct mlx4_cmd_mailbox *inbox, in mlx4_SW2HW_SRQ_wrapper() argument
3539 struct mlx4_srq_context *srqc = inbox->buf; in mlx4_SW2HW_SRQ_wrapper()
3556 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SW2HW_SRQ_wrapper()
3576 struct mlx4_cmd_mailbox *inbox, in mlx4_HW2SW_SRQ_wrapper() argument
3587 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_HW2SW_SRQ_wrapper()
3605 struct mlx4_cmd_mailbox *inbox, in mlx4_QUERY_SRQ_wrapper() argument
3620 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_QUERY_SRQ_wrapper()
3628 struct mlx4_cmd_mailbox *inbox, in mlx4_ARM_SRQ_wrapper() argument
3645 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_ARM_SRQ_wrapper()
3653 struct mlx4_cmd_mailbox *inbox, in mlx4_GEN_QP_wrapper() argument
3669 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_GEN_QP_wrapper()
3677 struct mlx4_cmd_mailbox *inbox, in mlx4_INIT2INIT_QP_wrapper() argument
3681 struct mlx4_qp_context *context = inbox->buf + 8; in mlx4_INIT2INIT_QP_wrapper()
3683 update_pkey_index(dev, slave, inbox); in mlx4_INIT2INIT_QP_wrapper()
3684 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_INIT2INIT_QP_wrapper()
3689 struct mlx4_cmd_mailbox *inbox) in adjust_qp_sched_queue() argument
3691 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf); in adjust_qp_sched_queue()
3722 struct mlx4_cmd_mailbox *inbox) in roce_verify_mac() argument
3727 u8 sched = *(u8 *)(inbox->buf + 64); in roce_verify_mac()
3741 struct mlx4_cmd_mailbox *inbox, in mlx4_INIT2RTR_QP_wrapper() argument
3746 struct mlx4_qp_context *qpc = inbox->buf + 8; in mlx4_INIT2RTR_QP_wrapper()
3757 err = adjust_qp_sched_queue(dev, slave, qpc, inbox); in mlx4_INIT2RTR_QP_wrapper()
3760 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave); in mlx4_INIT2RTR_QP_wrapper()
3764 if (roce_verify_mac(dev, slave, qpc, inbox)) in mlx4_INIT2RTR_QP_wrapper()
3767 update_pkey_index(dev, slave, inbox); in mlx4_INIT2RTR_QP_wrapper()
3768 update_gid(dev, inbox, (u8)slave); in mlx4_INIT2RTR_QP_wrapper()
3780 err = update_vport_qp_param(dev, inbox, slave, qpn); in mlx4_INIT2RTR_QP_wrapper()
3784 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_INIT2RTR_QP_wrapper()
3805 struct mlx4_cmd_mailbox *inbox, in mlx4_RTR2RTS_QP_wrapper() argument
3810 struct mlx4_qp_context *context = inbox->buf + 8; in mlx4_RTR2RTS_QP_wrapper()
3812 err = adjust_qp_sched_queue(dev, slave, context, inbox); in mlx4_RTR2RTS_QP_wrapper()
3815 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave); in mlx4_RTR2RTS_QP_wrapper()
3819 update_pkey_index(dev, slave, inbox); in mlx4_RTR2RTS_QP_wrapper()
3820 update_gid(dev, inbox, (u8)slave); in mlx4_RTR2RTS_QP_wrapper()
3822 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_RTR2RTS_QP_wrapper()
3827 struct mlx4_cmd_mailbox *inbox, in mlx4_RTS2RTS_QP_wrapper() argument
3832 struct mlx4_qp_context *context = inbox->buf + 8; in mlx4_RTS2RTS_QP_wrapper()
3834 err = adjust_qp_sched_queue(dev, slave, context, inbox); in mlx4_RTS2RTS_QP_wrapper()
3837 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave); in mlx4_RTS2RTS_QP_wrapper()
3841 update_pkey_index(dev, slave, inbox); in mlx4_RTS2RTS_QP_wrapper()
3842 update_gid(dev, inbox, (u8)slave); in mlx4_RTS2RTS_QP_wrapper()
3844 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_RTS2RTS_QP_wrapper()
3850 struct mlx4_cmd_mailbox *inbox, in mlx4_SQERR2RTS_QP_wrapper() argument
3854 struct mlx4_qp_context *context = inbox->buf + 8; in mlx4_SQERR2RTS_QP_wrapper()
3855 int err = adjust_qp_sched_queue(dev, slave, context, inbox); in mlx4_SQERR2RTS_QP_wrapper()
3859 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SQERR2RTS_QP_wrapper()
3864 struct mlx4_cmd_mailbox *inbox, in mlx4_SQD2SQD_QP_wrapper() argument
3869 struct mlx4_qp_context *context = inbox->buf + 8; in mlx4_SQD2SQD_QP_wrapper()
3871 err = adjust_qp_sched_queue(dev, slave, context, inbox); in mlx4_SQD2SQD_QP_wrapper()
3874 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave); in mlx4_SQD2SQD_QP_wrapper()
3879 update_gid(dev, inbox, (u8)slave); in mlx4_SQD2SQD_QP_wrapper()
3880 update_pkey_index(dev, slave, inbox); in mlx4_SQD2SQD_QP_wrapper()
3881 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SQD2SQD_QP_wrapper()
3886 struct mlx4_cmd_mailbox *inbox, in mlx4_SQD2RTS_QP_wrapper() argument
3891 struct mlx4_qp_context *context = inbox->buf + 8; in mlx4_SQD2RTS_QP_wrapper()
3893 err = adjust_qp_sched_queue(dev, slave, context, inbox); in mlx4_SQD2RTS_QP_wrapper()
3896 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave); in mlx4_SQD2RTS_QP_wrapper()
3901 update_gid(dev, inbox, (u8)slave); in mlx4_SQD2RTS_QP_wrapper()
3902 update_pkey_index(dev, slave, inbox); in mlx4_SQD2RTS_QP_wrapper()
3903 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SQD2RTS_QP_wrapper()
3908 struct mlx4_cmd_mailbox *inbox, in mlx4_2RST_QP_wrapper() argument
3919 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_2RST_QP_wrapper()
4061 struct mlx4_cmd_mailbox *inbox, in mlx4_QP_ATTACH_wrapper() argument
4066 u8 *gid = inbox->buf; in mlx4_QP_ATTACH_wrapper()
4163 struct mlx4_cmd_mailbox *inbox, in add_eth_header() argument
4175 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; in add_eth_header()
4223 struct mlx4_cmd_mailbox *inbox, in mlx4_UPDATE_QP_wrapper() argument
4236 cmd = (struct mlx4_update_qp_context *)inbox->buf; in mlx4_UPDATE_QP_wrapper()
4274 err = mlx4_cmd(dev, inbox->dma, in mlx4_UPDATE_QP_wrapper()
4290 struct mlx4_cmd_mailbox *inbox, in mlx4_QP_FLOW_STEERING_ATTACH_wrapper() argument
4309 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4342 if (add_eth_header(dev, slave, inbox, rlist, header_id)) { in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4356 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param, in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4380 struct mlx4_cmd_mailbox *inbox, in mlx4_QP_FLOW_STEERING_DETACH_wrapper() argument
4423 struct mlx4_cmd_mailbox *inbox, in mlx4_QUERY_IF_STAT_wrapper() argument
4434 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_QUERY_IF_STAT_wrapper()
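The matches above repeatedly show the same mailbox layout: the first big-endian 32-bit word of inbox->buf is the optpar mask, the QP context starts at offset 8, the pkey index byte sits at offset 35 and the sched_queue byte at offset 64. Below is a minimal user-space sketch of that layout, assuming a hypothetical stand-in struct (fake_mailbox) and example values; it is not the kernel's struct mlx4_cmd_mailbox or its wrappers, and ntohl()/htonl() merely substitute for be32_to_cpu()/cpu_to_be32().

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Hypothetical stand-in for struct mlx4_cmd_mailbox; the real structure
 * wraps a DMA-mapped buffer, this only mirrors the inbox->buf accesses
 * seen in the matches above. */
struct fake_mailbox {
	unsigned char buf[256];
};

int main(void)
{
	struct fake_mailbox inbox;
	uint32_t optpar_be = htonl(0x00010000);	/* example optpar mask */

	memset(&inbox, 0, sizeof(inbox));
	/* First big-endian 32-bit word of the mailbox is the optpar mask,
	 * read by update_gid()/verify_qp_parameters() via be32_to_cpu(). */
	memcpy(inbox.buf, &optpar_be, sizeof(optpar_be));
	inbox.buf[35] = 3;	/* pkey index byte used by update_pkey_index() */
	inbox.buf[64] = 0x40;	/* sched_queue byte checked by roce_verify_mac() */

	/* User-space stand-in: ntohl() plays the role of be32_to_cpu(). */
	uint32_t optpar;
	memcpy(&optpar, inbox.buf, sizeof(optpar));
	optpar = ntohl(optpar);

	/* The QP context itself starts 8 bytes into the mailbox, matching
	 * "qpc = inbox->buf + 8" in the QP transition wrappers above. */
	unsigned char *qp_ctx = inbox.buf + 8;

	printf("optpar=0x%08x pkey_index=%u sched=0x%02x qp_ctx offset=%td\n",
	       optpar, inbox.buf[35], inbox.buf[64], qp_ctx - inbox.buf);
	return 0;
}

In the listed wrappers themselves, the same parsing is followed by the fixups visible above (update_pkey_index(), update_gid(), update_vport_qp_param()) before the command is forwarded to the firmware through mlx4_DMA_wrapper().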