Lines matching refs: pfault
162 struct mlx5_ib_pfault *pfault, in mlx5_ib_page_fault_resume() argument
166 pfault->mpfault.flags, in mlx5_ib_page_fault_resume()
186 struct mlx5_ib_pfault *pfault, in pagefault_single_data_segment() argument
216 (bcnt - pfault->mpfault.bytes_committed); in pagefault_single_data_segment()
237 io_virt += pfault->mpfault.bytes_committed; in pagefault_single_data_segment()
238 bcnt -= pfault->mpfault.bytes_committed; in pagefault_single_data_segment()
295 pfault->mpfault.bytes_committed = 0; in pagefault_single_data_segment()
318 struct mlx5_ib_pfault *pfault, void *wqe, in pagefault_data_segments() argument
362 pfault->mpfault.bytes_committed); in pagefault_data_segments()
369 if (inline_segment || bcnt <= pfault->mpfault.bytes_committed) { in pagefault_data_segments()
370 pfault->mpfault.bytes_committed -= in pagefault_data_segments()
372 pfault->mpfault.bytes_committed); in pagefault_data_segments()
376 ret = pagefault_single_data_segment(qp, pfault, key, io_virt, in pagefault_data_segments()
391 struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault, in mlx5_ib_mr_initiator_pfault_handler() argument
396 u16 wqe_index = pfault->mpfault.wqe.wqe_index; in mlx5_ib_mr_initiator_pfault_handler()
496 struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault, in mlx5_ib_mr_responder_pfault_handler() argument
537 struct mlx5_ib_pfault *pfault) in mlx5_ib_mr_wqe_pfault_handler() argument
545 u16 wqe_index = pfault->mpfault.wqe.wqe_index; in mlx5_ib_mr_wqe_pfault_handler()
546 int requestor = pfault->mpfault.flags & MLX5_PFAULT_REQUESTOR; in mlx5_ib_mr_wqe_pfault_handler()
566 ret = mlx5_ib_mr_initiator_pfault_handler(qp, pfault, &wqe, in mlx5_ib_mr_wqe_pfault_handler()
569 ret = mlx5_ib_mr_responder_pfault_handler(qp, pfault, &wqe, in mlx5_ib_mr_wqe_pfault_handler()
582 ret = pagefault_data_segments(qp, pfault, wqe, wqe_end, &bytes_mapped, in mlx5_ib_mr_wqe_pfault_handler()
594 mlx5_ib_page_fault_resume(qp, pfault, resume_with_error); in mlx5_ib_mr_wqe_pfault_handler()
596 qp->mqp.qpn, resume_with_error, pfault->mpfault.flags); in mlx5_ib_mr_wqe_pfault_handler()
608 struct mlx5_ib_pfault *pfault) in mlx5_ib_mr_rdma_pfault_handler() argument
610 struct mlx5_pagefault *mpfault = &pfault->mpfault; in mlx5_ib_mr_rdma_pfault_handler()
645 ret = pagefault_single_data_segment(qp, pfault, rkey, address, length, in mlx5_ib_mr_rdma_pfault_handler()
651 mlx5_ib_page_fault_resume(qp, pfault, 1); in mlx5_ib_mr_rdma_pfault_handler()
655 mlx5_ib_page_fault_resume(qp, pfault, 0); in mlx5_ib_mr_rdma_pfault_handler()
676 struct mlx5_ib_pfault *pfault) in mlx5_ib_mr_pfault_handler() argument
678 u8 event_subtype = pfault->mpfault.event_subtype; in mlx5_ib_mr_pfault_handler()
682 mlx5_ib_mr_wqe_pfault_handler(qp, pfault); in mlx5_ib_mr_pfault_handler()
685 mlx5_ib_mr_rdma_pfault_handler(qp, pfault); in mlx5_ib_mr_pfault_handler()
690 mlx5_ib_page_fault_resume(qp, pfault, 1); in mlx5_ib_mr_pfault_handler()
697 struct mlx5_ib_pfault *pfault = container_of(work, in mlx5_ib_qp_pfault_action() local
701 mlx5_ib_get_pagefault_context(&pfault->mpfault); in mlx5_ib_qp_pfault_action()
702 struct mlx5_ib_qp *qp = container_of(pfault, struct mlx5_ib_qp, in mlx5_ib_qp_pfault_action()
704 mlx5_ib_mr_pfault_handler(qp, pfault); in mlx5_ib_qp_pfault_action()
733 struct mlx5_pagefault *pfault) in mlx5_ib_pfault_handler() argument
744 mlx5_ib_get_pagefault_context(pfault); in mlx5_ib_pfault_handler()
747 qp_pfault->mpfault = *pfault; in mlx5_ib_pfault_handler()
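
Taken together, the references above trace the ODP page-fault path in the mlx5 driver: resuming a stalled QP, faulting in single and multiple data segments, the initiator/responder WQE parsers, the top-level dispatch that routes a fault by its event_subtype, and the deferral path that copies the hardware pagefault into the QP context before the work handler recovers it with container_of. Below is a minimal, standalone sketch of that dispatch only. It is written against simplified stand-in types: the struct layouts, constant values, printouts, and the shortened names (mr_pfault, wqe_pfault, rdma_pfault, page_fault_resume) are assumptions for illustration, not the driver's definitions; only the fields the excerpts actually show (event_subtype, flags, wqe.wqe_index, bytes_committed) are taken from the listing.

/*
 * Hedged, standalone model of the dispatch implied by the listing above.
 * All types and constants here are simplified stand-ins; the real handlers
 * and structures live in the mlx5 IB driver.
 */
#include <stdio.h>

/* stand-in subtypes and flag; real values in the mlx5 headers may differ */
enum { PFAULT_SUBTYPE_WQE = 0, PFAULT_SUBTYPE_RDMA = 1 };
#define PFAULT_REQUESTOR (1 << 0)

struct mlx5_pagefault {
	int event_subtype;
	int flags;
	unsigned int bytes_committed;
	struct { unsigned short wqe_index; } wqe;
};

struct mlx5_ib_pfault {
	struct mlx5_pagefault mpfault;	/* copied from the hardware event */
};

struct mlx5_ib_qp {
	unsigned int qpn;
};

static void page_fault_resume(struct mlx5_ib_qp *qp,
			      struct mlx5_ib_pfault *pfault, int error)
{
	/* the real code tells the HCA to resume the stalled QP */
	printf("resume qpn=0x%x error=%d flags=0x%x\n",
	       qp->qpn, error, pfault->mpfault.flags);
}

static void wqe_pfault(struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault)
{
	/* initiator vs. responder parsing is chosen from the REQUESTOR flag */
	int requestor = pfault->mpfault.flags & PFAULT_REQUESTOR;

	printf("WQE fault, wqe_index=%u, %s side\n",
	       (unsigned int)pfault->mpfault.wqe.wqe_index,
	       requestor ? "initiator" : "responder");
	/* ... parse the WQE, fault in its data segments, then resume ... */
	page_fault_resume(qp, pfault, 0);
}

static void rdma_pfault(struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault)
{
	printf("RDMA fault, bytes_committed=%u\n",
	       pfault->mpfault.bytes_committed);
	/* ... fault in the single data segment named by the rkey, resume ... */
	page_fault_resume(qp, pfault, 0);
}

/* top-level dispatch, mirroring the switch the excerpts suggest */
static void mr_pfault(struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault)
{
	switch (pfault->mpfault.event_subtype) {
	case PFAULT_SUBTYPE_WQE:
		wqe_pfault(qp, pfault);
		break;
	case PFAULT_SUBTYPE_RDMA:
		rdma_pfault(qp, pfault);
		break;
	default:
		/* unknown subtype: resume with error so the QP is not stuck */
		page_fault_resume(qp, pfault, 1);
		break;
	}
}

int main(void)
{
	struct mlx5_ib_qp qp = { .qpn = 0x1a };
	struct mlx5_ib_pfault pf = {
		.mpfault = {
			.event_subtype = PFAULT_SUBTYPE_WQE,
			.flags = PFAULT_REQUESTOR,
			.wqe = { .wqe_index = 7 },
		},
	};

	mr_pfault(&qp, &pf);
	return 0;
}

One design point the listing itself makes visible: every branch ends in a page-fault resume, and the error-recovery paths call mlx5_ib_page_fault_resume(qp, pfault, 1), i.e. resume with the error flag set, which presumably keeps the QP from stalling indefinitely on a fault the driver cannot service.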