Lines Matching refs:bcnt
180 u32 key, u64 io_virt, size_t bcnt, in pagefault_single_data_segment() argument
209 (bcnt - pfault->mpfault.bytes_committed); in pagefault_single_data_segment()
231 bcnt -= pfault->mpfault.bytes_committed; in pagefault_single_data_segment()
237 npages = ib_umem_odp_map_dma_pages(mr->umem, io_virt, bcnt, in pagefault_single_data_segment()
266 *bytes_mapped += min_t(u32, new_mappings, bcnt); in pagefault_single_data_segment()
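
The matches above fall in two functions that appear to come from the mlx5 on-demand-paging fault handler (drivers/infiniband/hw/mlx5/odp.c, in an older kernel where pfault->mpfault still exists). In pagefault_single_data_segment(), bcnt is the byte count of one data segment: bytes a previous pass already committed are subtracted (line 231), the remainder is handed to ib_umem_odp_map_dma_pages() (line 237), and the page-granular result is clamped back to bcnt when reporting bytes_mapped (line 266). Below is a minimal user-space sketch of that accounting; fault_ctx, map_dma_pages() and pagefault_one_segment() are hypothetical stand-ins, not the driver's real types.

#include <stddef.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	((uint64_t)1 << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

struct fault_ctx {
	uint32_t bytes_committed;	/* bytes a previous pass already resolved */
};

/* Stand-in for ib_umem_odp_map_dma_pages(): pages spanned by [io_virt, +bcnt). */
static long map_dma_pages(uint64_t io_virt, size_t bcnt)
{
	uint64_t first = io_virt & PAGE_MASK;
	uint64_t last = (io_virt + bcnt + PAGE_SIZE - 1) & PAGE_MASK;

	return (long)((last - first) >> PAGE_SHIFT);
}

static int pagefault_one_segment(struct fault_ctx *pf, uint64_t io_virt,
				 size_t bcnt, uint32_t *bytes_mapped)
{
	uint32_t new_mappings;
	long npages;

	/* Skip the bytes already committed for this fault (cf. line 231). */
	io_virt += pf->bytes_committed;
	bcnt -= pf->bytes_committed;

	npages = map_dma_pages(io_virt, bcnt);	/* cf. line 237 */
	if (npages < 0)
		return (int)npages;

	/*
	 * Mapping is page-granular and can overshoot the segment, so only
	 * bytes inside [io_virt, io_virt + bcnt) are credited -- this is the
	 * min_t(u32, new_mappings, bcnt) clamp at line 266.
	 */
	new_mappings = (uint32_t)(npages * PAGE_SIZE -
				  (io_virt - (io_virt & PAGE_MASK)));
	if (bytes_mapped)
		*bytes_mapped += new_mappings < bcnt ? new_mappings
						     : (uint32_t)bcnt;
	return 0;
}
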
319 size_t bcnt; in pagefault_data_segments() local
338 bcnt = byte_count & ~MLX5_INLINE_SEG; in pagefault_data_segments()
341 bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK; in pagefault_data_segments()
342 wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt, in pagefault_data_segments()
349 if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY && in pagefault_data_segments()
354 *total_wqe_bytes += bcnt - min_t(size_t, bcnt, in pagefault_data_segments()
359 if (bcnt == 0) in pagefault_data_segments()
360 bcnt = 1U << 31; in pagefault_data_segments()
362 if (inline_segment || bcnt <= pfault->mpfault.bytes_committed) { in pagefault_data_segments()
364 min_t(size_t, bcnt, in pagefault_data_segments()
370 bcnt, bytes_mapped); in pagefault_data_segments()
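
The pagefault_data_segments() matches trace a walk over a WQE's data segments: strip the MLX5_INLINE_SEG flag from byte_count (line 338), step over inline data 16-byte aligned (lines 341-342), stop at the zero-length receive-queue sentinel (line 349), tally the not-yet-committed bytes (line 354), treat a zero bcnt as the 2 GB maximum (lines 359-360), and either consume the committed-byte budget or fault the segment in (lines 362-370). Below is a hedged reconstruction of that control flow under the same stand-in types as the previous sketch; data_seg and walk_data_segments are simplified inventions, the constant values are illustrative, and the big-endian conversion of WQE fields is omitted.

#include <stddef.h>
#include <stdint.h>

#define MLX5_INLINE_SEG				0x80000000u
#define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK	0x3ffu	/* illustrative value */
#define MLX5_INVALID_LKEY			0x100u	/* illustrative value */
#define SEG_ALIGN(x)	(((x) + 15) & ~(size_t)15)

struct data_seg {		/* simplified mlx5_wqe_data_seg stand-in */
	uint32_t byte_count;	/* big-endian on real hardware; host order here */
	uint32_t lkey;
	uint64_t addr;
};

struct fault_ctx { uint32_t bytes_committed; };

/* The single-segment sketch shown after the earlier matches. */
int pagefault_one_segment(struct fault_ctx *pf, uint64_t io_virt,
			  size_t bcnt, uint32_t *bytes_mapped);

static int walk_data_segments(struct fault_ctx *pf, char *wqe, char *wqe_end,
			      int receive_queue, uint32_t *bytes_mapped,
			      uint32_t *total_wqe_bytes)
{
	while (wqe < wqe_end) {
		struct data_seg *dseg = (struct data_seg *)wqe;
		uint64_t io_virt = dseg->addr;
		uint32_t key = dseg->lkey;
		int inline_segment = !!(dseg->byte_count & MLX5_INLINE_SEG);
		size_t bcnt = dseg->byte_count & ~MLX5_INLINE_SEG; /* line 338 */
		int ret;

		if (inline_segment) {
			bcnt &= MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK; /* line 341 */
			/* Inline data sits in the WQE after a 4-byte header;
			 * step over it, 16-byte aligned (cf. line 342). */
			wqe += SEG_ALIGN(sizeof(uint32_t) + bcnt);
		} else {
			wqe += sizeof(*dseg);
		}

		/* A zero-length RQ segment with the invalid lkey terminates
		 * the scatter list (cf. line 349). */
		if (receive_queue && bcnt == 0 &&
		    key == MLX5_INVALID_LKEY && io_virt == 0)
			break;

		if (total_wqe_bytes)	/* not-yet-committed bytes, line 354 */
			*total_wqe_bytes += bcnt -
				(bcnt < pf->bytes_committed ?
				 bcnt : pf->bytes_committed);

		/* A zero byte_count denotes the 2 GB maximum (cf. line 360). */
		if (bcnt == 0)
			bcnt = 1U << 31;

		/* Inline data never faults, and a fully committed segment is
		 * already resolved; both only consume the committed-byte
		 * budget (cf. line 362). */
		if (inline_segment || bcnt <= pf->bytes_committed) {
			pf->bytes_committed -=
				bcnt < pf->bytes_committed ?
				(uint32_t)bcnt : pf->bytes_committed;
			continue;
		}

		ret = pagefault_one_segment(pf, io_virt, bcnt,
					    bytes_mapped);	/* line 370 */
		if (ret)
			return ret;
	}
	return 0;
}

The 1U << 31 substitution at line 360 presumably encodes the hardware convention that a zero byte_count means a maximum-size (2 GB) segment, which lets the committed-byte arithmetic that follows work without a special case.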