comp_ctx           74 drivers/crypto/cavium/zip/zip_crypto.c 	struct zip_operation  *comp_ctx   = &zip_ctx->zip_comp;
comp_ctx           77 drivers/crypto/cavium/zip/zip_crypto.c 	zip_static_init_zip_ops(comp_ctx, lzs_flag);
comp_ctx           80 drivers/crypto/cavium/zip/zip_crypto.c 	comp_ctx->input  = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
comp_ctx           81 drivers/crypto/cavium/zip/zip_crypto.c 	if (!comp_ctx->input)
comp_ctx           84 drivers/crypto/cavium/zip/zip_crypto.c 	comp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
comp_ctx           85 drivers/crypto/cavium/zip/zip_crypto.c 	if (!comp_ctx->output)
comp_ctx          102 drivers/crypto/cavium/zip/zip_crypto.c 	zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
comp_ctx          105 drivers/crypto/cavium/zip/zip_crypto.c 	zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
comp_ctx          112 drivers/crypto/cavium/zip/zip_crypto.c 	struct zip_operation  *comp_ctx   = &zip_ctx->zip_comp;
comp_ctx          115 drivers/crypto/cavium/zip/zip_crypto.c 	zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
comp_ctx          116 drivers/crypto/cavium/zip/zip_crypto.c 	zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
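The zip_crypto.c hits (lines 74-116) show the compression context owning dedicated input and output buffers: both are allocated when the context is set up, the error path unwinds whatever was already allocated, and exit frees both. A minimal, self-contained sketch of that pattern follows; plain malloc/free and placeholder sizes stand in for zip_data_buf_alloc/zip_data_buf_free and the driver's real buffer sizes, and the names are illustrative only.

#include <stdlib.h>

#define INPUT_BUFFER_SIZE  (64 * 1024)	/* placeholder, not the driver's value */
#define OUTPUT_BUFFER_SIZE (64 * 1024)	/* placeholder, not the driver's value */

struct zip_op_sketch {
	unsigned char *input;
	unsigned char *output;
};

int zip_ctx_init_sketch(struct zip_op_sketch *comp_ctx)
{
	comp_ctx->input = malloc(INPUT_BUFFER_SIZE);
	if (!comp_ctx->input)
		return -1;

	comp_ctx->output = malloc(OUTPUT_BUFFER_SIZE);
	if (!comp_ctx->output) {
		/* unwind the earlier allocation before failing */
		free(comp_ctx->input);
		comp_ctx->input = NULL;
		return -1;
	}
	return 0;
}

void zip_ctx_exit_sketch(struct zip_op_sketch *comp_ctx)
{
	free(comp_ctx->input);
	free(comp_ctx->output);
	comp_ctx->input = NULL;
	comp_ctx->output = NULL;
}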
comp_ctx          281 drivers/infiniband/hw/efa/efa_com.c 					struct efa_comp_ctx *comp_ctx)
comp_ctx          283 drivers/infiniband/hw/efa/efa_com.c 	u16 cmd_id = comp_ctx->user_cqe->acq_common_descriptor.command &
comp_ctx          288 drivers/infiniband/hw/efa/efa_com.c 	comp_ctx->occupied = 0;
comp_ctx          297 drivers/infiniband/hw/efa/efa_com.c 	if (aq->comp_ctx[ctx_id].occupied && capture) {
comp_ctx          306 drivers/infiniband/hw/efa/efa_com.c 		aq->comp_ctx[ctx_id].occupied = 1;
comp_ctx          311 drivers/infiniband/hw/efa/efa_com.c 	return &aq->comp_ctx[ctx_id];
comp_ctx          321 drivers/infiniband/hw/efa/efa_com.c 	struct efa_comp_ctx *comp_ctx;
comp_ctx          341 drivers/infiniband/hw/efa/efa_com.c 	comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, true);
comp_ctx          342 drivers/infiniband/hw/efa/efa_com.c 	if (!comp_ctx) {
comp_ctx          347 drivers/infiniband/hw/efa/efa_com.c 	comp_ctx->status = EFA_CMD_SUBMITTED;
comp_ctx          348 drivers/infiniband/hw/efa/efa_com.c 	comp_ctx->comp_size = comp_size_in_bytes;
comp_ctx          349 drivers/infiniband/hw/efa/efa_com.c 	comp_ctx->user_cqe = comp;
comp_ctx          350 drivers/infiniband/hw/efa/efa_com.c 	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
comp_ctx          352 drivers/infiniband/hw/efa/efa_com.c 	reinit_completion(&comp_ctx->wait_event);
comp_ctx          367 drivers/infiniband/hw/efa/efa_com.c 	return comp_ctx;
comp_ctx          374 drivers/infiniband/hw/efa/efa_com.c 	struct efa_comp_ctx *comp_ctx;
comp_ctx          377 drivers/infiniband/hw/efa/efa_com.c 	aq->comp_ctx = devm_kzalloc(aq->dmadev, size, GFP_KERNEL);
comp_ctx          379 drivers/infiniband/hw/efa/efa_com.c 	if (!aq->comp_ctx || !aq->comp_ctx_pool) {
comp_ctx          381 drivers/infiniband/hw/efa/efa_com.c 		devm_kfree(aq->dmadev, aq->comp_ctx);
comp_ctx          386 drivers/infiniband/hw/efa/efa_com.c 		comp_ctx = efa_com_get_comp_ctx(aq, i, false);
comp_ctx          387 drivers/infiniband/hw/efa/efa_com.c 		if (comp_ctx)
comp_ctx          388 drivers/infiniband/hw/efa/efa_com.c 			init_completion(&comp_ctx->wait_event);
comp_ctx          406 drivers/infiniband/hw/efa/efa_com.c 	struct efa_comp_ctx *comp_ctx;
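The efa_com.c hits at lines 281-311, 321-367 and 374-388 show the admin queue keeping one completion context per queue entry: a context is looked up by command id, the occupied flag rejects reuse of a busy slot on the submit path, the whole array is allocated up front with each wait_event initialized, and releasing a context simply clears the flag. A simplified standalone sketch of that pool, with a fixed power-of-two depth, no locking, and illustrative names (not the driver's actual API):

#include <stdbool.h>
#include <stdio.h>

#define AQ_DEPTH 16	/* illustrative queue depth, must be a power of two */

struct comp_ctx_sketch {
	bool occupied;
	int  status;	/* e.g. submitted / completed / aborted */
};

static struct comp_ctx_sketch comp_ctx_pool[AQ_DEPTH];

/* capture=true marks the slot busy (submit path); capture=false is a plain
 * lookup (completion handler, pool init, teardown). */
struct comp_ctx_sketch *get_comp_ctx_sketch(unsigned int cmd_id, bool capture)
{
	unsigned int ctx_id = cmd_id & (AQ_DEPTH - 1);

	if (comp_ctx_pool[ctx_id].occupied && capture) {
		fprintf(stderr, "completion context %u is occupied\n", ctx_id);
		return NULL;
	}
	if (capture)
		comp_ctx_pool[ctx_id].occupied = true;

	return &comp_ctx_pool[ctx_id];
}

void put_comp_ctx_sketch(struct comp_ctx_sketch *ctx)
{
	ctx->occupied = false;	/* slot can be reused by the next command id */
}

The ena_com.c hits at lines 194-300 follow the same scheme with get_comp_ctxt()/comp_ctxt_release().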
comp_ctx          415 drivers/infiniband/hw/efa/efa_com.c 	comp_ctx = __efa_com_submit_admin_cmd(aq, cmd, cmd_size_in_bytes, comp,
comp_ctx          418 drivers/infiniband/hw/efa/efa_com.c 	if (IS_ERR(comp_ctx))
comp_ctx          421 drivers/infiniband/hw/efa/efa_com.c 	return comp_ctx;
comp_ctx          427 drivers/infiniband/hw/efa/efa_com.c 	struct efa_comp_ctx *comp_ctx;
comp_ctx          433 drivers/infiniband/hw/efa/efa_com.c 	comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, false);
comp_ctx          434 drivers/infiniband/hw/efa/efa_com.c 	if (!comp_ctx) {
comp_ctx          441 drivers/infiniband/hw/efa/efa_com.c 	comp_ctx->status = EFA_CMD_COMPLETED;
comp_ctx          442 drivers/infiniband/hw/efa/efa_com.c 	comp_ctx->comp_status = cqe->acq_common_descriptor.status;
comp_ctx          443 drivers/infiniband/hw/efa/efa_com.c 	if (comp_ctx->user_cqe)
comp_ctx          444 drivers/infiniband/hw/efa/efa_com.c 		memcpy(comp_ctx->user_cqe, cqe, comp_ctx->comp_size);
comp_ctx          447 drivers/infiniband/hw/efa/efa_com.c 		complete(&comp_ctx->wait_event);
comp_ctx          510 drivers/infiniband/hw/efa/efa_com.c static int efa_com_wait_and_process_admin_cq_polling(struct efa_comp_ctx *comp_ctx,
comp_ctx          524 drivers/infiniband/hw/efa/efa_com.c 		if (comp_ctx->status != EFA_CMD_SUBMITTED)
comp_ctx          542 drivers/infiniband/hw/efa/efa_com.c 	err = efa_com_comp_status_to_errno(comp_ctx->comp_status);
comp_ctx          544 drivers/infiniband/hw/efa/efa_com.c 	efa_com_put_comp_ctx(aq, comp_ctx);
comp_ctx          548 drivers/infiniband/hw/efa/efa_com.c static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *comp_ctx,
comp_ctx          554 drivers/infiniband/hw/efa/efa_com.c 	wait_for_completion_timeout(&comp_ctx->wait_event,
comp_ctx          563 drivers/infiniband/hw/efa/efa_com.c 	if (comp_ctx->status == EFA_CMD_SUBMITTED) {
comp_ctx          570 drivers/infiniband/hw/efa/efa_com.c 		if (comp_ctx->status == EFA_CMD_COMPLETED)
comp_ctx          574 drivers/infiniband/hw/efa/efa_com.c 				efa_com_cmd_str(comp_ctx->cmd_opcode),
comp_ctx          575 drivers/infiniband/hw/efa/efa_com.c 				comp_ctx->cmd_opcode, comp_ctx->status,
comp_ctx          576 drivers/infiniband/hw/efa/efa_com.c 				comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
comp_ctx          581 drivers/infiniband/hw/efa/efa_com.c 				efa_com_cmd_str(comp_ctx->cmd_opcode),
comp_ctx          582 drivers/infiniband/hw/efa/efa_com.c 				comp_ctx->cmd_opcode, comp_ctx->status,
comp_ctx          583 drivers/infiniband/hw/efa/efa_com.c 				comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
comp_ctx          590 drivers/infiniband/hw/efa/efa_com.c 	err = efa_com_comp_status_to_errno(comp_ctx->comp_status);
comp_ctx          592 drivers/infiniband/hw/efa/efa_com.c 	efa_com_put_comp_ctx(aq, comp_ctx);
comp_ctx          604 drivers/infiniband/hw/efa/efa_com.c static int efa_com_wait_and_process_admin_cq(struct efa_comp_ctx *comp_ctx,
comp_ctx          608 drivers/infiniband/hw/efa/efa_com.c 		return efa_com_wait_and_process_admin_cq_polling(comp_ctx, aq);
comp_ctx          610 drivers/infiniband/hw/efa/efa_com.c 	return efa_com_wait_and_process_admin_cq_interrupts(comp_ctx, aq);
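The polling-mode wait hits (efa_com.c lines 510-544, ena_com.c lines 546-592) spin until the context leaves the SUBMITTED state or a timeout expires, translate the device's completion status into a negative errno, and release the context. A compressed, standalone approximation of that loop follows; a bare iteration count replaces the jiffies-based timeout, the status-to-errno mapping is reduced to success/-EIO, and main() starts from an already-completed context just to keep the example self-contained. All names are illustrative.

#include <errno.h>
#include <stdio.h>

enum cmd_status_sketch { CMD_SUBMITTED, CMD_COMPLETED, CMD_ABORTED };

struct poll_ctx_sketch {
	enum cmd_status_sketch status;
	int comp_status;	/* device status code from the completion entry */
};

static int comp_status_to_errno_sketch(int comp_status)
{
	return comp_status == 0 ? 0 : -EIO;	/* real drivers map many codes */
}

static int wait_polling_sketch(struct poll_ctx_sketch *ctx, int timeout_polls)
{
	int polls;

	for (polls = 0; polls < timeout_polls; polls++) {
		/* a real driver would process new admin completions here */
		if (ctx->status != CMD_SUBMITTED)
			break;
	}
	if (ctx->status == CMD_SUBMITTED)
		return -ETIME;	/* command never completed within the budget */

	return comp_status_to_errno_sketch(ctx->comp_status);
}

int main(void)
{
	struct poll_ctx_sketch ctx = { .status = CMD_COMPLETED, .comp_status = 0 };

	printf("result: %d\n", wait_polling_sketch(&ctx, 100));
	return 0;
}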
comp_ctx          632 drivers/infiniband/hw/efa/efa_com.c 	struct efa_comp_ctx *comp_ctx;
comp_ctx          643 drivers/infiniband/hw/efa/efa_com.c 	comp_ctx = efa_com_submit_admin_cmd(aq, cmd, cmd_size, comp, comp_size);
comp_ctx          644 drivers/infiniband/hw/efa/efa_com.c 	if (IS_ERR(comp_ctx)) {
comp_ctx          649 drivers/infiniband/hw/efa/efa_com.c 			cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx));
comp_ctx          652 drivers/infiniband/hw/efa/efa_com.c 		return PTR_ERR(comp_ctx);
comp_ctx          655 drivers/infiniband/hw/efa/efa_com.c 	err = efa_com_wait_and_process_admin_cq(comp_ctx, aq);
comp_ctx          661 drivers/infiniband/hw/efa/efa_com.c 			cmd->aq_common_descriptor.opcode, comp_ctx->comp_status,
comp_ctx          684 drivers/infiniband/hw/efa/efa_com.c 	devm_kfree(edev->dmadev, aq->comp_ctx);
comp_ctx          802 drivers/infiniband/hw/efa/efa_com.c 	devm_kfree(edev->dmadev, aq->comp_ctx);
comp_ctx           61 drivers/infiniband/hw/efa/efa_com.h 	struct efa_comp_ctx *comp_ctx;
comp_ctx          232 drivers/infiniband/hw/i40iw/i40iw_puda.c 	u64 comp_ctx;
comp_ctx          260 drivers/infiniband/hw/i40iw/i40iw_puda.c 	get_64bit_val(cqe, 8, &comp_ctx);
comp_ctx          261 drivers/infiniband/hw/i40iw/i40iw_puda.c 	info->qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
comp_ctx          752 drivers/infiniband/hw/i40iw/i40iw_uk.c 	u64 comp_ctx, qword0, qword2, qword3, wqe_qword;
comp_ctx          791 drivers/infiniband/hw/i40iw/i40iw_uk.c 	get_64bit_val(cqe, 8, &comp_ctx);
comp_ctx          796 drivers/infiniband/hw/i40iw/i40iw_uk.c 	qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
comp_ctx         1093 drivers/infiniband/hw/i40iw/i40iw_uk.c 	u64 qword3, comp_ctx;
comp_ctx         1110 drivers/infiniband/hw/i40iw/i40iw_uk.c 		get_64bit_val(cqe, 8, &comp_ctx);
comp_ctx         1111 drivers/infiniband/hw/i40iw/i40iw_uk.c 		if ((void *)(unsigned long)comp_ctx == queue)
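In the i40iw hits, comp_ctx is not a structure at all: it is a 64-bit value read from byte offset 8 of the CQE and cast back to the i40iw_qp_uk pointer that was programmed into the hardware earlier; the hits at i40iw_uk.c lines 1093-1111 compare that stashed value against a queue pointer to pick out entries owned by a given queue. A small standalone sketch of stashing and recovering a pointer through a 64-bit descriptor word, using a fake four-qword CQE and illustrative helper names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* fake 32-byte completion queue entry made of four 64-bit words */
typedef uint64_t cqe_sketch[4];

static void set_64bit_val_sketch(uint64_t *wqe, unsigned int byte_off, uint64_t val)
{
	memcpy((uint8_t *)wqe + byte_off, &val, sizeof(val));
}

static void get_64bit_val_sketch(uint64_t *wqe, unsigned int byte_off, uint64_t *val)
{
	memcpy(val, (uint8_t *)wqe + byte_off, sizeof(*val));
}

struct fake_qp { int id; };

int main(void)
{
	struct fake_qp qp = { .id = 7 };
	cqe_sketch cqe = { 0 };
	uint64_t comp_ctx;
	struct fake_qp *owner;

	/* producer side: stash the QP pointer in qword 1 (byte offset 8) */
	set_64bit_val_sketch(cqe, 8, (uint64_t)(uintptr_t)&qp);

	/* consumer side: recover the pointer, as the i40iw hits above do */
	get_64bit_val_sketch(cqe, 8, &comp_ctx);
	owner = (struct fake_qp *)(uintptr_t)comp_ctx;

	printf("cqe belongs to qp %d\n", owner->id);
	return 0;
}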
comp_ctx          194 drivers/net/ethernet/amazon/ena/ena_com.c 				     struct ena_comp_ctx *comp_ctx)
comp_ctx          196 drivers/net/ethernet/amazon/ena/ena_com.c 	comp_ctx->occupied = false;
comp_ctx          203 drivers/net/ethernet/amazon/ena/ena_com.c 	if (unlikely(!queue->comp_ctx)) {
comp_ctx          214 drivers/net/ethernet/amazon/ena/ena_com.c 	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
comp_ctx          221 drivers/net/ethernet/amazon/ena/ena_com.c 		queue->comp_ctx[command_id].occupied = true;
comp_ctx          224 drivers/net/ethernet/amazon/ena/ena_com.c 	return &queue->comp_ctx[command_id];
comp_ctx          233 drivers/net/ethernet/amazon/ena/ena_com.c 	struct ena_comp_ctx *comp_ctx;
comp_ctx          258 drivers/net/ethernet/amazon/ena/ena_com.c 	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
comp_ctx          259 drivers/net/ethernet/amazon/ena/ena_com.c 	if (unlikely(!comp_ctx))
comp_ctx          262 drivers/net/ethernet/amazon/ena/ena_com.c 	comp_ctx->status = ENA_CMD_SUBMITTED;
comp_ctx          263 drivers/net/ethernet/amazon/ena/ena_com.c 	comp_ctx->comp_size = (u32)comp_size_in_bytes;
comp_ctx          264 drivers/net/ethernet/amazon/ena/ena_com.c 	comp_ctx->user_cqe = comp;
comp_ctx          265 drivers/net/ethernet/amazon/ena/ena_com.c 	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
comp_ctx          267 drivers/net/ethernet/amazon/ena/ena_com.c 	reinit_completion(&comp_ctx->wait_event);
comp_ctx          282 drivers/net/ethernet/amazon/ena/ena_com.c 	return comp_ctx;
comp_ctx          288 drivers/net/ethernet/amazon/ena/ena_com.c 	struct ena_comp_ctx *comp_ctx;
comp_ctx          291 drivers/net/ethernet/amazon/ena/ena_com.c 	queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
comp_ctx          292 drivers/net/ethernet/amazon/ena/ena_com.c 	if (unlikely(!queue->comp_ctx)) {
comp_ctx          298 drivers/net/ethernet/amazon/ena/ena_com.c 		comp_ctx = get_comp_ctxt(queue, i, false);
comp_ctx          299 drivers/net/ethernet/amazon/ena/ena_com.c 		if (comp_ctx)
comp_ctx          300 drivers/net/ethernet/amazon/ena/ena_com.c 			init_completion(&comp_ctx->wait_event);
comp_ctx          313 drivers/net/ethernet/amazon/ena/ena_com.c 	struct ena_comp_ctx *comp_ctx;
comp_ctx          320 drivers/net/ethernet/amazon/ena/ena_com.c 	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
comp_ctx          324 drivers/net/ethernet/amazon/ena/ena_com.c 	if (IS_ERR(comp_ctx))
comp_ctx          328 drivers/net/ethernet/amazon/ena/ena_com.c 	return comp_ctx;
comp_ctx          461 drivers/net/ethernet/amazon/ena/ena_com.c 	struct ena_comp_ctx *comp_ctx;
comp_ctx          467 drivers/net/ethernet/amazon/ena/ena_com.c 	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
comp_ctx          468 drivers/net/ethernet/amazon/ena/ena_com.c 	if (unlikely(!comp_ctx)) {
comp_ctx          474 drivers/net/ethernet/amazon/ena/ena_com.c 	comp_ctx->status = ENA_CMD_COMPLETED;
comp_ctx          475 drivers/net/ethernet/amazon/ena/ena_com.c 	comp_ctx->comp_status = cqe->acq_common_descriptor.status;
comp_ctx          477 drivers/net/ethernet/amazon/ena/ena_com.c 	if (comp_ctx->user_cqe)
comp_ctx          478 drivers/net/ethernet/amazon/ena/ena_com.c 		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
comp_ctx          481 drivers/net/ethernet/amazon/ena/ena_com.c 		complete(&comp_ctx->wait_event);
comp_ctx          546 drivers/net/ethernet/amazon/ena/ena_com.c static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
comp_ctx          560 drivers/net/ethernet/amazon/ena/ena_com.c 		if (comp_ctx->status != ENA_CMD_SUBMITTED)
comp_ctx          578 drivers/net/ethernet/amazon/ena/ena_com.c 	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
comp_ctx          587 drivers/net/ethernet/amazon/ena/ena_com.c 	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
comp_ctx          588 drivers/net/ethernet/amazon/ena/ena_com.c 	     comp_ctx->status);
comp_ctx          590 drivers/net/ethernet/amazon/ena/ena_com.c 	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
comp_ctx          592 drivers/net/ethernet/amazon/ena/ena_com.c 	comp_ctxt_release(admin_queue, comp_ctx);
comp_ctx          749 drivers/net/ethernet/amazon/ena/ena_com.c static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
comp_ctx          755 drivers/net/ethernet/amazon/ena/ena_com.c 	wait_for_completion_timeout(&comp_ctx->wait_event,
comp_ctx          764 drivers/net/ethernet/amazon/ena/ena_com.c 	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
comp_ctx          770 drivers/net/ethernet/amazon/ena/ena_com.c 		if (comp_ctx->status == ENA_CMD_COMPLETED) {
comp_ctx          772 drivers/net/ethernet/amazon/ena/ena_com.c 			       comp_ctx->cmd_opcode,
comp_ctx          779 drivers/net/ethernet/amazon/ena/ena_com.c 			       comp_ctx->cmd_opcode, comp_ctx->status);
comp_ctx          792 drivers/net/ethernet/amazon/ena/ena_com.c 	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
comp_ctx          794 drivers/net/ethernet/amazon/ena/ena_com.c 	comp_ctxt_release(admin_queue, comp_ctx);
comp_ctx          865 drivers/net/ethernet/amazon/ena/ena_com.c static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
comp_ctx          869 drivers/net/ethernet/amazon/ena/ena_com.c 		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
comp_ctx          872 drivers/net/ethernet/amazon/ena/ena_com.c 	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
comp_ctx         1333 drivers/net/ethernet/amazon/ena/ena_com.c 	struct ena_comp_ctx *comp_ctx;
comp_ctx         1336 drivers/net/ethernet/amazon/ena/ena_com.c 	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
comp_ctx         1338 drivers/net/ethernet/amazon/ena/ena_com.c 	if (IS_ERR(comp_ctx)) {
comp_ctx         1339 drivers/net/ethernet/amazon/ena/ena_com.c 		if (comp_ctx == ERR_PTR(-ENODEV))
comp_ctx         1341 drivers/net/ethernet/amazon/ena/ena_com.c 				 PTR_ERR(comp_ctx));
comp_ctx         1344 drivers/net/ethernet/amazon/ena/ena_com.c 			       PTR_ERR(comp_ctx));
comp_ctx         1346 drivers/net/ethernet/amazon/ena/ena_com.c 		return PTR_ERR(comp_ctx);
comp_ctx         1349 drivers/net/ethernet/amazon/ena/ena_com.c 	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
comp_ctx         1436 drivers/net/ethernet/amazon/ena/ena_com.c 	struct ena_comp_ctx *comp_ctx;
comp_ctx         1439 drivers/net/ethernet/amazon/ena/ena_com.c 	if (!admin_queue->comp_ctx)
comp_ctx         1443 drivers/net/ethernet/amazon/ena/ena_com.c 		comp_ctx = get_comp_ctxt(admin_queue, i, false);
comp_ctx         1444 drivers/net/ethernet/amazon/ena/ena_com.c 		if (unlikely(!comp_ctx))
comp_ctx         1447 drivers/net/ethernet/amazon/ena/ena_com.c 		comp_ctx->status = ENA_CMD_ABORTED;
comp_ctx         1449 drivers/net/ethernet/amazon/ena/ena_com.c 		complete(&comp_ctx->wait_event);
comp_ctx         1639 drivers/net/ethernet/amazon/ena/ena_com.c 	if (admin_queue->comp_ctx)
comp_ctx         1640 drivers/net/ethernet/amazon/ena/ena_com.c 		devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
comp_ctx         1641 drivers/net/ethernet/amazon/ena/ena_com.c 	admin_queue->comp_ctx = NULL;
comp_ctx          242 drivers/net/ethernet/amazon/ena/ena_com.h 	struct ena_comp_ctx *comp_ctx;
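The ena_com.c hits trace the pool's full lifecycle: the allocation path (lines 288-300) devm_kzalloc's one context per queue entry and initializes each wait_event, the abort path (lines 1436-1449) marks every context ABORTED and completes its wait_event so sleeping submitters wake up, and teardown (lines 1639-1641) frees the array and NULLs the pointer so later lookups bail out. A standalone sketch of that lifecycle, where calloc/free and a plain flag replace devm_kzalloc and the completion API and all names are illustrative:

#include <stdbool.h>
#include <stdlib.h>

enum ena_cmd_status_sketch { CMD_FREE, CMD_SUBMITTED, CMD_COMPLETED, CMD_ABORTED };

struct ena_comp_ctx_sketch {
	enum ena_cmd_status_sketch status;
	bool waiter_woken;	/* stands in for complete(&wait_event) */
};

struct ena_admin_queue_sketch {
	struct ena_comp_ctx_sketch *comp_ctx;
	unsigned int q_depth;
};

int init_comp_ctxt_sketch(struct ena_admin_queue_sketch *aq, unsigned int depth)
{
	aq->q_depth = depth;
	aq->comp_ctx = calloc(depth, sizeof(*aq->comp_ctx));
	return aq->comp_ctx ? 0 : -1;
}

void abort_admin_commands_sketch(struct ena_admin_queue_sketch *aq)
{
	unsigned int i;

	if (!aq->comp_ctx)
		return;

	for (i = 0; i < aq->q_depth; i++) {
		aq->comp_ctx[i].status = CMD_ABORTED;
		aq->comp_ctx[i].waiter_woken = true;	/* wake any sleeping submitter */
	}
}

void destroy_comp_ctxt_sketch(struct ena_admin_queue_sketch *aq)
{
	free(aq->comp_ctx);
	aq->comp_ctx = NULL;	/* later lookups see a missing pool and bail out */
}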