Lines Matching refs:cqe

282 struct nvme_completion *cqe) in special_completion() argument
289 cqe->command_id, le16_to_cpup(&cqe->sq_id)); in special_completion()
295 cqe->command_id, le16_to_cpup(&cqe->sq_id)); in special_completion()
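
The hits above all dereference fields of struct nvme_completion. For
orientation, here is a minimal sketch of the 16-byte completion entry
those accesses imply; the field names come from this listing, while the
ordering and the reserved word follow the NVMe spec's CQ entry and are
an assumption here, not a quote from the file being indexed.

	#include <linux/types.h>

	/* Sketch only: 16-byte CQ entry per the NVMe spec. Field names
	 * match the accesses in this listing; the rsvd word and exact
	 * ordering are assumptions for illustration. */
	struct nvme_completion {
		__le32	result;		/* command-specific (cqe->result) */
		__u32	rsvd;
		__le16	sq_head;	/* cqe->sq_head */
		__le16	sq_id;		/* cqe->sq_id */
		__u16	command_id;	/* cqe->command_id */
		__le16	status;		/* bit 0 = phase tag, rest = status code */
	};
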
314 struct nvme_completion *cqe) in async_req_completion() argument
316 u32 result = le32_to_cpup(&cqe->result); in async_req_completion()
317 u16 status = le16_to_cpup(&cqe->status) >> 1; in async_req_completion()
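
Lines 316-317 show the decode idiom the handlers in this listing share:
bit 0 of cqe->status is the phase tag, so shifting right by one strips
it and leaves the spec-level status code, while result carries the
command-specific dword. A hedged sketch of that pattern as a standalone
helper (decode_cqe is hypothetical, not a function in this file):

	#include <linux/kernel.h>

	/* Hypothetical helper; uses struct nvme_completion from the
	 * sketch above. */
	static void decode_cqe(const struct nvme_completion *cqe)
	{
		u32 result = le32_to_cpup(&cqe->result);
		u16 status = le16_to_cpup(&cqe->status) >> 1; /* drop phase bit */

		if (status)	/* non-zero status code: command failed */
			pr_debug("cmd %u failed: status 0x%x\n",
				 cqe->command_id, status);
		else
			pr_debug("cmd %u done: result 0x%x\n",
				 cqe->command_id, result);
	}
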
334 struct nvme_completion *cqe) in abort_completion() argument
338 u16 status = le16_to_cpup(&cqe->status) >> 1; in abort_completion()
339 u32 result = le32_to_cpup(&cqe->result); in abort_completion()
348 struct nvme_completion *cqe) in async_completion() argument
351 cmdinfo->result = le32_to_cpup(&cqe->result); in async_completion()
352 cmdinfo->status = le16_to_cpup(&cqe->status) >> 1; in async_completion()
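
Lines 351-352 stash the decoded fields in a per-command info structure
instead of acting on them, the usual shape when a synchronous submitter
sleeps until its command completes. A loose sketch of that hand-off;
sync_cmd_info and the wake-up mechanism are assumptions, not taken from
this file.

	#include <linux/sched.h>

	struct sync_cmd_info {		/* hypothetical per-command slot */
		struct task_struct *task;
		u32 result;
		int status;
	};

	static void async_completion_sketch(struct sync_cmd_info *cmdinfo,
					    struct nvme_completion *cqe)
	{
		cmdinfo->result = le32_to_cpup(&cqe->result);
		cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
		wake_up_process(cmdinfo->task);	/* assumed hand-off */
	}
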
590 struct nvme_completion *cqe) in req_completion() argument
595 u16 status = le16_to_cpup(&cqe->status) >> 1; in req_completion()
624 u32 result = le32_to_cpup(&cqe->result); in req_completion()
957 struct nvme_completion cqe = nvmeq->cqes[head]; in __nvme_process_cq() local
958 if ((le16_to_cpu(cqe.status) & 1) != phase) in __nvme_process_cq()
960 nvmeq->sq_head = le16_to_cpu(cqe.sq_head); in __nvme_process_cq()
965 if (tag && *tag == cqe.command_id) in __nvme_process_cq()
967 ctx = nvme_finish_cmd(nvmeq, cqe.command_id, &fn); in __nvme_process_cq()
968 fn(nvmeq, ctx, &cqe); in __nvme_process_cq()
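
Lines 957-968 are the consumer loop: copy the entry at the head, check
that its phase bit matches the phase the queue expects, record the new
SQ head, then look up and run the per-command callback. A minimal
sketch of that loop follows; struct nvme_queue_sketch is a trimmed,
hypothetical stand-in for the driver's queue, and the wrap-and-flip
step is inferred from how phase tags work rather than quoted from the
file.

	struct nvme_queue_sketch {	/* hypothetical, trimmed queue */
		struct nvme_completion *cqes;
		u16 cq_head, sq_head, q_depth;
		u8 cq_phase;
	};

	static void process_cq_sketch(struct nvme_queue_sketch *nvmeq)
	{
		u16 head = nvmeq->cq_head;
		u8 phase = nvmeq->cq_phase;

		for (;;) {
			struct nvme_completion cqe = nvmeq->cqes[head];

			/* an entry is valid only once its phase bit
			 * matches the phase we expect */
			if ((le16_to_cpu(cqe.status) & 1) != phase)
				break;
			nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
			if (++head == nvmeq->q_depth) {
				head = 0;
				phase = !phase;	/* wrapped: flip phase */
			}
			/* ... finish the command and run its callback,
			 * as lines 967-968 do ... */
		}
		nvmeq->cq_head = head;
		nvmeq->cq_phase = phase;
	}
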
1008 struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head]; in nvme_irq_check() local
1009 if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase) in nvme_irq_check()
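
Lines 1008-1009 make the same phase comparison but only as a peek: the
head entry is inspected without being consumed, so a stale phase bit
means the interrupt carried no new completion. A sketch; the
IRQ_WAKE_THREAD return is an assumption about how the handler is split,
not quoted from the file.

	#include <linux/interrupt.h>

	static irqreturn_t irq_check_sketch(struct nvme_queue_sketch *nvmeq)
	{
		struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];

		if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
			return IRQ_NONE;	/* nothing new: not ours */
		return IRQ_WAKE_THREAD;		/* assumed threaded handler */
	}
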
1351 struct nvme_completion cqe; in nvme_cancel_queue_ios() local
1362 cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1); in nvme_cancel_queue_ios()
1364 cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1); in nvme_cancel_queue_ios()
1370 fn(nvmeq, ctx, &cqe); in nvme_cancel_queue_ios()
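
Lines 1351-1370 fabricate a completion rather than consume one: the
status is built by shifting the abort code left so that bit 0 stays
zero, with NVME_SC_DNR set on the non-retryable path (1362) and left
clear on the retryable one (1364), and the stack cqe is then fed
through the normal callback (1370). A sketch; the retry flag and the
callback typedef are assumptions for illustration.

	#include <linux/nvme.h>
	#include <linux/string.h>

	typedef void (*completion_fn_sketch)(struct nvme_queue_sketch *nvmeq,
					     void *ctx,
					     struct nvme_completion *cqe);

	static void cancel_io_sketch(struct nvme_queue_sketch *nvmeq,
				     void *ctx, completion_fn_sketch fn,
				     bool retry)	/* retry: assumed */
	{
		struct nvme_completion cqe;

		memset(&cqe, 0, sizeof(cqe));
		if (retry)
			cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
		else
			cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ |
						  NVME_SC_DNR) << 1);
		fn(nvmeq, ctx, &cqe);	/* run the normal handler */
	}
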