qidev              67 drivers/crypto/caam/caamalg_qi.c 	struct device *qidev;
qidev             856 drivers/crypto/caam/caamalg_qi.c 			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
qidev             916 drivers/crypto/caam/caamalg_qi.c 	struct device *qidev;
qidev             923 drivers/crypto/caam/caamalg_qi.c 	qidev = caam_ctx->qidev;
qidev             926 drivers/crypto/caam/caamalg_qi.c 		ecode = caam_jr_strstatus(qidev, status);
qidev             929 drivers/crypto/caam/caamalg_qi.c 	aead_unmap(qidev, edesc, aead_req);
qidev             945 drivers/crypto/caam/caamalg_qi.c 	struct device *qidev = ctx->qidev;
qidev             966 drivers/crypto/caam/caamalg_qi.c 		dev_err(qidev, "could not allocate extended descriptor\n");
qidev             976 drivers/crypto/caam/caamalg_qi.c 			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
qidev             982 drivers/crypto/caam/caamalg_qi.c 		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
qidev             985 drivers/crypto/caam/caamalg_qi.c 			dev_err(qidev, "unable to map source\n");
qidev             995 drivers/crypto/caam/caamalg_qi.c 			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
qidev            1003 drivers/crypto/caam/caamalg_qi.c 			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
qidev            1010 drivers/crypto/caam/caamalg_qi.c 			mapped_src_nents = dma_map_sg(qidev, req->src,
qidev            1013 drivers/crypto/caam/caamalg_qi.c 				dev_err(qidev, "unable to map source\n");
qidev            1022 drivers/crypto/caam/caamalg_qi.c 			mapped_dst_nents = dma_map_sg(qidev, req->dst,
qidev            1026 drivers/crypto/caam/caamalg_qi.c 				dev_err(qidev, "unable to map destination\n");
qidev            1027 drivers/crypto/caam/caamalg_qi.c 				dma_unmap_sg(qidev, req->src, src_nents,
qidev            1065 drivers/crypto/caam/caamalg_qi.c 		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
qidev            1067 drivers/crypto/caam/caamalg_qi.c 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
qidev            1079 drivers/crypto/caam/caamalg_qi.c 		iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
qidev            1080 drivers/crypto/caam/caamalg_qi.c 		if (dma_mapping_error(qidev, iv_dma)) {
qidev            1081 drivers/crypto/caam/caamalg_qi.c 			dev_err(qidev, "unable to map IV\n");
qidev            1082 drivers/crypto/caam/caamalg_qi.c 			caam_unmap(qidev, req->src, req->dst, src_nents,
qidev            1097 drivers/crypto/caam/caamalg_qi.c 	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
qidev            1099 drivers/crypto/caam/caamalg_qi.c 	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
qidev            1100 drivers/crypto/caam/caamalg_qi.c 		dev_err(qidev, "unable to map assoclen\n");
qidev            1101 drivers/crypto/caam/caamalg_qi.c 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
qidev            1119 drivers/crypto/caam/caamalg_qi.c 	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
qidev            1120 drivers/crypto/caam/caamalg_qi.c 	if (dma_mapping_error(qidev, qm_sg_dma)) {
qidev            1121 drivers/crypto/caam/caamalg_qi.c 		dev_err(qidev, "unable to map S/G table\n");
qidev            1122 drivers/crypto/caam/caamalg_qi.c 		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
qidev            1123 drivers/crypto/caam/caamalg_qi.c 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
qidev            1174 drivers/crypto/caam/caamalg_qi.c 	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
qidev            1178 drivers/crypto/caam/caamalg_qi.c 		aead_unmap(ctx->qidev, edesc, req);
qidev            1213 drivers/crypto/caam/caamalg_qi.c 	struct device *qidev = caam_ctx->qidev;
qidev            1217 drivers/crypto/caam/caamalg_qi.c 	dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
qidev            1222 drivers/crypto/caam/caamalg_qi.c 		ecode = caam_jr_strstatus(qidev, status);
qidev            1231 drivers/crypto/caam/caamalg_qi.c 	skcipher_unmap(qidev, edesc, req);
qidev            1251 drivers/crypto/caam/caamalg_qi.c 	struct device *qidev = ctx->qidev;
qidev            1269 drivers/crypto/caam/caamalg_qi.c 		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
qidev            1277 drivers/crypto/caam/caamalg_qi.c 			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
qidev            1282 drivers/crypto/caam/caamalg_qi.c 		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
qidev            1285 drivers/crypto/caam/caamalg_qi.c 			dev_err(qidev, "unable to map source\n");
qidev            1289 drivers/crypto/caam/caamalg_qi.c 		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
qidev            1292 drivers/crypto/caam/caamalg_qi.c 			dev_err(qidev, "unable to map destination\n");
qidev            1293 drivers/crypto/caam/caamalg_qi.c 			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
qidev            1297 drivers/crypto/caam/caamalg_qi.c 		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
qidev            1300 drivers/crypto/caam/caamalg_qi.c 			dev_err(qidev, "unable to map source\n");
qidev            1324 drivers/crypto/caam/caamalg_qi.c 		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
qidev            1326 drivers/crypto/caam/caamalg_qi.c 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
qidev            1334 drivers/crypto/caam/caamalg_qi.c 		dev_err(qidev, "could not allocate extended descriptor\n");
qidev            1335 drivers/crypto/caam/caamalg_qi.c 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
qidev            1345 drivers/crypto/caam/caamalg_qi.c 	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
qidev            1346 drivers/crypto/caam/caamalg_qi.c 	if (dma_mapping_error(qidev, iv_dma)) {
qidev            1347 drivers/crypto/caam/caamalg_qi.c 		dev_err(qidev, "unable to map IV\n");
qidev            1348 drivers/crypto/caam/caamalg_qi.c 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
qidev            1371 drivers/crypto/caam/caamalg_qi.c 	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
qidev            1373 drivers/crypto/caam/caamalg_qi.c 	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
qidev            1374 drivers/crypto/caam/caamalg_qi.c 		dev_err(qidev, "unable to map S/G table\n");
qidev            1375 drivers/crypto/caam/caamalg_qi.c 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
qidev            1416 drivers/crypto/caam/caamalg_qi.c 	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
qidev            1420 drivers/crypto/caam/caamalg_qi.c 		skcipher_unmap(ctx->qidev, edesc, req);
qidev            2451 drivers/crypto/caam/caamalg_qi.c 	ctx->qidev = dev;
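
Taken together, the caamalg_qi.c hits show one caller-side pattern: the QI device pointer is cached once in the transform context (line 2451) and then reused for descriptor-context creation (line 856), for S/G, IV and assoclen DMA mapping, for error reporting, and for request submission (lines 1174 and 1416). The following is a minimal, hedged sketch of that pattern only, not the in-tree code: my_tfm_ctx, my_setup() and my_submit() are hypothetical names, the CPU hint is simplified, and the qi.h API referenced is the in-tree drivers/crypto/caam/qi.h (see the qi.h hits further down).

/*
 * Sketch only, assuming the in-tree drivers/crypto/caam/qi.h API;
 * my_tfm_ctx, my_setup() and my_submit() are hypothetical names.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/types.h>
#include "qi.h"

struct my_tfm_ctx {
	struct device *qidev;		/* cached once, cf. ctx->qidev = dev at line 2451 */
	struct caam_drv_ctx *drv_ctx;
};

static int my_setup(struct my_tfm_ctx *ctx, struct device *dev, u32 *sh_desc)
{
	int cpu = 0;			/* placeholder; the driver tracks a per-context CPU */

	ctx->qidev = dev;
	ctx->drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, sh_desc);
	return PTR_ERR_OR_ZERO(ctx->drv_ctx);
}

static int my_submit(struct my_tfm_ctx *ctx, struct caam_drv_req *drv_req)
{
	int ret = caam_qi_enqueue(ctx->qidev, drv_req);

	if (ret)	/* on failure the real driver unmaps and frees its edesc, cf. line 1178 */
		dev_err(ctx->qidev, "caam_qi_enqueue failed: %d\n", ret);
	return ret;
}
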
qidev             304 drivers/crypto/caam/error.c static int report_qi_status(struct device *qidev, const u32 status,
qidev             321 drivers/crypto/caam/error.c 	dev_err(qidev, "%08x: %s: %s%s\n",
qidev              18 drivers/crypto/caam/error.h #define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
qidev             107 drivers/crypto/caam/qi.c int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
qidev             117 drivers/crypto/caam/qi.c 	addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
qidev             119 drivers/crypto/caam/qi.c 	if (dma_mapping_error(qidev, addr)) {
qidev             120 drivers/crypto/caam/qi.c 		dev_err(qidev, "DMA mapping error for QI enqueue request\n");
qidev             135 drivers/crypto/caam/qi.c 	dev_err(qidev, "qman_enqueue failed: %d\n", ret);
qidev             146 drivers/crypto/caam/qi.c 	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
qidev             147 drivers/crypto/caam/qi.c 	struct caam_drv_private *priv = dev_get_drvdata(qidev);
qidev             152 drivers/crypto/caam/qi.c 		dev_err(qidev, "Non-compound FD from CAAM\n");
qidev             158 drivers/crypto/caam/qi.c 		dev_err(qidev,
qidev             163 drivers/crypto/caam/qi.c 	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
qidev             172 drivers/crypto/caam/qi.c static struct qman_fq *create_caam_req_fq(struct device *qidev,
qidev             191 drivers/crypto/caam/qi.c 		dev_err(qidev, "Failed to create session req FQ\n");
qidev             207 drivers/crypto/caam/qi.c 		dev_err(qidev, "Failed to init session req FQ\n");
qidev             211 drivers/crypto/caam/qi.c 	dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
qidev             222 drivers/crypto/caam/qi.c static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
qidev             231 drivers/crypto/caam/qi.c 		dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
qidev             245 drivers/crypto/caam/qi.c static int kill_fq(struct device *qidev, struct qman_fq *fq)
qidev             252 drivers/crypto/caam/qi.c 		dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
qidev             272 drivers/crypto/caam/qi.c 		ret = empty_retired_fq(qidev, fq);
qidev             274 drivers/crypto/caam/qi.c 			dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
qidev             282 drivers/crypto/caam/qi.c 		dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
qidev             320 drivers/crypto/caam/qi.c 	struct device *qidev = drv_ctx->qidev;
qidev             324 drivers/crypto/caam/qi.c 		dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
qidev             332 drivers/crypto/caam/qi.c 	new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
qidev             335 drivers/crypto/caam/qi.c 		dev_err(qidev, "FQ allocation for shdesc update failed\n");
qidev             345 drivers/crypto/caam/qi.c 		dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
qidev             350 drivers/crypto/caam/qi.c 		if (kill_fq(qidev, new_fq))
qidev             351 drivers/crypto/caam/qi.c 			dev_warn(qidev, "New CAAM FQ kill failed\n");
qidev             364 drivers/crypto/caam/qi.c 	dma_sync_single_for_device(qidev, drv_ctx->context_a,
qidev             372 drivers/crypto/caam/qi.c 		dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);
qidev             381 drivers/crypto/caam/qi.c 		if (kill_fq(qidev, new_fq))
qidev             382 drivers/crypto/caam/qi.c 			dev_warn(qidev, "New CAAM FQ kill failed\n");
qidev             383 drivers/crypto/caam/qi.c 	} else if (kill_fq(qidev, old_fq)) {
qidev             384 drivers/crypto/caam/qi.c 		dev_warn(qidev, "Old CAAM FQ kill failed\n");
qidev             391 drivers/crypto/caam/qi.c struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
qidev             403 drivers/crypto/caam/qi.c 		dev_err(qidev, "Invalid descriptor len: %d words\n",
qidev             421 drivers/crypto/caam/qi.c 	hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
qidev             423 drivers/crypto/caam/qi.c 	if (dma_mapping_error(qidev, hwdesc)) {
qidev             424 drivers/crypto/caam/qi.c 		dev_err(qidev, "DMA map error for preheader + shdesc\n");
qidev             447 drivers/crypto/caam/qi.c 	drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
qidev             450 drivers/crypto/caam/qi.c 		dev_err(qidev, "create_caam_req_fq failed\n");
qidev             451 drivers/crypto/caam/qi.c 		dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
qidev             456 drivers/crypto/caam/qi.c 	drv_ctx->qidev = qidev;
qidev             493 drivers/crypto/caam/qi.c 	if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
qidev             494 drivers/crypto/caam/qi.c 		dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");
qidev             496 drivers/crypto/caam/qi.c 	dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
qidev             503 drivers/crypto/caam/qi.c void caam_qi_shutdown(struct device *qidev)
qidev             516 drivers/crypto/caam/qi.c 		if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
qidev             517 drivers/crypto/caam/qi.c 			dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
qidev             565 drivers/crypto/caam/qi.c 	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
qidev             566 drivers/crypto/caam/qi.c 	struct caam_drv_private *priv = dev_get_drvdata(qidev);
qidev             580 drivers/crypto/caam/qi.c 			dev_err_ratelimited(qidev,
qidev             586 drivers/crypto/caam/qi.c 		dev_err(qidev, "Non-compound FD from CAAM\n");
qidev             592 drivers/crypto/caam/qi.c 		dev_err(qidev,
qidev             597 drivers/crypto/caam/qi.c 	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
qidev             604 drivers/crypto/caam/qi.c static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
qidev             619 drivers/crypto/caam/qi.c 		dev_err(qidev, "Rsp FQ create failed\n");
qidev             638 drivers/crypto/caam/qi.c 		dev_err(qidev, "Rsp FQ init failed\n");
qidev             645 drivers/crypto/caam/qi.c 	dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
qidev             649 drivers/crypto/caam/qi.c static int init_cgr(struct device *qidev)
qidev             658 drivers/crypto/caam/qi.c 		dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
qidev             672 drivers/crypto/caam/qi.c 		dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
qidev             677 drivers/crypto/caam/qi.c 	dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
qidev             681 drivers/crypto/caam/qi.c static int alloc_rsp_fqs(struct device *qidev)
qidev             688 drivers/crypto/caam/qi.c 		ret = alloc_rsp_fq_cpu(qidev, i);
qidev             690 drivers/crypto/caam/qi.c 			dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u", i);
qidev             710 drivers/crypto/caam/qi.c 	struct device *ctrldev = &caam_pdev->dev, *qidev;
qidev             715 drivers/crypto/caam/qi.c 	qidev = ctrldev;
qidev             718 drivers/crypto/caam/qi.c 	err = init_cgr(qidev);
qidev             720 drivers/crypto/caam/qi.c 		dev_err(qidev, "CGR initialization failed: %d\n", err);
qidev             725 drivers/crypto/caam/qi.c 	err = alloc_rsp_fqs(qidev);
qidev             727 drivers/crypto/caam/qi.c 		dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
qidev             742 drivers/crypto/caam/qi.c 		net_dev->dev = *qidev;
qidev             754 drivers/crypto/caam/qi.c 		dev_err(qidev, "Can't allocate CAAM cache\n");
qidev             765 drivers/crypto/caam/qi.c 	dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
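
Within qi.c itself, qidev follows the request round trip: caam_qi_enqueue() DMA-maps the compound frame list req->fd_sgt before handing it to QMan (lines 117-120), and the dequeue callbacks unmap it with the device stored in the driver context (lines 163 and 597). Below is a hedged sketch of just that mapping discipline; the helper name, the DMA direction and the -EIO return are assumptions, not taken from the hits above.

/*
 * Hypothetical helper sketching the fd_sgt mapping done in caam_qi_enqueue();
 * DMA_BIDIRECTIONAL and -EIO are assumptions. Assumes the in-tree
 * struct caam_drv_req layout (fd_sgt, drv_ctx).
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include "qi.h"

static int my_map_fd_sgt(struct device *qidev, struct caam_drv_req *req,
			 dma_addr_t *addr)
{
	*addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, *addr)) {
		dev_err(qidev, "DMA mapping error for QI enqueue request\n");
		return -EIO;
	}

	/*
	 * The real enqueue path now wraps *addr in a compound frame
	 * descriptor for QMan; the response callback later undoes the
	 * mapping via req->drv_ctx->qidev (qi.c lines 163 and 597).
	 */
	return 0;
}
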
qidev              67 drivers/crypto/caam/qi.h 	struct device *qidev;
qidev             107 drivers/crypto/caam/qi.h struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu,
qidev             120 drivers/crypto/caam/qi.h int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req);
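
Finally, the qi.h hits give the surface that caamalg_qi.c consumes: the driver context that stores qidev (line 67), caam_drv_ctx_init() (line 107) and caam_qi_enqueue() (line 120). On the completion side, the caamalg_qi.c callbacks whose bodies appear around lines 916-929 and 1213-1231 pull qidev back out of the crypto context and decode the CAAM status with caam_jr_strstatus(). A hedged sketch of such a callback follows; my_done() and my_req_ctx are hypothetical, and the drv_req->app_ctx back-pointer is an assumption about struct caam_drv_req rather than something shown in these hits.

/*
 * Sketch of a completion callback in the style of the caamalg_qi.c ones;
 * my_done() and my_req_ctx are hypothetical, app_ctx is an assumed member.
 */
#include <linux/device.h>
#include "error.h"
#include "qi.h"

struct my_req_ctx {
	struct device *qidev;
};

static void my_done(struct caam_drv_req *drv_req, u32 status)
{
	struct my_req_ctx *ctx = drv_req->app_ctx;	/* assumed member */
	struct device *qidev = ctx->qidev;
	int ecode = 0;

	dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_jr_strstatus(qidev, status);	/* decode status, cf. lines 926/1222 */

	/* ...unmap this request's DMA with qidev, then complete it with ecode... */
}
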