llq               184 drivers/iommu/arm-smmu-v3.c #define Q_IDX(llq, p)			((p) & ((1 << (llq)->max_n_shift) - 1))
llq               185 drivers/iommu/arm-smmu-v3.c #define Q_WRP(llq, p)			((p) & (1 << (llq)->max_n_shift))
llq               189 drivers/iommu/arm-smmu-v3.c 					 Q_IDX(&((q)->llq), p) *	\
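
The three macro hits above (file lines 184-189) carve a queue pointer into a low index field of max_n_shift bits plus a single wrap bit directly above it; that one extra bit is how the driver tells a full queue apart from an empty one when prod and cons land on the same slot. A minimal standalone sketch of the arithmetic, where only Q_IDX/Q_WRP mirror the driver and every model_* name is illustrative:

	#include <assert.h>
	#include <stdint.h>

	struct ll_queue_model {
		uint32_t max_n_shift;	/* queue holds 1 << max_n_shift entries */
		uint32_t prod, cons;	/* each is (wrap bit | index) */
	};

	#define Q_IDX(q, p)	((p) & ((1u << (q)->max_n_shift) - 1))
	#define Q_WRP(q, p)	((p) & (1u << (q)->max_n_shift))

	static int model_empty(const struct ll_queue_model *q)
	{
		return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
		       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
	}

	static int model_full(const struct ll_queue_model *q)
	{
		/* same index but differing wrap bits: prod has lapped cons */
		return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
		       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
	}

	int main(void)
	{
		struct ll_queue_model q = { .max_n_shift = 3 };	/* 8 entries */

		assert(model_empty(&q));
		q.prod += 8;	/* fill: index wraps to 0, wrap bit flips */
		assert(model_full(&q));
		q.cons += 8;	/* drain: wrap bits match again */
		assert(model_empty(&q));
		return 0;
	}
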
llq               506 drivers/iommu/arm-smmu-v3.c 	struct arm_smmu_ll_queue	llq;
llq               754 drivers/iommu/arm-smmu-v3.c 	writel_relaxed(q->llq.cons, q->cons_reg);
llq               768 drivers/iommu/arm-smmu-v3.c 	if (Q_OVF(prod) != Q_OVF(q->llq.prod))
llq               771 drivers/iommu/arm-smmu-v3.c 	q->llq.prod = prod;
llq               826 drivers/iommu/arm-smmu-v3.c 	if (queue_empty(&q->llq))
llq               829 drivers/iommu/arm-smmu-v3.c 	queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords);
llq               830 drivers/iommu/arm-smmu-v3.c 	queue_inc_cons(&q->llq);
llq               929 drivers/iommu/arm-smmu-v3.c 		ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) *
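
The hits at file lines 754-830 are the consumer side: the queue_remove_raw pattern bails out if the queue is empty, copies ent_dwords 64-bit words from the slot selected by Q_IDX(cons), then bumps cons; the hit at 929 computes a CMD_SYNC MSI target address with the same Q_IDX indexing. A sketch of the dequeue step, reusing the struct and macros from the model above (model_* names are illustrative, not the driver's):

	#include <string.h>

	static uint32_t model_inc_cons(const struct ll_queue_model *q)
	{
		/* +1 on (wrap | index) flips the wrap bit automatically when
		 * the index rolls over; the mask keeps only index + wrap */
		return (q->cons + 1) & ((2u << q->max_n_shift) - 1);
	}

	static int model_dequeue(struct ll_queue_model *q, const uint64_t *ents,
				 size_t ent_dwords, uint64_t *out)
	{
		if (model_empty(q))
			return -1;	/* nothing to read */
		memcpy(out, &ents[Q_IDX(q, q->cons) * ent_dwords],
		       ent_dwords * sizeof(*out));
		q->cons = model_inc_cons(q);
		return 0;
	}
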
llq              1098 drivers/iommu/arm-smmu-v3.c 	struct arm_smmu_ll_queue llq = {
llq              1099 drivers/iommu/arm-smmu-v3.c 		.max_n_shift	= cmdq->q.llq.max_n_shift,
llq              1103 drivers/iommu/arm-smmu-v3.c 	ewidx = BIT_WORD(Q_IDX(&llq, eprod));
llq              1104 drivers/iommu/arm-smmu-v3.c 	ebidx = Q_IDX(&llq, eprod) % BITS_PER_LONG;
llq              1106 drivers/iommu/arm-smmu-v3.c 	while (llq.prod != eprod) {
llq              1111 drivers/iommu/arm-smmu-v3.c 		swidx = BIT_WORD(Q_IDX(&llq, llq.prod));
llq              1112 drivers/iommu/arm-smmu-v3.c 		sbidx = Q_IDX(&llq, llq.prod) % BITS_PER_LONG;
llq              1132 drivers/iommu/arm-smmu-v3.c 			valid = (ULONG_MAX + !!Q_WRP(&llq, llq.prod)) & mask;
llq              1136 drivers/iommu/arm-smmu-v3.c 		llq.prod = queue_inc_prod_n(&llq, limit - sbidx);
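
File lines 1098-1136 are the valid-map bookkeeping: every queue slot owns one bit in a shared bitmap, and a slot's "valid" polarity is the inverse of its wrap bit, so a zero-initialised map reads as all-invalid and bits left over from the previous lap can never be mistaken for fresh entries. That is what the ULONG_MAX + !!Q_WRP() trick at 1132 computes: expected bits are all-ones on an even lap and all-zeroes on an odd one. The driver flips whole words at a time with an atomic XOR; the sketch below simply writes the final polarity one slot at a time, reusing the earlier model (a deliberate simplification, not the driver's code):

	#include <limits.h>

	#define MODEL_BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

	/* sprod/eprod must be (wrap | index) values as used by the model,
	 * otherwise the walk below would not terminate */
	static void model_set_valid_range(unsigned long *bitmap,
					  uint32_t max_n_shift,
					  uint32_t sprod, uint32_t eprod)
	{
		struct ll_queue_model llq = { .max_n_shift = max_n_shift,
					      .prod = sprod };

		while (llq.prod != eprod) {
			unsigned long idx = Q_IDX(&llq, llq.prod);
			unsigned long word = idx / MODEL_BITS_PER_LONG;
			unsigned long bit = idx % MODEL_BITS_PER_LONG;

			if (Q_WRP(&llq, llq.prod))
				bitmap[word] &= ~(1UL << bit);	/* odd lap: valid == 0 */
			else
				bitmap[word] |= 1UL << bit;	/* even lap: valid == 1 */

			llq.prod = (llq.prod + 1) & ((2u << max_n_shift) - 1);
		}
	}
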
llq              1156 drivers/iommu/arm-smmu-v3.c 					     struct arm_smmu_ll_queue *llq)
llq              1168 drivers/iommu/arm-smmu-v3.c 		WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));
llq              1170 drivers/iommu/arm-smmu-v3.c 		llq->val = READ_ONCE(cmdq->q.llq.val);
llq              1176 drivers/iommu/arm-smmu-v3.c 		llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
llq              1177 drivers/iommu/arm-smmu-v3.c 		if (!queue_full(llq))
llq              1191 drivers/iommu/arm-smmu-v3.c 					  struct arm_smmu_ll_queue *llq)
llq              1196 drivers/iommu/arm-smmu-v3.c 	u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
llq              1206 drivers/iommu/arm-smmu-v3.c 	llq->cons = ret ? llq->prod : queue_inc_prod_n(llq, 1);
llq              1215 drivers/iommu/arm-smmu-v3.c 					       struct arm_smmu_ll_queue *llq)
llq              1219 drivers/iommu/arm-smmu-v3.c 	u32 prod = llq->prod;
llq              1223 drivers/iommu/arm-smmu-v3.c 	llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
llq              1225 drivers/iommu/arm-smmu-v3.c 		if (queue_consumed(llq, prod))
llq              1258 drivers/iommu/arm-smmu-v3.c 		llq->cons = readl(cmdq->q.cons_reg);
llq              1265 drivers/iommu/arm-smmu-v3.c 					 struct arm_smmu_ll_queue *llq)
llq              1269 drivers/iommu/arm-smmu-v3.c 		return __arm_smmu_cmdq_poll_until_msi(smmu, llq);
llq              1271 drivers/iommu/arm-smmu-v3.c 	return __arm_smmu_cmdq_poll_until_consumed(smmu, llq);
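
The dispatcher at file lines 1265-1271 picks between two ways of waiting for a CMD_SYNC: an MSI completion the hardware writes straight into the command's own queue slot (1191-1206), or polling cons until it has moved past the command (1215-1258). The polling case is the interesting arithmetic, because "past" must honour the wrap bit. A standalone predicate over the earlier model (model_consumed is an illustrative name for what the queue_consumed() call at 1225 checks):

	static int model_consumed(const struct ll_queue_model *q, uint32_t prod)
	{
		/* Same wrap bit: cons has passed prod once its index is
		 * strictly greater. Different wrap bit: cons has lapped, so
		 * everything up to and including prod's index is done. */
		if (Q_WRP(q, q->cons) == Q_WRP(q, prod))
			return Q_IDX(q, q->cons) > Q_IDX(q, prod);
		return Q_IDX(q, q->cons) <= Q_IDX(q, prod);
	}
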
llq              1278 drivers/iommu/arm-smmu-v3.c 	struct arm_smmu_ll_queue llq = {
llq              1279 drivers/iommu/arm-smmu-v3.c 		.max_n_shift	= cmdq->q.llq.max_n_shift,
llq              1286 drivers/iommu/arm-smmu-v3.c 		prod = queue_inc_prod_n(&llq, i);
llq              1315 drivers/iommu/arm-smmu-v3.c 	struct arm_smmu_ll_queue llq = {
llq              1316 drivers/iommu/arm-smmu-v3.c 		.max_n_shift = cmdq->q.llq.max_n_shift,
llq              1317 drivers/iommu/arm-smmu-v3.c 	}, head = llq;
llq              1322 drivers/iommu/arm-smmu-v3.c 	llq.val = READ_ONCE(cmdq->q.llq.val);
llq              1326 drivers/iommu/arm-smmu-v3.c 		while (!queue_has_space(&llq, n + sync)) {
llq              1328 drivers/iommu/arm-smmu-v3.c 			if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq))
llq              1333 drivers/iommu/arm-smmu-v3.c 		head.cons = llq.cons;
llq              1334 drivers/iommu/arm-smmu-v3.c 		head.prod = queue_inc_prod_n(&llq, n + sync) |
llq              1337 drivers/iommu/arm-smmu-v3.c 		old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val);
llq              1338 drivers/iommu/arm-smmu-v3.c 		if (old == llq.val)
llq              1341 drivers/iommu/arm-smmu-v3.c 		llq.val = old;
llq              1343 drivers/iommu/arm-smmu-v3.c 	owner = !(llq.prod & CMDQ_PROD_OWNED_FLAG);
llq              1345 drivers/iommu/arm-smmu-v3.c 	llq.prod &= ~CMDQ_PROD_OWNED_FLAG;
llq              1351 drivers/iommu/arm-smmu-v3.c 	arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n);
llq              1353 drivers/iommu/arm-smmu-v3.c 		prod = queue_inc_prod_n(&llq, n);
llq              1368 drivers/iommu/arm-smmu-v3.c 	arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod);
llq              1373 drivers/iommu/arm-smmu-v3.c 		atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod);
llq              1377 drivers/iommu/arm-smmu-v3.c 						   &cmdq->q.llq.atomic.prod);
llq              1385 drivers/iommu/arm-smmu-v3.c 		arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod);
llq              1403 drivers/iommu/arm-smmu-v3.c 		llq.prod = queue_inc_prod_n(&llq, n);
llq              1404 drivers/iommu/arm-smmu-v3.c 		ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq);
llq              1408 drivers/iommu/arm-smmu-v3.c 					    llq.prod,
llq              1418 drivers/iommu/arm-smmu-v3.c 			WRITE_ONCE(cmdq->q.llq.cons, llq.cons);
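
File lines 1315-1418 are the lock-free producer path: each CPU snapshots the combined prod/cons word, reserves n + sync slots with a single cmpxchg, and the CPU that observed the OWNED flag clear becomes the owner that later publishes everyone's prod to the hardware, clearing the flag again through the atomic hit at 1377. A compressed model of just the reservation step using C11 atomics; the 64-bit field layout and the flag position here are assumptions of the sketch, not the driver's definitions:

	#include <stdatomic.h>
	#include <stdint.h>

	#define MODEL_OWNED_FLAG	(1u << 31)	/* assumed flag position */

	struct model_cmdq {
		_Atomic uint64_t val;	/* assumed layout: prod low, cons high */
		uint32_t max_n_shift;
	};

	/* Returns the caller's starting prod (flag stripped); *owner is set
	 * when this caller must later publish the whole batch. */
	static uint32_t model_reserve(struct model_cmdq *cmdq, uint32_t n,
				      int *owner)
	{
		uint64_t old = atomic_load_explicit(&cmdq->val,
						    memory_order_relaxed);

		for (;;) {
			uint32_t prod = (uint32_t)old;
			uint32_t next = MODEL_OWNED_FLAG |
				(((prod & ~MODEL_OWNED_FLAG) + n) &
				 ((2u << cmdq->max_n_shift) - 1));
			uint64_t desired = (old & ~0xffffffffull) | next;

			/* A real implementation first waits for queue space;
			 * that check is elided from this sketch. */
			if (atomic_compare_exchange_weak_explicit(&cmdq->val,
					&old, desired, memory_order_relaxed,
					memory_order_relaxed)) {
				*owner = !(prod & MODEL_OWNED_FLAG);
				return prod & ~MODEL_OWNED_FLAG;
			}
			/* on failure, old was reloaded; just retry */
		}
	}

Non-owners then only wait for their own entries to become visible via the owner_prod and valid-map machinery shown at 1368-1385, rather than serialising on a lock.
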
llq              1699 drivers/iommu/arm-smmu-v3.c 	struct arm_smmu_ll_queue *llq = &q->llq;
llq              1719 drivers/iommu/arm-smmu-v3.c 	} while (!queue_empty(llq));
llq              1722 drivers/iommu/arm-smmu-v3.c 	llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
llq              1723 drivers/iommu/arm-smmu-v3.c 		    Q_IDX(llq, llq->cons);
llq              1769 drivers/iommu/arm-smmu-v3.c 	struct arm_smmu_ll_queue *llq = &q->llq;
llq              1778 drivers/iommu/arm-smmu-v3.c 	} while (!queue_empty(llq));
llq              1781 drivers/iommu/arm-smmu-v3.c 	llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
llq              1782 drivers/iommu/arm-smmu-v3.c 		      Q_IDX(llq, llq->cons);
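
Both interrupt threads end identically (file lines 1722-1723 and 1781-1782): after draining until queue_empty(), the overflow flag from prod is folded into cons, so the subsequent cons write-back also acknowledges any overflow the hardware reported. Modeled on the earlier struct, with the flag position an assumption of the sketch:

	#define MODEL_OVF_FLAG	(1u << 31)		/* assumed position */
	#define MODEL_OVF(p)	((p) & MODEL_OVF_FLAG)

	static void model_ack_overflow(struct ll_queue_model *q)
	{
		/* keep our own wrap + index, adopt prod's overflow flag */
		q->cons = MODEL_OVF(q->prod) | Q_WRP(q, q->cons) |
			  Q_IDX(q, q->cons);
	}
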
llq              2755 drivers/iommu/arm-smmu-v3.c 		qsz = ((1 << q->llq.max_n_shift) * dwords) << 3;
llq              2761 drivers/iommu/arm-smmu-v3.c 		q->llq.max_n_shift--;
llq              2773 drivers/iommu/arm-smmu-v3.c 			 1 << q->llq.max_n_shift, name);
llq              2782 drivers/iommu/arm-smmu-v3.c 	q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift);
llq              2784 drivers/iommu/arm-smmu-v3.c 	q->llq.prod = q->llq.cons = 0;
llq              2798 drivers/iommu/arm-smmu-v3.c 	unsigned int nents = 1 << cmdq->q.llq.max_n_shift;
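
The allocation hits at 2755-2784 size a queue as entries × 64-bit words per entry × 8 bytes (the << 3 at 2755), shrink max_n_shift and retry when the DMA allocation fails, and finally program the log2 size into the queue base register (2782). A sketch of the size-and-retry shape, with malloc standing in for the driver's DMA allocator and the stop-at-a-page detail elided (both assumptions of the model):

	#include <stdlib.h>

	static size_t model_queue_bytes(uint32_t max_n_shift, size_t dwords)
	{
		/* ((1 << shift) * dwords) << 3, as at file line 2755 */
		return ((size_t)1 << max_n_shift) * dwords * sizeof(uint64_t);
	}

	static void *model_alloc_queue(uint32_t *max_n_shift, size_t dwords)
	{
		void *base;

		while (!(base = malloc(model_queue_bytes(*max_n_shift,
							 dwords)))) {
			if (!*max_n_shift)
				return NULL;	/* cannot shrink further */
			(*max_n_shift)--;	/* halve the queue and retry */
		}
		return base;
	}
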
llq              3208 drivers/iommu/arm-smmu-v3.c 	writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
llq              3209 drivers/iommu/arm-smmu-v3.c 	writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
llq              3236 drivers/iommu/arm-smmu-v3.c 	writel_relaxed(smmu->evtq.q.llq.prod,
llq              3238 drivers/iommu/arm-smmu-v3.c 	writel_relaxed(smmu->evtq.q.llq.cons,
llq              3253 drivers/iommu/arm-smmu-v3.c 		writel_relaxed(smmu->priq.q.llq.prod,
llq              3255 drivers/iommu/arm-smmu-v3.c 		writel_relaxed(smmu->priq.q.llq.cons,
llq              3409 drivers/iommu/arm-smmu-v3.c 	smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
llq              3411 drivers/iommu/arm-smmu-v3.c 	if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) {
llq              3423 drivers/iommu/arm-smmu-v3.c 	smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT,
llq              3425 drivers/iommu/arm-smmu-v3.c 	smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT,
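
Lines 3409-3425 clamp each queue's max_n_shift to the smaller of a driver cap and what the hardware's ID register advertises, and reject a command queue too small to hold a full command batch. The shape of that check, with both limits assumed for illustration:

	#define MODEL_CMDQ_MAX_SHIFT	8	/* assumed driver cap */
	#define MODEL_BATCH_ENTRIES	64	/* assumed batch size */

	static int model_clamp_cmdq_shift(uint32_t hw_shift, uint32_t *shift)
	{
		*shift = hw_shift < MODEL_CMDQ_MAX_SHIFT ?
			 hw_shift : MODEL_CMDQ_MAX_SHIFT;
		/* the queue must hold more than one full command batch */
		if ((1u << *shift) <= MODEL_BATCH_ENTRIES)
			return -1;
		return 0;
	}
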
llq               902 drivers/net/ethernet/amazon/ena/ena_admin_defs.h 		struct ena_admin_feature_llq_desc llq;
llq               955 drivers/net/ethernet/amazon/ena/ena_admin_defs.h 		struct ena_admin_feature_llq_desc llq;
llq               616 drivers/net/ethernet/amazon/ena/ena_com.c 	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
llq               617 drivers/net/ethernet/amazon/ena/ena_com.c 	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
llq               618 drivers/net/ethernet/amazon/ena/ena_com.c 	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
llq               619 drivers/net/ethernet/amazon/ena/ena_com.c 	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
llq              1978 drivers/net/ethernet/amazon/ena/ena_com.c 		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
llq              1979 drivers/net/ethernet/amazon/ena/ena_com.c 		       sizeof(get_resp.u.llq));
llq              1981 drivers/net/ethernet/amazon/ena/ena_com.c 		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
llq               361 drivers/net/ethernet/amazon/ena/ena_com.h 	struct ena_admin_feature_llq_desc llq;
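
The remaining hits are a different llq entirely: the Amazon ENA NIC driver, where llq is the "low latency queue" admin feature descriptor, unrelated to the SMMU's low-level queue. The ena_com hits at 1978-1981 show the usual feature-probe pattern: copy the descriptor out of the admin response when the device advertises the feature, otherwise zero the local copy so later checks read as "no LLQ". A generic sketch of that pattern (all model_* names illustrative; the two fields mirror names visible in the listing):

	#include <stdint.h>
	#include <string.h>

	struct model_llq_desc {
		uint32_t max_llq_num;	/* how many LLQ TX queues exist */
		uint32_t max_llq_depth;	/* deepest supported LLQ ring */
	};

	static void model_probe_llq(int supported,
				    const struct model_llq_desc *resp,
				    struct model_llq_desc *out)
	{
		if (supported)
			memcpy(out, resp, sizeof(*out));
		else
			memset(out, 0, sizeof(*out));	/* feature absent */
	}
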
llq              3168 drivers/net/ethernet/amazon/ena/ena_netdev.c 		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
llq              3186 drivers/net/ethernet/amazon/ena/ena_netdev.c 					   struct ena_admin_feature_llq_desc *llq,
llq              3203 drivers/net/ethernet/amazon/ena/ena_netdev.c 	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
llq              3358 drivers/net/ethernet/amazon/ena/ena_netdev.c 	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
llq              3374 drivers/net/ethernet/amazon/ena/ena_netdev.c 						  llq->max_llq_depth);
llq              3392 drivers/net/ethernet/amazon/ena/ena_netdev.c 						  llq->max_llq_depth);
llq              3492 drivers/net/ethernet/amazon/ena/ena_netdev.c 	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
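
ena_set_queues_placement_policy (file lines 3186-3203, called at 3492) decides whether TX descriptors are pushed through the device's LLQ memory window or kept in regular host-memory rings; the hit at 3168 shows the advertised max_llq_num bounding the TX queue count in the device-placement case. A sketch of the decision's shape, where the no-BAR fallback condition and the policy names are assumptions of this sketch:

	enum model_placement {
		MODEL_PLACEMENT_HOST,	/* descriptor rings in host memory */
		MODEL_PLACEMENT_DEV,	/* descriptors pushed via LLQ window */
	};

	static enum model_placement
	model_pick_placement(uint32_t max_llq_num, int has_llq_bar)
	{
		/* no LLQ queues advertised, or no device window to write
		 * them through: fall back to host-memory rings */
		if (!max_llq_num || !has_llq_bar)
			return MODEL_PLACEMENT_HOST;
		return MODEL_PLACEMENT_DEV;
	}
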