re_chan            89 drivers/dma/fsl_raid.c 	struct fsl_re_chan *re_chan;
re_chan            94 drivers/dma/fsl_raid.c 	re_chan = container_of(tx->chan, struct fsl_re_chan, chan);
re_chan            96 drivers/dma/fsl_raid.c 	spin_lock_irqsave(&re_chan->desc_lock, flags);
re_chan            98 drivers/dma/fsl_raid.c 	list_add_tail(&desc->node, &re_chan->submit_q);
re_chan            99 drivers/dma/fsl_raid.c 	spin_unlock_irqrestore(&re_chan->desc_lock, flags);
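
The five hits above (lines 89-99) sit in the channel's tx_submit callback: the generic dma_chan handed in by the dmaengine core is converted back to the driver's fsl_re_chan wrapper with container_of(), and the software descriptor is parked on submit_q under desc_lock. A minimal sketch of that path, assuming standard dmaengine cookie handling for the lines the listing does not show:

    static dma_cookie_t fsl_re_tx_submit(struct dma_async_tx_descriptor *tx)
    {
        /* 'chan' is embedded in struct fsl_re_chan, so container_of()
         * recovers the driver wrapper from the generic dma_chan. */
        struct fsl_re_chan *re_chan =
            container_of(tx->chan, struct fsl_re_chan, chan);
        struct fsl_re_desc *desc =
            container_of(tx, struct fsl_re_desc, async_tx);
        dma_cookie_t cookie;
        unsigned long flags;

        spin_lock_irqsave(&re_chan->desc_lock, flags);
        cookie = dma_cookie_assign(tx);    /* assumed: standard dmaengine helper */
        /* Queue on the software submit list; nothing reaches the hardware
         * ring until issue_pending runs. */
        list_add_tail(&desc->node, &re_chan->submit_q);
        spin_unlock_irqrestore(&re_chan->desc_lock, flags);

        return cookie;
    }
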
re_chan           107 drivers/dma/fsl_raid.c 	struct fsl_re_chan *re_chan;
re_chan           112 drivers/dma/fsl_raid.c 	re_chan = container_of(chan, struct fsl_re_chan, chan);
re_chan           114 drivers/dma/fsl_raid.c 	spin_lock_irqsave(&re_chan->desc_lock, flags);
re_chan           116 drivers/dma/fsl_raid.c 		in_be32(&re_chan->jrregs->inbring_slot_avail));
re_chan           118 drivers/dma/fsl_raid.c 	list_for_each_entry_safe(desc, _desc, &re_chan->submit_q, node) {
re_chan           122 drivers/dma/fsl_raid.c 		list_move_tail(&desc->node, &re_chan->active_q);
re_chan           124 drivers/dma/fsl_raid.c 		memcpy(&re_chan->inb_ring_virt_addr[re_chan->inb_count],
re_chan           127 drivers/dma/fsl_raid.c 		re_chan->inb_count = (re_chan->inb_count + 1) &
re_chan           129 drivers/dma/fsl_raid.c 		out_be32(&re_chan->jrregs->inbring_add_job, FSL_RE_ADD_JOB(1));
re_chan           132 drivers/dma/fsl_raid.c 	spin_unlock_irqrestore(&re_chan->desc_lock, flags);
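
Lines 107-132 are the issue_pending callback, which drains submit_q into the hardware inbound job ring while slots are available. A sketch reconstructed around the hits; the slot-availability macro (FSL_RE_SLOT_AVAIL) and the memcpy source operand (desc->hwdesc) are assumptions, named by analogy with the outbound-ring hits below:

    static void fsl_re_issue_pending(struct dma_chan *chan)
    {
        struct fsl_re_chan *re_chan =
            container_of(chan, struct fsl_re_chan, chan);
        struct fsl_re_desc *desc, *_desc;
        unsigned long flags;
        int avail;

        spin_lock_irqsave(&re_chan->desc_lock, flags);
        /* How many inbound ring slots the hardware currently offers. */
        avail = FSL_RE_SLOT_AVAIL(
            in_be32(&re_chan->jrregs->inbring_slot_avail));

        list_for_each_entry_safe(desc, _desc, &re_chan->submit_q, node) {
            if (!avail)
                break;

            /* Mark the descriptor in-flight ... */
            list_move_tail(&desc->node, &re_chan->active_q);

            /* ... copy its hardware descriptor into the next inbound ring
             * slot and advance the index modulo the ring size. */
            memcpy(&re_chan->inb_ring_virt_addr[re_chan->inb_count],
                   &desc->hwdesc, sizeof(desc->hwdesc));
            re_chan->inb_count = (re_chan->inb_count + 1) &
                                 FSL_RE_RING_SIZE_MASK;

            /* Doorbell: tell the job ring one new job was added. */
            out_be32(&re_chan->jrregs->inbring_add_job, FSL_RE_ADD_JOB(1));
            avail--;
        }
        spin_unlock_irqrestore(&re_chan->desc_lock, flags);
    }
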
re_chan           142 drivers/dma/fsl_raid.c static void fsl_re_cleanup_descs(struct fsl_re_chan *re_chan)
re_chan           147 drivers/dma/fsl_raid.c 	spin_lock_irqsave(&re_chan->desc_lock, flags);
re_chan           148 drivers/dma/fsl_raid.c 	list_for_each_entry_safe(desc, _desc, &re_chan->ack_q, node) {
re_chan           150 drivers/dma/fsl_raid.c 			list_move_tail(&desc->node, &re_chan->free_q);
re_chan           152 drivers/dma/fsl_raid.c 	spin_unlock_irqrestore(&re_chan->desc_lock, flags);
re_chan           154 drivers/dma/fsl_raid.c 	fsl_re_issue_pending(&re_chan->chan);
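
Lines 142-154 are the recycling pass, and together with the queues above they trace the descriptor lifecycle: submit_q (accepted) -> active_q (pushed to hardware) -> ack_q (completed, awaiting the client's ack) -> free_q (reusable). A sketch, assuming async_tx_test_ack() is the check on the elided line 149:

    static void fsl_re_cleanup_descs(struct fsl_re_chan *re_chan)
    {
        struct fsl_re_desc *desc, *_desc;
        unsigned long flags;

        spin_lock_irqsave(&re_chan->desc_lock, flags);
        list_for_each_entry_safe(desc, _desc, &re_chan->ack_q, node) {
            /* Only descriptors the client has acknowledged may be reused. */
            if (async_tx_test_ack(&desc->async_tx))
                list_move_tail(&desc->node, &re_chan->free_q);
        }
        spin_unlock_irqrestore(&re_chan->desc_lock, flags);

        /* Completed work freed ring slots, so try to push more jobs. */
        fsl_re_issue_pending(&re_chan->chan);
    }
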
re_chan           159 drivers/dma/fsl_raid.c 	struct fsl_re_chan *re_chan;
re_chan           166 drivers/dma/fsl_raid.c 	re_chan = dev_get_drvdata((struct device *)data);
re_chan           168 drivers/dma/fsl_raid.c 	fsl_re_cleanup_descs(re_chan);
re_chan           170 drivers/dma/fsl_raid.c 	spin_lock_irqsave(&re_chan->desc_lock, flags);
re_chan           171 drivers/dma/fsl_raid.c 	count =	FSL_RE_SLOT_FULL(in_be32(&re_chan->jrregs->oubring_slot_full));
re_chan           174 drivers/dma/fsl_raid.c 		hwdesc = &re_chan->oub_ring_virt_addr[re_chan->oub_count];
re_chan           175 drivers/dma/fsl_raid.c 		list_for_each_entry_safe(desc, _desc, &re_chan->active_q,
re_chan           187 drivers/dma/fsl_raid.c 			list_move_tail(&desc->node, &re_chan->ack_q);
re_chan           189 drivers/dma/fsl_raid.c 			dev_err(re_chan->dev,
re_chan           193 drivers/dma/fsl_raid.c 		oub_count = (re_chan->oub_count + 1) & FSL_RE_RING_SIZE_MASK;
re_chan           194 drivers/dma/fsl_raid.c 		re_chan->oub_count = oub_count;
re_chan           196 drivers/dma/fsl_raid.c 		out_be32(&re_chan->jrregs->oubring_job_rmvd,
re_chan           199 drivers/dma/fsl_raid.c 	spin_unlock_irqrestore(&re_chan->desc_lock, flags);
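
Lines 159-199 belong to the completion tasklet: it reads how many outbound-ring slots the hardware has filled, matches each completed hardware descriptor against the in-flight descriptors on active_q, and retires matches to ack_q. A condensed sketch; the match helper, the removal macro (FSL_RE_RMVD_JOB), and the error message are illustrative placeholders for lines the listing does not show:

    static void fsl_re_dequeue(unsigned long data)
    {
        struct fsl_re_chan *re_chan = dev_get_drvdata((struct device *)data);
        struct fsl_re_desc *desc, *_desc;
        struct fsl_re_hw_desc *hwdesc;
        unsigned long flags;
        unsigned int count;
        bool found;

        /* Recycle acknowledged descriptors before draining completions. */
        fsl_re_cleanup_descs(re_chan);

        spin_lock_irqsave(&re_chan->desc_lock, flags);
        count = FSL_RE_SLOT_FULL(in_be32(&re_chan->jrregs->oubring_slot_full));
        while (count--) {
            found = false;
            hwdesc = &re_chan->oub_ring_virt_addr[re_chan->oub_count];
            list_for_each_entry_safe(desc, _desc, &re_chan->active_q, node) {
                /* Hypothetical helper: match the completed ring entry to
                 * its software descriptor (by hardware address). */
                if (fsl_re_hwdesc_matches(desc, hwdesc)) {
                    found = true;
                    break;
                }
            }

            if (found) {
                /* Complete the dmaengine cookie/callback here (elided),
                 * then hold the descriptor until the client acks it. */
                list_move_tail(&desc->node, &re_chan->ack_q);
            } else {
                dev_err(re_chan->dev, "completed hwdesc not on active_q\n");
            }

            /* Consume the outbound slot and tell hardware it was removed. */
            re_chan->oub_count = (re_chan->oub_count + 1) & FSL_RE_RING_SIZE_MASK;
            out_be32(&re_chan->jrregs->oubring_job_rmvd,
                     FSL_RE_RMVD_JOB(1));    /* assumed, by analogy with FSL_RE_ADD_JOB */
        }
        spin_unlock_irqrestore(&re_chan->desc_lock, flags);
    }
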
re_chan           205 drivers/dma/fsl_raid.c 	struct fsl_re_chan *re_chan;
re_chan           208 drivers/dma/fsl_raid.c 	re_chan = dev_get_drvdata((struct device *)data);
re_chan           210 drivers/dma/fsl_raid.c 	irqstate = in_be32(&re_chan->jrregs->jr_interrupt_status);
re_chan           220 drivers/dma/fsl_raid.c 		status = in_be32(&re_chan->jrregs->jr_status);
re_chan           221 drivers/dma/fsl_raid.c 		dev_err(re_chan->dev, "chan error irqstate: %x, status: %x\n",
re_chan           226 drivers/dma/fsl_raid.c 	out_be32(&re_chan->jrregs->jr_interrupt_status, FSL_RE_CLR_INTR);
re_chan           228 drivers/dma/fsl_raid.c 	tasklet_schedule(&re_chan->irqtask);
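
Lines 205-228 are the per-job-ring interrupt handler: read the interrupt status, report errors, acknowledge the interrupt, and defer the actual ring draining to the tasklet above. Reconstructed around the hits; the early return and the error mask name (FSL_RE_ERROR) are assumptions following the usual irqreturn_t convention:

    static irqreturn_t fsl_re_isr(int irq, void *data)
    {
        struct fsl_re_chan *re_chan = dev_get_drvdata((struct device *)data);
        u32 irqstate, status;

        irqstate = in_be32(&re_chan->jrregs->jr_interrupt_status);
        if (!irqstate)
            return IRQ_NONE;    /* not our interrupt */

        /* Errors are only reported; recovery is left to upper layers. */
        if (irqstate & FSL_RE_ERROR) {    /* assumed mask name */
            status = in_be32(&re_chan->jrregs->jr_status);
            dev_err(re_chan->dev, "chan error irqstate: %x, status: %x\n",
                    irqstate, status);
        }

        /* Acknowledge, then let the tasklet drain the outbound ring. */
        out_be32(&re_chan->jrregs->jr_interrupt_status, FSL_RE_CLR_INTR);
        tasklet_schedule(&re_chan->irqtask);

        return IRQ_HANDLED;
    }
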
re_chan           251 drivers/dma/fsl_raid.c static struct fsl_re_desc *fsl_re_init_desc(struct fsl_re_chan *re_chan,
re_chan           255 drivers/dma/fsl_raid.c 	desc->re_chan = re_chan;
re_chan           257 drivers/dma/fsl_raid.c 	dma_async_tx_descriptor_init(&desc->async_tx, &re_chan->chan);
re_chan           272 drivers/dma/fsl_raid.c static struct fsl_re_desc *fsl_re_chan_alloc_desc(struct fsl_re_chan *re_chan,
re_chan           280 drivers/dma/fsl_raid.c 	fsl_re_cleanup_descs(re_chan);
re_chan           282 drivers/dma/fsl_raid.c 	spin_lock_irqsave(&re_chan->desc_lock, lock_flag);
re_chan           283 drivers/dma/fsl_raid.c 	if (!list_empty(&re_chan->free_q)) {
re_chan           285 drivers/dma/fsl_raid.c 		desc = list_first_entry(&re_chan->free_q,
re_chan           291 drivers/dma/fsl_raid.c 	spin_unlock_irqrestore(&re_chan->desc_lock, lock_flag);
re_chan           298 drivers/dma/fsl_raid.c 		cf = dma_pool_alloc(re_chan->re_dev->cf_desc_pool, GFP_NOWAIT,
re_chan           305 drivers/dma/fsl_raid.c 		desc = fsl_re_init_desc(re_chan, desc, cf, paddr);
re_chan           308 drivers/dma/fsl_raid.c 		spin_lock_irqsave(&re_chan->desc_lock, lock_flag);
re_chan           309 drivers/dma/fsl_raid.c 		re_chan->alloc_count++;
re_chan           310 drivers/dma/fsl_raid.c 		spin_unlock_irqrestore(&re_chan->desc_lock, lock_flag);
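
Lines 251-310 cover descriptor allocation: fsl_re_init_desc() ties a descriptor to its channel and initializes its dma_async_tx_descriptor, while fsl_re_chan_alloc_desc() first tries to reuse an entry from free_q, falling back to a fresh allocation (plus a command frame from the device's DMA pool) and bumping alloc_count. A sketch of that allocator; the kzalloc call, list_del, and flag assignment stand in for lines the listing omits:

    static struct fsl_re_desc *fsl_re_chan_alloc_desc(struct fsl_re_chan *re_chan,
                                                      unsigned long flags)
    {
        struct fsl_re_desc *desc = NULL;
        unsigned long lock_flag;
        dma_addr_t paddr;
        void *cf;

        /* Recycle acknowledged descriptors back onto free_q first. */
        fsl_re_cleanup_descs(re_chan);

        spin_lock_irqsave(&re_chan->desc_lock, lock_flag);
        if (!list_empty(&re_chan->free_q)) {
            desc = list_first_entry(&re_chan->free_q,
                                    struct fsl_re_desc, node);
            list_del(&desc->node);           /* assumed */
            desc->async_tx.flags = flags;    /* assumed */
        }
        spin_unlock_irqrestore(&re_chan->desc_lock, lock_flag);

        if (!desc) {
            /* Nothing to reuse: allocate a descriptor and a command
             * frame from the per-device DMA pool. */
            desc = kzalloc(sizeof(*desc), GFP_NOWAIT);    /* assumed */
            if (!desc)
                return NULL;

            cf = dma_pool_alloc(re_chan->re_dev->cf_desc_pool, GFP_NOWAIT,
                                &paddr);
            if (!cf) {
                kfree(desc);
                return NULL;
            }

            desc = fsl_re_init_desc(re_chan, desc, cf, paddr);
            desc->async_tx.flags = flags;    /* assumed */

            spin_lock_irqsave(&re_chan->desc_lock, lock_flag);
            re_chan->alloc_count++;
            spin_unlock_irqrestore(&re_chan->desc_lock, lock_flag);
        }

        return desc;
    }
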
re_chan           321 drivers/dma/fsl_raid.c 	struct fsl_re_chan *re_chan;
re_chan           330 drivers/dma/fsl_raid.c 	re_chan = container_of(chan, struct fsl_re_chan, chan);
re_chan           332 drivers/dma/fsl_raid.c 		dev_err(re_chan->dev, "genq tx length %zu, max length %d\n",
re_chan           337 drivers/dma/fsl_raid.c 	desc = fsl_re_chan_alloc_desc(re_chan, flags);
re_chan           408 drivers/dma/fsl_raid.c 	struct fsl_re_chan *re_chan;
re_chan           417 drivers/dma/fsl_raid.c 	re_chan = container_of(chan, struct fsl_re_chan, chan);
re_chan           419 drivers/dma/fsl_raid.c 		dev_err(re_chan->dev, "pq tx length is %zu, max length is %d\n",
re_chan           461 drivers/dma/fsl_raid.c 	desc = fsl_re_chan_alloc_desc(re_chan, flags);
re_chan           510 drivers/dma/fsl_raid.c 			dev_err(re_chan->dev, "PQ tx continuation error!\n");
re_chan           530 drivers/dma/fsl_raid.c 	struct fsl_re_chan *re_chan;
re_chan           537 drivers/dma/fsl_raid.c 	re_chan = container_of(chan, struct fsl_re_chan, chan);
re_chan           540 drivers/dma/fsl_raid.c 		dev_err(re_chan->dev, "cp tx length is %zu, max length is %d\n",
re_chan           545 drivers/dma/fsl_raid.c 	desc = fsl_re_chan_alloc_desc(re_chan, flags);
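
Lines 321-545 are the hits inside the three prep callbacks (genq/XOR, PQ, and memcpy). Each shares the same preamble: recover the channel with container_of(), reject transfers longer than the hardware limit with dev_err(), then take a descriptor from fsl_re_chan_alloc_desc() and fill in its command frame. A minimal sketch of that shared shape using the memcpy case; FSL_RE_MAX_DATA_LEN is the assumed name of the limit, and the command-frame setup is elided:

    static struct dma_async_tx_descriptor *
    fsl_re_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
                           dma_addr_t src, size_t len, unsigned long flags)
    {
        struct fsl_re_chan *re_chan;
        struct fsl_re_desc *desc;

        re_chan = container_of(chan, struct fsl_re_chan, chan);
        if (len > FSL_RE_MAX_DATA_LEN) {    /* assumed limit name */
            dev_err(re_chan->dev, "cp tx length is %zu, max length is %d\n",
                    len, FSL_RE_MAX_DATA_LEN);
            return NULL;
        }

        desc = fsl_re_chan_alloc_desc(re_chan, flags);
        if (!desc)
            return NULL;

        /* ... program the copy command frame for src -> dest ... */

        return &desc->async_tx;
    }
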
re_chan           575 drivers/dma/fsl_raid.c 	struct fsl_re_chan *re_chan;
re_chan           581 drivers/dma/fsl_raid.c 	re_chan = container_of(chan, struct fsl_re_chan, chan);
re_chan           587 drivers/dma/fsl_raid.c 		cf = dma_pool_alloc(re_chan->re_dev->cf_desc_pool, GFP_KERNEL,
re_chan           595 drivers/dma/fsl_raid.c 		fsl_re_init_desc(re_chan, desc, cf, paddr);
re_chan           597 drivers/dma/fsl_raid.c 		list_add_tail(&desc->node, &re_chan->free_q);
re_chan           598 drivers/dma/fsl_raid.c 		re_chan->alloc_count++;
re_chan           600 drivers/dma/fsl_raid.c 	return re_chan->alloc_count;
re_chan           605 drivers/dma/fsl_raid.c 	struct fsl_re_chan *re_chan;
re_chan           608 drivers/dma/fsl_raid.c 	re_chan = container_of(chan, struct fsl_re_chan, chan);
re_chan           609 drivers/dma/fsl_raid.c 	while (re_chan->alloc_count--) {
re_chan           610 drivers/dma/fsl_raid.c 		desc = list_first_entry(&re_chan->free_q,
re_chan           615 drivers/dma/fsl_raid.c 		dma_pool_free(re_chan->re_dev->cf_desc_pool, desc->cf_addr,
re_chan           620 drivers/dma/fsl_raid.c 	if (!list_empty(&re_chan->free_q))
re_chan           621 drivers/dma/fsl_raid.c 		dev_err(re_chan->dev, "chan resource cannot be cleaned!\n");
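
Lines 575-621 are the channel resource callbacks. alloc_chan_resources pre-populates free_q with descriptors, each backed by a command frame from cf_desc_pool, and returns alloc_count; free_chan_resources walks alloc_count back down, returning every command frame to the pool, and warns if anything is left over. A sketch of the teardown; the list removal, the kfree, and the DMA-address argument (cf_paddr) are assumptions for lines not shown:

    static void fsl_re_free_chan_resources(struct dma_chan *chan)
    {
        struct fsl_re_chan *re_chan;
        struct fsl_re_desc *desc;

        re_chan = container_of(chan, struct fsl_re_chan, chan);
        while (re_chan->alloc_count--) {
            desc = list_first_entry(&re_chan->free_q,
                                    struct fsl_re_desc, node);
            list_del(&desc->node);    /* assumed */
            /* Return the command frame to the per-device DMA pool, then
             * free the software descriptor itself. */
            dma_pool_free(re_chan->re_dev->cf_desc_pool, desc->cf_addr,
                          desc->cf_paddr);    /* paddr member name assumed */
            kfree(desc);    /* assumed */
        }

        /* Anything still on free_q at this point was never torn down. */
        if (!list_empty(&re_chan->free_q))
            dev_err(re_chan->dev, "chan resource cannot be cleaned!\n");
    }
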
re_chan           297 drivers/dma/fsl_raid.h 	struct fsl_re_chan *re_chan;
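
The final hit, in drivers/dma/fsl_raid.h line 297, is the back-pointer each software descriptor keeps to its owning job-ring channel; it is what lets completion code reach the channel's queues from a bare descriptor. Roughly, with the members inferred from the uses above (hwdesc and cf_paddr are assumed names):

    struct fsl_re_desc {
        struct fsl_re_chan *re_chan;              /* owning job-ring channel */
        struct dma_async_tx_descriptor async_tx;  /* dmaengine cookie/callback state */
        struct list_head node;                    /* linkage on submit/active/ack/free_q */
        struct fsl_re_hw_desc hwdesc;             /* assumed: ring entry pushed to hardware */
        void *cf_addr;                            /* command frame, from cf_desc_pool */
        dma_addr_t cf_paddr;                      /* assumed: its DMA (bus) address */
    };
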