#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/of.h>

#include "musb_core.h"

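/*
 * Generic RNDIS size registers: one 32-bit register per endpoint,
 * starting at offset 0x80 for EP1 and spaced 4 bytes apart.
 */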
#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))

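/* Auto-request modes: one 2-bit field per RX endpoint in USB_CTRL_AUTOREQ */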
#define EP_MODE_AUTOREQ_NONE		0
#define EP_MODE_AUTOREQ_ALL_NEOP	1
#define EP_MODE_AUTOREQ_ALWAYS		3

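/* DMA transfer modes: one 2-bit field per endpoint in USB_CTRL_TX/RX_MODE */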
#define EP_MODE_DMA_TRANSPARENT		0
#define EP_MODE_DMA_RNDIS		1
#define EP_MODE_DMA_GEN_RNDIS		3

#define USB_CTRL_TX_MODE	0x70
#define USB_CTRL_RX_MODE	0x74
#define USB_CTRL_AUTOREQ	0xd0
#define USB_TDOWN		0xd8

struct cppi41_dma_channel {
	struct dma_channel channel;
	struct cppi41_dma_controller *controller;
	struct musb_hw_ep *hw_ep;
	struct dma_chan *dc;
	dma_cookie_t cookie;
	u8 port_num;
	u8 is_tx;
	u8 is_allocated;
	u8 usb_toggle;

	dma_addr_t buf_addr;
	u32 total_len;
	u32 prog_len;
	u32 transferred;
	u32 packet_sz;
	struct list_head tx_check;
	int tx_zlp;
};

#define MUSB_DMA_NUM_CHANNELS 15

struct cppi41_dma_controller {
	struct dma_controller controller;
	struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
	struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
	struct musb *musb;
	struct hrtimer early_tx;
	struct list_head early_tx_list;
	u32 rx_mode;
	u32 tx_mode;
	u32 auto_req;
};

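/*
 * Workaround for AM335x Advisory 1.0.13: save the host RX data toggle
 * before a DMA transfer and let update_rx_toggle() restore it if the
 * hardware spuriously reset it from DATA1 to DATA0.
 */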
static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(cppi41_channel->controller->musb))
		return;

	csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	cppi41_channel->usb_toggle = toggle;
}

static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(musb))
		return;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	/*
	 * AM335x Advisory 1.0.13: due to an internal synchronisation error,
	 * the data toggle may reset from DATA1 to DATA0 while data is being
	 * received from more than one endpoint.
	 */
	if (!toggle && toggle == cppi41_channel->usb_toggle) {
		csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
		musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
		dev_dbg(cppi41_channel->controller->musb->controller,
				"Restoring DATA1 toggle.\n");
	}

	cppi41_channel->usb_toggle = toggle;
}

static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
{
	u8		epnum = hw_ep->epnum;
	struct musb	*musb = hw_ep->musb;
	void __iomem	*epio = musb->endpoints[epnum].regs;
	u16		csr;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(epio, MUSB_TXCSR);
	if (csr & MUSB_TXCSR_TXPKTRDY)
		return false;
	return true;
}

static void cppi41_dma_callback(void *private_data);

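/*
 * Called once the current descriptor has completed: either finish the
 * transfer (optionally sending a trailing ZLP via PIO), or, in the
 * one-packet-at-a-time fallback mode, program the next packet-sized chunk.
 */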
static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = hw_ep->regs;
	u16 csr;

	if (!cppi41_channel->prog_len ||
	    (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {

		/* done, complete */
		cppi41_channel->channel.actual_len =
			cppi41_channel->transferred;
		cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
		cppi41_channel->channel.rx_packet_done = true;

		/*
		 * transmit a ZLP using PIO mode for transfers whose size is
		 * a multiple of the EP packet size.
		 */
		if (cppi41_channel->tx_zlp && (cppi41_channel->transferred %
					cppi41_channel->packet_sz) == 0) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
		}
		musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
	} else {
		/* next iteration, reload */
		struct dma_chan *dc = cppi41_channel->dc;
		struct dma_async_tx_descriptor *dma_desc;
		enum dma_transfer_direction direction;
		u32 remain_bytes;

		cppi41_channel->buf_addr += cppi41_channel->packet_sz;

		remain_bytes = cppi41_channel->total_len;
		remain_bytes -= cppi41_channel->transferred;
		remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
		cppi41_channel->prog_len = remain_bytes;

		direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
			: DMA_DEV_TO_MEM;
		dma_desc = dmaengine_prep_slave_single(dc,
				cppi41_channel->buf_addr,
				remain_bytes,
				direction,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (WARN_ON(!dma_desc))
			return;

		dma_desc->callback = cppi41_dma_callback;
		dma_desc->callback_param = &cppi41_channel->channel;
		cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
		dma_async_issue_pending(dc);

		if (!cppi41_channel->is_tx) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = musb_readw(epio, MUSB_RXCSR);
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}
}

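/*
 * hrtimer handler for full-speed TX: poll the TX FIFO until it drains,
 * then complete the transfers queued on early_tx_list. Re-arms itself
 * every 20us while work remains.
 */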
static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
	struct cppi41_dma_controller *controller;
	struct cppi41_dma_channel *cppi41_channel, *n;
	struct musb *musb;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	controller = container_of(timer, struct cppi41_dma_controller,
			early_tx);
	musb = controller->musb;

	spin_lock_irqsave(&musb->lock, flags);
	list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
			tx_check) {
		bool empty;
		struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

		empty = musb_is_tx_fifo_empty(hw_ep);
		if (empty) {
			list_del_init(&cppi41_channel->tx_check);
			cppi41_trans_done(cppi41_channel);
		}
	}

	if (!list_empty(&controller->early_tx_list) &&
	    !hrtimer_is_queued(&controller->early_tx)) {
		ret = HRTIMER_RESTART;
		hrtimer_forward_now(&controller->early_tx,
				ktime_set(0, 20 * NSEC_PER_USEC));
	}

	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

static void cppi41_dma_callback(void *private_data)
{
	struct dma_channel *channel = private_data;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct cppi41_dma_controller *controller;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;
	struct dma_tx_state txstate;
	u32 transferred;
	int is_hs = 0;
	bool empty;

	spin_lock_irqsave(&musb->lock, flags);

	dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
			&txstate);
	transferred = cppi41_channel->prog_len - txstate.residue;
	cppi41_channel->transferred += transferred;

	dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
		hw_ep->epnum, cppi41_channel->transferred,
		cppi41_channel->total_len);

	update_rx_toggle(cppi41_channel);

	if (cppi41_channel->transferred == cppi41_channel->total_len ||
			transferred < cppi41_channel->packet_sz)
		cppi41_channel->prog_len = 0;

	if (cppi41_channel->is_tx)
		empty = musb_is_tx_fifo_empty(hw_ep);

	if (!cppi41_channel->is_tx || empty) {
		cppi41_trans_done(cppi41_channel);
		goto out;
	}

	/*
	 * On AM335x it has been observed that the TX interrupt fires
	 * too early, i.e. the TXFIFO is not yet empty but the DMA
	 * engine reports that it is done with the transfer. We don't
	 * receive a FIFO-empty interrupt, so the only thing we can do
	 * is poll for the bit. On HS it usually takes 2us, on FS around
	 * 110us - 150us depending on the transfer size.
	 * We spin on HS (no longer than 25us) and set up a timer on
	 * FS to check for the bit and complete the transfer.
	 */
	controller = cppi41_channel->controller;

	if (is_host_active(musb)) {
		if (musb->port1_status & USB_PORT_STAT_HIGH_SPEED)
			is_hs = 1;
	} else {
		if (musb->g.speed == USB_SPEED_HIGH)
			is_hs = 1;
	}
	if (is_hs) {
		unsigned wait = 25;

		do {
			empty = musb_is_tx_fifo_empty(hw_ep);
			if (empty) {
				cppi41_trans_done(cppi41_channel);
				goto out;
			}
			wait--;
			if (!wait)
				break;
			cpu_relax();
		} while (1);
	}
	list_add_tail(&cppi41_channel->tx_check,
			&controller->early_tx_list);
	if (!hrtimer_is_queued(&controller->early_tx)) {
		unsigned long usecs = cppi41_channel->total_len / 10;

		hrtimer_start_range_ns(&controller->early_tx,
				ktime_set(0, usecs * NSEC_PER_USEC),
				20 * NSEC_PER_USEC,
				HRTIMER_MODE_REL);
	}

out:
	spin_unlock_irqrestore(&musb->lock, flags);
}

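/*
 * Each endpoint owns a 2-bit mode field; EP1 occupies bits 1:0.
 * For example, update_ep_mode(2, EP_MODE_DMA_GEN_RNDIS, 0) returns
 * 3 << 2 == 0xc.
 */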
static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
	unsigned shift;

	shift = (ep - 1) * 2;
	old &= ~(3 << shift);
	old |= mode << shift;
	return old;
}

static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	if (cppi41_channel->is_tx)
		old_mode = controller->tx_mode;
	else
		old_mode = controller->rx_mode;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	if (cppi41_channel->is_tx) {
		controller->tx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_TX_MODE,
				new_mode);
	} else {
		controller->rx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_RX_MODE,
				new_mode);
	}
}

static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	old_mode = controller->auto_req;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	controller->auto_req = new_mode;
	musb_writel(controller->musb->ctrl_base, USB_CTRL_AUTOREQ, new_mode);
}

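/*
 * Program one transfer. TX endpoints use Generic RNDIS mode (with the
 * transfer length in RNDIS_REG) so a buffer larger than one packet can be
 * handed to the hardware in a single descriptor; RX endpoints fall back
 * to transparent mode and move one packet at a time (Advisory 1.0.13),
 * with cppi41_trans_done() reloading the next chunk.
 */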
static bool cppi41_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct dma_chan *dc = cppi41_channel->dc;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction direction;
	struct musb *musb = cppi41_channel->controller->musb;
	unsigned use_gen_rndis = 0;

	dev_dbg(musb->controller,
		"configure ep%d/%x packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
		cppi41_channel->port_num, RNDIS_REG(cppi41_channel->port_num),
		packet_sz, mode, (unsigned long long) dma_addr,
		len, cppi41_channel->is_tx);

	cppi41_channel->buf_addr = dma_addr;
	cppi41_channel->total_len = len;
	cppi41_channel->transferred = 0;
	cppi41_channel->packet_sz = packet_sz;
	cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0;

	/*
	 * Due to AM335x Advisory 1.0.13 we are not allowed to transfer more
	 * than the max packet size at a time.
	 */
	if (cppi41_channel->is_tx)
		use_gen_rndis = 1;

	if (use_gen_rndis) {
		/* RNDIS mode */
		if (len > packet_sz) {
			musb_writel(musb->ctrl_base,
				RNDIS_REG(cppi41_channel->port_num), len);
			/* gen rndis */
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_GEN_RNDIS);

			/* auto req */
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREQ_ALL_NEOP);
		} else {
			musb_writel(musb->ctrl_base,
					RNDIS_REG(cppi41_channel->port_num), 0);
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_TRANSPARENT);
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREQ_NONE);
		}
	} else {
		/* fallback mode */
		cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
		len = min_t(u32, packet_sz, len);
	}
	cppi41_channel->prog_len = len;
	direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback = cppi41_dma_callback;
	dma_desc->callback_param = channel;
	cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
	cppi41_channel->channel.rx_packet_done = false;

	save_rx_toggle(cppi41_channel);
	dma_async_issue_pending(dc);
	return true;
}

static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
				struct musb_hw_ep *hw_ep, u8 is_tx)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);
	struct cppi41_dma_channel *cppi41_channel = NULL;
	u8 ch_num = hw_ep->epnum - 1;

	if (ch_num >= MUSB_DMA_NUM_CHANNELS)
		return NULL;

	if (is_tx)
		cppi41_channel = &controller->tx_channel[ch_num];
	else
		cppi41_channel = &controller->rx_channel[ch_num];

	if (!cppi41_channel->dc)
		return NULL;

	if (cppi41_channel->is_allocated)
		return NULL;

	cppi41_channel->hw_ep = hw_ep;
	cppi41_channel->is_allocated = 1;

	return &cppi41_channel->channel;
}

static void cppi41_dma_channel_release(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;

	if (cppi41_channel->is_allocated) {
		cppi41_channel->is_allocated = 0;
		channel->status = MUSB_DMA_STATUS_FREE;
		channel->actual_len = 0;
	}
}

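/*
 * For high-bandwidth host endpoints, bits 10:0 of packet_sz hold the base
 * maxpacket value; multiply by hb_mult to get the real per-transfer size.
 */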
static int cppi41_dma_channel_program(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	int ret;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	int hb_mult = 0;

	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
		channel->status == MUSB_DMA_STATUS_BUSY);

	if (is_host_active(cppi41_channel->controller->musb)) {
		if (cppi41_channel->is_tx)
			hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
		else
			hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
	}

	channel->status = MUSB_DMA_STATUS_BUSY;
	channel->actual_len = 0;

	if (hb_mult)
		packet_sz = hb_mult * (packet_sz & 0x7FF);

	ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
	if (!ret)
		channel->status = MUSB_DMA_STATUS_FREE;

	return ret;
}

static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
		void *buf, u32 length)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;

	if (is_host_active(musb)) {
		WARN_ON(1);
		return 1;
	}
	if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
		return 0;
	if (cppi41_channel->is_tx)
		return 1;
	/* AM335x Advisory 1.0.13. No workaround for device RX mode */
	return 0;
}

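/*
 * Tear down a running transfer: stop MUSB from issuing further DMA
 * requests, repeatedly terminate the dmaengine channel (asserting the TX
 * teardown bit in USB_TDOWN each round) until it stops returning -EAGAIN,
 * then flush any stale packet left in the FIFO.
 */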
static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;
	void __iomem *epio = cppi41_channel->hw_ep->regs;
	int tdbit;
	int ret;
	unsigned is_tx;
	u16 csr;

	is_tx = cppi41_channel->is_tx;
	dev_dbg(musb->controller, "abort channel=%d, is_tx=%d\n",
			cppi41_channel->port_num, is_tx);

	if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
		return 0;

	list_del_init(&cppi41_channel->tx_check);
	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);

		/* delay to drain the CPPI DMA pipeline for isochronous transfers */
		udelay(250);

		csr = musb_readw(epio, MUSB_RXCSR);
		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
		musb_writew(epio, MUSB_RXCSR, csr);

		/* wait for the CPPI DMA pipeline to drain */
		udelay(50);

		csr = musb_readw(epio, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY) {
			csr |= MUSB_RXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_RXCSR, csr);
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	tdbit = 1 << cppi41_channel->port_num;
	if (is_tx)
		tdbit <<= 16;

	do {
		if (is_tx)
			musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
		ret = dmaengine_terminate_all(cppi41_channel->dc);
	} while (ret == -EAGAIN);

	if (is_tx) {
		musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);

		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_TXPKTRDY) {
			csr |= MUSB_TXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	}

	cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
	return 0;
}

static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
{
	struct dma_chan *dc;
	int i;

	for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
		dc = ctrl->tx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
		dc = ctrl->rx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
	}
}

static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
	cppi41_release_all_dma_chans(controller);
}

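/*
 * Request one dmaengine channel per "dma-names" entry from the parent DT
 * node. Names are expected to look like "tx1".."tx15" / "rx1".."rx15";
 * the trailing number selects the MUSB endpoint.
 */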
static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
	struct musb *musb = controller->musb;
	struct device *dev = musb->controller;
	struct device_node *np = dev->parent->of_node;
	struct cppi41_dma_channel *cppi41_channel;
	int count;
	int i;
	int ret;

	count = of_property_count_strings(np, "dma-names");
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		struct dma_chan *dc;
		struct dma_channel *musb_dma;
		const char *str;
		unsigned is_tx;
		unsigned int port;

		ret = of_property_read_string_index(np, "dma-names", i, &str);
		if (ret)
			goto err;
		if (strstarts(str, "tx"))
			is_tx = 1;
		else if (strstarts(str, "rx"))
			is_tx = 0;
		else {
			dev_err(dev, "Wrong dmatype %s\n", str);
			ret = -EINVAL;
			goto err;
		}
		ret = kstrtouint(str + 2, 0, &port);
		if (ret)
			goto err;

		ret = -EINVAL;
		if (port > MUSB_DMA_NUM_CHANNELS || !port)
			goto err;
		if (is_tx)
			cppi41_channel = &controller->tx_channel[port - 1];
		else
			cppi41_channel = &controller->rx_channel[port - 1];

		cppi41_channel->controller = controller;
		cppi41_channel->port_num = port;
		cppi41_channel->is_tx = is_tx;
		INIT_LIST_HEAD(&cppi41_channel->tx_check);

		musb_dma = &cppi41_channel->channel;
		musb_dma->private_data = cppi41_channel;
		musb_dma->status = MUSB_DMA_STATUS_FREE;
		musb_dma->max_len = SZ_4M;

		dc = dma_request_slave_channel(dev->parent, str);
		if (!dc) {
			dev_err(dev, "Failed to request %s.\n", str);
			ret = -EPROBE_DEFER;
			goto err;
		}
		cppi41_channel->dc = dc;
	}
	return 0;
err:
	cppi41_release_all_dma_chans(controller);
	return ret;
}

void cppi41_dma_controller_destroy(struct dma_controller *c)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);

	hrtimer_cancel(&controller->early_tx);
	cppi41_dma_controller_stop(controller);
	kfree(controller);
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);

struct dma_controller *
cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
{
	struct cppi41_dma_controller *controller;
	int ret = 0;

	if (!musb->controller->parent->of_node) {
		dev_err(musb->controller, "Need DT for the DMA engine.\n");
		return NULL;
	}

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		goto kzalloc_fail;

	hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	controller->early_tx.function = cppi41_recheck_tx_req;
	INIT_LIST_HEAD(&controller->early_tx_list);
	controller->musb = musb;

	controller->controller.channel_alloc = cppi41_dma_channel_allocate;
	controller->controller.channel_release = cppi41_dma_channel_release;
	controller->controller.channel_program = cppi41_dma_channel_program;
	controller->controller.channel_abort = cppi41_dma_channel_abort;
	controller->controller.is_compatible = cppi41_is_compatible;

	ret = cppi41_dma_controller_start(controller);
	if (ret)
		goto plat_get_fail;
	return &controller->controller;

plat_get_fail:
	kfree(controller);
kzalloc_fail:
	if (ret == -EPROBE_DEFER)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_create);