/* OMAP SSI port driver.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>

#include <linux/of_gpio.h>
#include <linux/debugfs.h>

#include "omap_ssi_regs.h"
#include "omap_ssi.h"

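/*
 * No-op client callbacks. These are installed in ssi_port_remove() so
 * that late calls into a removed port become harmless no-ops instead of
 * dereferencing stale function pointers.
 */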
static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
	return 0;
}

static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
	return 0;
}

static inline unsigned int ssi_wakein(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	return gpio_get_value(omap_port->wake_gpio);
}

#ifdef CONFIG_DEBUG_FS
static void ssi_debug_remove_port(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	debugfs_remove_recursive(omap_port->dir);
}

static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused)
{
	struct hsi_port *port = m->private;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem	*base = omap_ssi->sys;
	unsigned int ch;

	pm_runtime_get_sync(omap_port->pdev);
	if (omap_port->wake_irq > 0)
		seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
	seq_printf(m, "WAKE\t\t: 0x%08x\n",
				readl(base + SSI_WAKE_REG(port->num)));
	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
	/* SST */
	base = omap_port->sst_base;
	seq_puts(m, "\nSST\n===\n");
	seq_printf(m, "ID SST\t\t: 0x%08x\n",
				readl(base + SSI_SST_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SST_FRAMESIZE_REG));
	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
				readl(base + SSI_SST_DIVISOR_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SST_CHANNELS_REG));
	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_ARBMODE_REG));
	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SST_TXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SST_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SST_BREAK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SST_BUFFER_CH_REG(ch)));
	}
	/* SSR */
	base = omap_port->ssr_base;
	seq_puts(m, "\nSSR\n===\n");
	seq_printf(m, "ID SSR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SSR_FRAMESIZE_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SSR_CHANNELS_REG));
	seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
				readl(base + SSI_SSR_TIMEOUT_REG));
	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_RXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SSR_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SSR_BREAK_REG));
	seq_printf(m, "ERROR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ERROR_REG));
	seq_printf(m, "ERRORACK\t: 0x%08x\n",
				readl(base + SSI_SSR_ERRORACK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
	}
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

static int ssi_port_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, ssi_debug_port_show, inode->i_private);
}

static const struct file_operations ssi_port_regs_fops = {
	.open		= ssi_port_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int ssi_div_get(void *data, u64 *val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	pm_runtime_get_sync(omap_port->pdev);
	*val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

static int ssi_div_set(void *data, u64 val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	if (val > 127)
		return -EINVAL;

	pm_runtime_get_sync(omap_port->pdev);
	writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
	omap_port->sst.divisor = val;
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");

static int __init ssi_debug_add_port(struct omap_ssi_port *omap_port,
				     struct dentry *dir)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);

	dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
	if (!dir)
		return -ENOMEM;
	omap_port->dir = dir;
	debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
	dir = debugfs_create_dir("sst", dir);
	if (!dir)
		return -ENOMEM;
	debugfs_create_file("divisor", S_IRUGO | S_IWUSR, dir, port,
			    &ssi_sst_div_fops);

	return 0;
}
#endif

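/*
 * Claim a free GDD logical channel for @msg. Returns the channel index,
 * or -EBUSY when all SSI_MAX_GDD_LCH channels are in use, in which case
 * the caller falls back to PIO (see ssi_start_transfer()).
 */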
static int ssi_claim_lch(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int lch;

	for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
		if (!omap_ssi->gdd_trn[lch].msg) {
			omap_ssi->gdd_trn[lch].msg = msg;
			omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
			return lch;
		}

	return -EBUSY;
}

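/*
 * Program one GDD logical channel for a single-entry scatterlist:
 * map the buffer, set up the source/destination parameters (peripheral
 * port <-> memory port depending on the transfer direction), unmask the
 * GDD interrupt for the channel and finally write CCR to kick off the
 * transfer. A clock reference is held until the transfer completes.
 */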
static int ssi_start_dma(struct hsi_msg *msg, int lch)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *gdd = omap_ssi->gdd;
	int err;
	u16 csdp;
	u16 ccr;
	u32 s_addr;
	u32 d_addr;
	u32 tmp;

	if (msg->ttype == HSI_MSG_READ) {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_FROM_DEVICE);
		if (err < 0) {
			dev_dbg(&ssi->device, "DMA map SG failed!\n");
			return err;
		}
		csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
			SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
		ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = omap_port->ssr_dma +
					SSI_SSR_BUFFER_CH_REG(msg->channel);
		d_addr = sg_dma_address(msg->sgt.sgl);
	} else {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_TO_DEVICE);
		if (err < 0) {
			dev_dbg(&ssi->device, "DMA map SG failed!\n");
			return err;
		}
		csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
			SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
		ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = sg_dma_address(msg->sgt.sgl);
		d_addr = omap_port->sst_dma +
					SSI_SST_BUFFER_CH_REG(msg->channel);
	}
	dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n",
		lch, csdp, ccr, s_addr, d_addr);

	/* Hold clocks during the transfer */
	pm_runtime_get_sync(omap_port->pdev);

	writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
	writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
	writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
	writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
	writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
						gdd + SSI_GDD_CEN_REG(lch));

	spin_lock_bh(&omap_ssi->lock);
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp |= SSI_GDD_LCH(lch);
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock_bh(&omap_ssi->lock);
	writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

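/*
 * Arm a PIO transfer by enabling the DATAACCEPT/DATAAVAILABLE interrupt
 * for the channel; the actual data is moved in ssi_pio_complete(). The
 * extra clock reference taken here for a write is dropped once the
 * transfer completes (or when the queue is flushed).
 */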
static int ssi_start_pio(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 val;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		val = SSI_DATAACCEPT(msg->channel);
		/* Hold clocks for pio writes */
		pm_runtime_get_sync(omap_port->pdev);
	} else {
		val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
	}
	dev_dbg(&port->device, "Single %s transfer\n",
						msg->ttype ? "write" : "read");
	val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_sync(omap_port->pdev);
	msg->actual_len = 0;
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

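/*
 * Start the transfer at the head of @queue, preferring DMA for messages
 * longer than one word and falling back to PIO when no GDD logical
 * channel is free. Must be called with omap_port->lock held.
 */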
static int ssi_start_transfer(struct list_head *queue)
{
	struct hsi_msg *msg;
	int lch = -1;

	if (list_empty(queue))
		return 0;
	msg = list_first_entry(queue, struct hsi_msg, link);
	if (msg->status != HSI_STATUS_QUEUED)
		return 0;
	if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
		lch = ssi_claim_lch(msg);
	if (lch >= 0)
		return ssi_start_dma(msg, lch);
	else
		return ssi_start_pio(msg);
}

static int ssi_async_break(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int err = 0;
	u32 tmp;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		if (omap_port->sst.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
		msg->status = HSI_STATUS_COMPLETED;
		msg->complete(msg);
	} else {
		if (omap_port->ssr.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		spin_lock_bh(&omap_port->lock);
		tmp = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));
		writel(tmp | SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
		msg->status = HSI_STATUS_PROCEEDING;
		list_add_tail(&msg->link, &omap_port->brkqueue);
		spin_unlock_bh(&omap_port->lock);
	}
out:
	pm_runtime_put_sync(omap_port->pdev);

	return err;
}

static int ssi_async(struct hsi_msg *msg)
{
	struct hsi_port *port;
	struct omap_ssi_port *omap_port;
	struct list_head *queue;
	int err = 0;

	/* Check the message before it is first dereferenced */
	BUG_ON(!msg);

	port = hsi_get_port(msg->cl);
	omap_port = hsi_port_drvdata(port);

	if (msg->sgt.nents > 1)
		return -ENOSYS; /* TODO: Add sg support */

	if (msg->break_frame)
		return ssi_async_break(msg);

	if (msg->ttype) {
		BUG_ON(msg->channel >= omap_port->sst.channels);
		queue = &omap_port->txqueue[msg->channel];
	} else {
		BUG_ON(msg->channel >= omap_port->ssr.channels);
		queue = &omap_port->rxqueue[msg->channel];
	}
	msg->status = HSI_STATUS_QUEUED;
	spin_lock_bh(&omap_port->lock);
	list_add_tail(&msg->link, queue);
	err = ssi_start_transfer(queue);
	if (err < 0) {
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
	}
	spin_unlock_bh(&omap_port->lock);
	dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
				msg->status, msg->ttype, msg->channel);

	return err;
}

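/*
 * Note: the SSI TX bit rate is derived from half the functional clock,
 * divided down by the SST divisor (rate = (fck/2) / (div + 1), assuming
 * the usual "+1" register semantics). Computing (tx_fckrate - 1) /
 * max_speed below therefore yields ceil(tx_fckrate / max_speed) - 1;
 * e.g. a 96000 kHz fck and a 24000 kb/s max_speed give
 * (48000 - 1) / 24000 = 1.
 */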
static u32 ssi_calculate_div(struct hsi_controller *ssi)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 tx_fckrate = (u32) omap_ssi->fck_rate;

	/* SSI TX clock is always half of the SSI functional clock */
	tx_fckrate >>= 1;
	/* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
	tx_fckrate--;
	dev_dbg(&ssi->device, "TX div %d for fck_rate %lu kHz speed %d kb/s\n",
		tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
		omap_ssi->max_speed);

	return tx_fckrate / omap_ssi->max_speed;
}

static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
{
	struct list_head *node, *tmp;
	struct hsi_msg *msg;

	list_for_each_safe(node, tmp, queue) {
		msg = list_entry(node, struct hsi_msg, link);
		if ((cl) && (cl != msg->cl))
			continue;
		list_del(node);
		pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
			msg->channel, msg, msg->sgt.sgl->length,
					msg->ttype, msg->context);
		if (msg->destructor)
			msg->destructor(msg);
		else
			hsi_free_msg(msg);
	}
}

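/*
 * Configure the port for @cl: put SST/SSR into SLEEP mode while the
 * registers are updated, program frame size, divisor, channels and
 * arbitration, then apply the requested modes. The values are also
 * shadowed in omap_port so they can be restored after an OFF-mode
 * context loss (see ssi_restore_port_ctx()).
 */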
static int ssi_setup(struct hsi_client *cl)
{
	struct hsi_port *port = to_hsi_port(cl->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	u32 div;
	u32 val;
	int err = 0;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	if (cl->tx_cfg.speed)
		omap_ssi->max_speed = cl->tx_cfg.speed;
	div = ssi_calculate_div(ssi);
	if (div > SSI_MAX_DIVISOR) {
		dev_err(&cl->device, "Invalid TX speed %d kb/s (div %d)\n",
						cl->tx_cfg.speed, div);
		err = -EINVAL;
		goto out;
	}
	/* Set TX/RX module to sleep to stop TX/RX during cfg update */
	writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
	writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
	/* Flush posted write */
	val = readl(ssr + SSI_SSR_MODE_REG);
	/* TX */
	writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
	writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
	writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
	writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
	/* RX */
	writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
	writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
	/* Cleanup the break queue if we leave FRAME mode */
	if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
		(cl->rx_cfg.mode != SSI_MODE_FRAME))
		ssi_flush_queue(&omap_port->brkqueue, cl);
	writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
	omap_port->channels = max(cl->rx_cfg.num_hw_channels,
				  cl->tx_cfg.num_hw_channels);
	/* Shadow the configuration for restore after OFF mode */
	/* SST */
	omap_port->sst.divisor = div;
	omap_port->sst.frame_size = 31;
	omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
	omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
	omap_port->sst.mode = cl->tx_cfg.mode;
	/* SSR */
	omap_port->ssr.frame_size = 31;
	omap_port->ssr.timeout = 0;
	omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
	omap_port->ssr.mode = cl->rx_cfg.mode;
out:
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return err;
}

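/*
 * Abort everything in flight on the port: stop active GDD transfers,
 * reset the SST/SSR FIFO state, ack pending errors and interrupts and
 * drop every queued message. Clock references taken for in-progress
 * writes are released while dequeuing.
 */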
static int ssi_flush(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	unsigned int i;
	u32 err;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	/* Stop all DMA transfers */
	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if (!msg || (port != hsi_get_port(msg->cl)))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_sync(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	/* Flush all SST buffers */
	writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
	writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
	/* Flush all SSR buffers */
	writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
	writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
	/* Flush all errors */
	err = readl(ssr + SSI_SSR_ERROR_REG);
	writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
	/* Flush break */
	writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
	/* Clear interrupts */
	writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0xffffff00,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
	/* Dequeue all pending requests */
	for (i = 0; i < omap_port->channels; i++) {
		/* Release write clocks */
		if (!list_empty(&omap_port->txqueue[i]))
			pm_runtime_put_sync(omap_port->pdev);
		ssi_flush_queue(&omap_port->txqueue[i], NULL);
		ssi_flush_queue(&omap_port->rxqueue[i], NULL);
	}
	ssi_flush_queue(&omap_port->brkqueue, NULL);
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

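/*
 * The outgoing wake line is reference counted across clients: the first
 * ssi_start_tx() raises WAKE and takes a clock reference, and the last
 * ssi_stop_tx() lowers it again and releases the reference.
 */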
static int ssi_start_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	if (omap_port->wk_refcount++) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
	spin_unlock_bh(&omap_port->wk_lock);

	return 0;
}

static int ssi_stop_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	BUG_ON(!omap_port->wk_refcount);
	if (--omap_port->wk_refcount) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
	pm_runtime_put_sync(omap_port->pdev); /* Release clocks */
	spin_unlock_bh(&omap_port->wk_lock);

	return 0;
}

static void ssi_transfer(struct omap_ssi_port *omap_port,
							struct list_head *queue)
{
	struct hsi_msg *msg;
	int err = -1;

	spin_lock_bh(&omap_port->lock);
	while (err < 0) {
		err = ssi_start_transfer(queue);
		if (err < 0) {
			msg = list_first_entry(queue, struct hsi_msg, link);
			msg->status = HSI_STATUS_ERROR;
			msg->actual_len = 0;
			list_del(&msg->link);
			spin_unlock_bh(&omap_port->lock);
			msg->complete(msg);
			spin_lock_bh(&omap_port->lock);
		}
	}
	spin_unlock_bh(&omap_port->lock);
}

static void ssi_cleanup_queues(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 rxbufstate = 0;
	u32 txbufstate = 0;
	u32 status = SSI_ERROROCCURED;
	u32 tmp;

	ssi_flush_queue(&omap_port->brkqueue, cl);
	if (list_empty(&omap_port->brkqueue))
		status |= SSI_BREAKDETECTED;

	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->txqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			txbufstate |= (1 << i);
			status |= SSI_DATAACCEPT(i);
			/* Release the clock references held for writes,
			 * including the GDD ones */
			pm_runtime_put_sync(omap_port->pdev);
		}
		ssi_flush_queue(&omap_port->txqueue[i], cl);
	}
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			rxbufstate |= (1 << i);
			status |= SSI_DATAAVAILABLE(i);
		}
		ssi_flush_queue(&omap_port->rxqueue[i], cl);
		/* Check if we keep the error detection interrupt armed */
		if (!list_empty(&omap_port->rxqueue[i]))
			status &= ~SSI_ERROROCCURED;
	}
	/* Cleanup write buffers */
	tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	tmp &= ~txbufstate;
	writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	/* Cleanup read buffers */
	tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	tmp &= ~rxbufstate;
	writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	/* Disarm and ack pending interrupts */
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= ~status;
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(status, omap_ssi->sys +
		SSI_MPU_STATUS_REG(port->num, 0));
}

static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	unsigned int i;
	u32 val = 0;
	u32 tmp;

	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((!msg) || (msg->cl != cl))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		val |= (1 << i);
		/*
		 * Clock references for write will be handled in
		 * ssi_cleanup_queues
		 */
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_sync(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
}

static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
{
	writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
	writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int ssi_release(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);

	spin_lock_bh(&omap_port->lock);
	pm_runtime_get_sync(omap_port->pdev);
	/* Stop all the pending DMA requests for that client */
	ssi_cleanup_gdd(ssi, cl);
	/* Now cleanup all the queues */
	ssi_cleanup_queues(cl);
	pm_runtime_put_sync(omap_port->pdev);
	/* If it is the last client of the port, do extra checks and cleanup */
	if (port->claimed <= 1) {
		/*
		 * Drop the clock reference for the incoming wake line
		 * if it is still kept high by the other side.
		 */
		if (omap_port->wkin_cken) {
			pm_runtime_put_sync(omap_port->pdev);
			omap_port->wkin_cken = 0;
		}
		pm_runtime_get_sync(omap_port->pdev);
		/* Stop any SSI TX/RX without a client */
		ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
		omap_port->sst.mode = SSI_MODE_SLEEP;
		omap_port->ssr.mode = SSI_MODE_SLEEP;
		pm_runtime_put_sync(omap_port->pdev);
		WARN_ON(omap_port->wk_refcount != 0);
	}
	spin_unlock_bh(&omap_port->lock);

	return 0;
}

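/*
 * Error recovery: cancel every in-flight read (DMA first, then PIO),
 * ack the error condition in hardware and complete the affected read
 * requests with HSI_STATUS_ERROR before restarting any reads still
 * queued behind them.
 */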
static void ssi_error(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 err;
	u32 val;
	u32 tmp;

	/* Get the error condition */
	err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
	dev_err(&port->device, "SSI error: 0x%02x\n", err);
	if (!err) {
		dev_dbg(&port->device, "spurious SSI error ignored!\n");
		return;
	}
	spin_lock(&omap_ssi->lock);
	/* Cancel all GDD read transfers */
	for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((msg) && (msg->ttype == HSI_MSG_READ)) {
			writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
			val |= (1 << i);
			omap_ssi->gdd_trn[i].msg = NULL;
		}
	}
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock(&omap_ssi->lock);
	/* Cancel all PIO read transfers */
	spin_lock(&omap_port->lock);
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	/* ACK error */
	writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
	writel_relaxed(SSI_ERROROCCURED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	/* Signal the error to all pending read requests */
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
		/* Now restart queued reads if any */
		ssi_transfer(omap_port, &omap_port->rxqueue[i]);
		spin_lock(&omap_port->lock);
	}
	spin_unlock(&omap_port->lock);
}

static void ssi_break_complete(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	struct hsi_msg *tmp;
	u32 val;

	dev_dbg(&port->device, "HWBREAK received\n");

	spin_lock(&omap_port->lock);
	val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	val &= ~SSI_BREAKDETECTED;
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
	writel(SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	spin_unlock(&omap_port->lock);

	list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
		msg->status = HSI_STATUS_COMPLETED;
		spin_lock(&omap_port->lock);
		list_del(&msg->link);
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
	}
}

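/*
 * Move one 32-bit word per interrupt for the message at the head of
 * @queue. A completed write keeps its DATAACCEPT interrupt armed for
 * one more round, so the complete callback only runs once the last
 * frame has really left the FIFO.
 */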
static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
{
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	u32 *buf;
	u32 reg;
	u32 val;

	spin_lock(&omap_port->lock);
	msg = list_first_entry(queue, struct hsi_msg, link);
	if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
		msg->actual_len = 0;
		msg->status = HSI_STATUS_PENDING;
	}
	if (msg->ttype == HSI_MSG_WRITE)
		val = SSI_DATAACCEPT(msg->channel);
	else
		val = SSI_DATAAVAILABLE(msg->channel);
	if (msg->status == HSI_STATUS_PROCEEDING) {
		buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
		if (msg->ttype == HSI_MSG_WRITE)
			writel(*buf, omap_port->sst_base +
					SSI_SST_BUFFER_CH_REG(msg->channel));
		else
			*buf = readl(omap_port->ssr_base +
					SSI_SSR_BUFFER_CH_REG(msg->channel));
		dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
							msg->ttype, *buf);
		msg->actual_len += sizeof(*buf);
		if (msg->actual_len >= msg->sgt.sgl->length)
			msg->status = HSI_STATUS_COMPLETED;
		/*
		 * Wait for the last written frame to be really sent before
		 * we call the complete callback
		 */
		if ((msg->status == HSI_STATUS_PROCEEDING) ||
				((msg->status == HSI_STATUS_COMPLETED) &&
					(msg->ttype == HSI_MSG_WRITE))) {
			writel(val, omap_ssi->sys +
					SSI_MPU_STATUS_REG(port->num, 0));
			spin_unlock(&omap_port->lock);

			return;
		}
	}
	/* Transfer completed at this point */
	reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	if (msg->ttype == HSI_MSG_WRITE) {
		/* Release clocks for write transfer */
		pm_runtime_put_sync(omap_port->pdev);
	}
	reg &= ~val;
	writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	list_del(&msg->link);
	spin_unlock(&omap_port->lock);
	msg->complete(msg);
	ssi_transfer(omap_port, queue);
}

static void ssi_pio_tasklet(unsigned long ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sys = omap_ssi->sys;
	unsigned int ch;
	u32 status_reg;

	pm_runtime_get_sync(omap_port->pdev);
	status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
	status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

	for (ch = 0; ch < omap_port->channels; ch++) {
		if (status_reg & SSI_DATAACCEPT(ch))
			ssi_pio_complete(port, &omap_port->txqueue[ch]);
		if (status_reg & SSI_DATAAVAILABLE(ch))
			ssi_pio_complete(port, &omap_port->rxqueue[ch]);
	}
	if (status_reg & SSI_BREAKDETECTED)
		ssi_break_complete(port);
	if (status_reg & SSI_ERROROCCURED)
		ssi_error(port);

	status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
	status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_sync(omap_port->pdev);

	if (status_reg)
		tasklet_hi_schedule(&omap_port->pio_tasklet);
	else
		enable_irq(omap_port->irq);
}

static irqreturn_t ssi_pio_isr(int irq, void *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	tasklet_hi_schedule(&omap_port->pio_tasklet);
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

static void ssi_wake_tasklet(unsigned long ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	if (ssi_wakein(port)) {
		/*
		 * We can have a quick high-low-high transition on the line.
		 * In such a case, if we have long interrupt latencies, we
		 * can miss the low event or see the high event twice. This
		 * workaround avoids breaking the clock reference count when
		 * such a situation occurs.
		 */
		spin_lock(&omap_port->lock);
		if (!omap_port->wkin_cken) {
			omap_port->wkin_cken = 1;
			pm_runtime_get_sync(omap_port->pdev);
		}
		spin_unlock(&omap_port->lock);
		dev_dbg(&ssi->device, "Wake in high\n");
		if (omap_port->wktest) { /* FIXME: HACK! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_START_RX);
	} else {
		dev_dbg(&ssi->device, "Wake in low\n");
		if (omap_port->wktest) { /* FIXME: HACK! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_STOP_RX);
		spin_lock(&omap_port->lock);
		if (omap_port->wkin_cken) {
			pm_runtime_put_sync(omap_port->pdev);
			omap_port->wkin_cken = 0;
		}
		spin_unlock(&omap_port->lock);
	}
}

static irqreturn_t ssi_wake_isr(int irq __maybe_unused, void *ssi_port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(ssi_port);

	tasklet_hi_schedule(&omap_port->wake_tasklet);

	return IRQ_HANDLED;
}

static int __init ssi_port_irq(struct hsi_port *port,
						struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int err;

	err = platform_get_irq(pd, 0);
	if (err < 0) {
		dev_err(&port->device, "Port IRQ resource missing\n");
		return err;
	}
	omap_port->irq = err;
	tasklet_init(&omap_port->pio_tasklet, ssi_pio_tasklet,
							(unsigned long)port);
	err = devm_request_irq(&port->device, omap_port->irq, ssi_pio_isr,
						0, "mpu_irq0", port);
	if (err < 0)
		dev_err(&port->device, "Request IRQ %d failed (%d)\n",
							omap_port->irq, err);
	return err;
}

static int __init ssi_wake_irq(struct hsi_port *port,
						struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int cawake_irq;
	int err;

	if (omap_port->wake_gpio == -1) {
		omap_port->wake_irq = -1;
		return 0;
	}

	cawake_irq = gpio_to_irq(omap_port->wake_gpio);

	omap_port->wake_irq = cawake_irq;
	tasklet_init(&omap_port->wake_tasklet, ssi_wake_tasklet,
							(unsigned long)port);
	err = devm_request_irq(&port->device, cawake_irq, ssi_wake_isr,
		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
							"cawake", port);
	if (err < 0)
		dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
						cawake_irq, err);
	err = enable_irq_wake(cawake_irq);
	if (err < 0)
		dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n",
			cawake_irq, err);

	return err;
}

static void __init ssi_queues_init(struct omap_ssi_port *omap_port)
{
	unsigned int ch;

	for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
		INIT_LIST_HEAD(&omap_port->txqueue[ch]);
		INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
	}
	INIT_LIST_HEAD(&omap_port->brkqueue);
}

static int __init ssi_port_get_iomem(struct platform_device *pd,
		const char *name, void __iomem **pbase, dma_addr_t *phy)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct resource *mem;
	struct resource *ioarea;
	void __iomem *base;

	mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
	if (!mem) {
		dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
		return -ENXIO;
	}
	ioarea = devm_request_mem_region(&port->device, mem->start,
					resource_size(mem), dev_name(&pd->dev));
	if (!ioarea) {
		dev_err(&pd->dev, "%s IO memory region request failed\n",
								mem->name);
		return -ENXIO;
	}
	base = devm_ioremap(&port->device, mem->start, resource_size(mem));
	if (!base) {
		dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
		return -ENXIO;
	}
	*pbase = base;

	if (phy)
		*phy = mem->start;

	return 0;
}

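/*
 * Probe path: look up the first unused port slot on the parent
 * controller, claim the CAWAKE gpio described in the device tree, map
 * the "tx" (SST) and "rx" (SSR) register windows, wire up the port
 * callbacks and IRQs, and finally register the DT-described clients.
 */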
static int __init ssi_port_probe(struct platform_device *pd)
{
	struct device_node *np = pd->dev.of_node;
	struct hsi_port *port;
	struct omap_ssi_port *omap_port;
	struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int cawake_gpio = 0;
	u32 port_id;
	int err;

	dev_dbg(&pd->dev, "init ssi port...\n");

	if (!try_module_get(ssi->owner)) {
		dev_err(&pd->dev, "could not increment parent module refcount\n");
		return -ENODEV;
	}

	if (!ssi->port || !omap_ssi->port) {
		dev_err(&pd->dev, "ssi controller not initialized!\n");
		err = -ENODEV;
		goto error;
	}

	/* get id of first uninitialized port in controller */
	for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id];
		port_id++)
		;

	if (port_id >= ssi->num_ports) {
		dev_err(&pd->dev, "port id out of range!\n");
		err = -ENODEV;
		goto error;
	}

	port = ssi->port[port_id];

	if (!np) {
		dev_err(&pd->dev, "missing device tree data\n");
		err = -EINVAL;
		goto error;
	}

	cawake_gpio = of_get_named_gpio(np, "ti,ssi-cawake-gpio", 0);
	if (cawake_gpio < 0) {
		dev_err(&pd->dev, "DT data is missing cawake gpio (err=%d)\n",
			cawake_gpio);
		err = -ENODEV;
		goto error;
	}

	err = devm_gpio_request_one(&port->device, cawake_gpio, GPIOF_DIR_IN,
		"cawake");
	if (err) {
		dev_err(&pd->dev, "could not request cawake gpio (err=%d)!\n",
			err);
		err = -ENXIO;
		goto error;
	}

	omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL);
	if (!omap_port) {
		err = -ENOMEM;
		goto error;
	}
	omap_port->wake_gpio = cawake_gpio;
	omap_port->pdev = &pd->dev;
	omap_port->port_id = port_id;

	/* initialize HSI port */
	port->async	= ssi_async;
	port->setup	= ssi_setup;
	port->flush	= ssi_flush;
	port->start_tx	= ssi_start_tx;
	port->stop_tx	= ssi_stop_tx;
	port->release	= ssi_release;
	hsi_port_set_drvdata(port, omap_port);
	omap_ssi->port[port_id] = omap_port;

	platform_set_drvdata(pd, port);

	err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base,
		&omap_port->sst_dma);
	if (err < 0)
		goto error;
	err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base,
		&omap_port->ssr_dma);
	if (err < 0)
		goto error;

	err = ssi_port_irq(port, pd);
	if (err < 0)
		goto error;
	err = ssi_wake_irq(port, pd);
	if (err < 0)
		goto error;

	ssi_queues_init(omap_port);
	spin_lock_init(&omap_port->lock);
	spin_lock_init(&omap_port->wk_lock);
	omap_port->dev = &port->device;

	pm_runtime_irq_safe(omap_port->pdev);
	pm_runtime_enable(omap_port->pdev);

#ifdef CONFIG_DEBUG_FS
	err = ssi_debug_add_port(omap_port, omap_ssi->dir);
	if (err < 0) {
		pm_runtime_disable(omap_port->pdev);
		goto error;
	}
#endif

	hsi_add_clients_from_dt(port, np);

	dev_info(&pd->dev, "ssi port %u successfully initialized (cawake=%d)\n",
		port_id, cawake_gpio);

	return 0;

error:
	return err;
}

static int __exit ssi_port_remove(struct platform_device *pd)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

#ifdef CONFIG_DEBUG_FS
	ssi_debug_remove_port(port);
#endif

	hsi_port_unregister_clients(port);

	tasklet_kill(&omap_port->wake_tasklet);
	tasklet_kill(&omap_port->pio_tasklet);

	port->async	= hsi_dummy_msg;
	port->setup	= hsi_dummy_cl;
	port->flush	= hsi_dummy_cl;
	port->start_tx	= hsi_dummy_cl;
	port->stop_tx	= hsi_dummy_cl;
	port->release	= hsi_dummy_cl;

	omap_ssi->port[omap_port->port_id] = NULL;
	platform_set_drvdata(pd, NULL);
	module_put(ssi->owner);
	pm_runtime_disable(&pd->dev);

	return 0;
}

#ifdef CONFIG_PM
static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	omap_port->sys_mpu_enable = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));

	return 0;
}

static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem	*base;

	writel_relaxed(omap_port->sys_mpu_enable,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));

	/* SST context */
	base = omap_port->sst_base;
	writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
	writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);

	/* SSR context */
	base = omap_port->ssr_base;
	writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
	writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);

	return 0;
}

static int ssi_restore_port_mode(struct omap_ssi_port *omap_port)
{
	u32 mode;

	writel_relaxed(omap_port->sst.mode,
				omap_port->sst_base + SSI_SST_MODE_REG);
	writel_relaxed(omap_port->ssr.mode,
				omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
{
	writel_relaxed(omap_port->sst.divisor,
				omap_port->sst_base + SSI_SST_DIVISOR_REG);

	return 0;
}

static int omap_ssi_port_runtime_suspend(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime suspend!\n");

	ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
	if (omap_ssi->get_loss)
		omap_port->loss_count =
				omap_ssi->get_loss(ssi->device.parent);
	ssi_save_port_ctx(omap_port);

	return 0;
}

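/*
 * On resume the full register context is only restored when the
 * platform's context-loss counter (get_loss(), if provided) shows the
 * power domain actually lost context; the port mode and the TX divisor
 * are restored unconditionally.
 */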
static int omap_ssi_port_runtime_resume(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime resume!\n");

	if ((omap_ssi->get_loss) && (omap_port->loss_count ==
				omap_ssi->get_loss(ssi->device.parent)))
		goto mode; /* We always need to restore the mode & TX divisor */

	ssi_restore_port_ctx(omap_port);

mode:
	ssi_restore_divisor(omap_port);
	ssi_restore_port_mode(omap_port);

	return 0;
}

static const struct dev_pm_ops omap_ssi_port_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend,
		omap_ssi_port_runtime_resume, NULL)
};

#define DEV_PM_OPS     (&omap_ssi_port_pm_ops)
#else
#define DEV_PM_OPS     NULL
#endif

#ifdef CONFIG_OF
static const struct of_device_id omap_ssi_port_of_match[] = {
	{ .compatible = "ti,omap3-ssi-port", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
#else
#define omap_ssi_port_of_match NULL
#endif

static struct platform_driver ssi_port_pdriver = {
	.remove	= __exit_p(ssi_port_remove),
	.driver	= {
		.name	= "omap_ssi_port",
		.of_match_table = omap_ssi_port_of_match,
		.pm	= DEV_PM_OPS,
	},
};

module_platform_driver_probe(ssi_port_pdriver, ssi_port_probe);

MODULE_ALIAS("platform:omap_ssi_port");
MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>");
MODULE_DESCRIPTION("Synchronous Serial Interface Port Driver");
MODULE_LICENSE("GPL v2");