1/*
2 * c8sectpfe-core.c - C8SECTPFE STi DVB driver
3 *
4 * Copyright (c) STMicroelectronics 2015
5 *
6 *   Author:Peter Bennett <peter.bennett@st.com>
7 *	    Peter Griffin <peter.griffin@linaro.org>
8 *
9 *	This program is free software; you can redistribute it and/or
10 *	modify it under the terms of the GNU General Public License as
11 *	published by the Free Software Foundation; either version 2 of
12 *	the License, or (at your option) any later version.
13 */
14#include <linux/atomic.h>
15#include <linux/clk.h>
16#include <linux/completion.h>
17#include <linux/delay.h>
18#include <linux/device.h>
19#include <linux/dma-mapping.h>
20#include <linux/dvb/dmx.h>
21#include <linux/dvb/frontend.h>
22#include <linux/errno.h>
23#include <linux/firmware.h>
24#include <linux/init.h>
25#include <linux/interrupt.h>
26#include <linux/io.h>
27#include <linux/module.h>
28#include <linux/of_gpio.h>
29#include <linux/of_platform.h>
30#include <linux/platform_device.h>
31#include <linux/usb.h>
32#include <linux/slab.h>
33#include <linux/time.h>
34#include <linux/version.h>
35#include <linux/wait.h>
36#include <linux/pinctrl/pinctrl.h>
37
38#include "c8sectpfe-core.h"
39#include "c8sectpfe-common.h"
40#include "c8sectpfe-debugfs.h"
41#include "dmxdev.h"
42#include "dvb_demux.h"
43#include "dvb_frontend.h"
44#include "dvb_net.h"
45
46#define FIRMWARE_MEMDMA "pti_memdma_h407.elf"
47MODULE_FIRMWARE(FIRMWARE_MEMDMA);
48
49#define PID_TABLE_SIZE 1024
50#define POLL_MSECS 50
51
52static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei);
53
54#define TS_PKT_SIZE 188
55#define HEADER_SIZE (4)
56#define PACKET_SIZE (TS_PKT_SIZE+HEADER_SIZE)
57
58#define FEI_ALIGNMENT (32)
59/* hw requires minimum of 8*PACKET_SIZE and padded to 8byte boundary */
60#define FEI_BUFFER_SIZE (8*PACKET_SIZE*340)
61
62#define FIFO_LEN 1024
63
64static void c8sectpfe_timer_interrupt(unsigned long ac8sectpfei)
65{
66	struct c8sectpfei *fei = (struct c8sectpfei *)ac8sectpfei;
67	struct channel_info *channel;
68	int chan_num;
69
70	/* iterate through input block channels */
71	for (chan_num = 0; chan_num < fei->tsin_count; chan_num++) {
72		channel = fei->channel_data[chan_num];
73
74		/* is this descriptor initialised and TP enabled */
75		if (channel->irec && readl(channel->irec + DMA_PRDS_TPENABLE))
76			tasklet_schedule(&channel->tsklet);
77	}
78
79	fei->timer.expires = jiffies +	msecs_to_jiffies(POLL_MSECS);
80	add_timer(&fei->timer);
81}
82
83static void channel_swdemux_tsklet(unsigned long data)
84{
85	struct channel_info *channel = (struct channel_info *)data;
86	struct c8sectpfei *fei = channel->fei;
87	unsigned long wp, rp;
88	int pos, num_packets, n, size;
89	u8 *buf;
90
91	if (unlikely(!channel || !channel->irec))
92		return;
93
94	wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0));
95	rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0));
96
97	pos = rp - channel->back_buffer_busaddr;
98
99	/* has it wrapped */
100	if (wp < rp)
101		wp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE;
102
103	size = wp - rp;
104	num_packets = size / PACKET_SIZE;
105
106	/* manage cache so data is visible to CPU */
107	dma_sync_single_for_cpu(fei->dev,
108				rp,
109				size,
110				DMA_FROM_DEVICE);
111
112	buf = (u8 *) channel->back_buffer_aligned;
113
114	dev_dbg(fei->dev,
115		"chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\t"
116		"rp=0x%lx, wp=0x%lx\n",
117		channel->tsin_id, channel, num_packets, buf, pos, rp, wp);
118
119	for (n = 0; n < num_packets; n++) {
120		dvb_dmx_swfilter_packets(
121			&fei->c8sectpfe[0]->
122				demux[channel->demux_mapping].dvb_demux,
123			&buf[pos], 1);
124
125		pos += PACKET_SIZE;
126	}
127
128	/* advance the read pointer */
129	if (wp == (channel->back_buffer_busaddr + FEI_BUFFER_SIZE))
130		writel(channel->back_buffer_busaddr, channel->irec +
131			DMA_PRDS_BUSRP_TP(0));
132	else
133		writel(wp, channel->irec + DMA_PRDS_BUSWP_TP(0));
134}
135
136static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
137{
138	struct dvb_demux *demux = dvbdmxfeed->demux;
139	struct stdemux *stdemux = (struct stdemux *)demux->priv;
140	struct c8sectpfei *fei = stdemux->c8sectpfei;
141	struct channel_info *channel;
142	u32 tmp;
143	unsigned long *bitmap;
144
145	switch (dvbdmxfeed->type) {
146	case DMX_TYPE_TS:
147		break;
148	case DMX_TYPE_SEC:
149		break;
150	default:
151		dev_err(fei->dev, "%s:%d Error bailing\n"
152			, __func__, __LINE__);
153		return -EINVAL;
154	}
155
156	if (dvbdmxfeed->type == DMX_TYPE_TS) {
157		switch (dvbdmxfeed->pes_type) {
158		case DMX_PES_VIDEO:
159		case DMX_PES_AUDIO:
160		case DMX_PES_TELETEXT:
161		case DMX_PES_PCR:
162		case DMX_PES_OTHER:
163			break;
164		default:
165			dev_err(fei->dev, "%s:%d Error bailing\n"
166				, __func__, __LINE__);
167			return -EINVAL;
168		}
169	}
170
171	if (!atomic_read(&fei->fw_loaded)) {
172		dev_err(fei->dev, "%s: c8sectpfe fw not loaded\n", __func__);
173		return -EINVAL;
174	}
175
176	mutex_lock(&fei->lock);
177
178	channel = fei->channel_data[stdemux->tsin_index];
179
180	bitmap = (unsigned long *) channel->pid_buffer_aligned;
181
182	/* 8192 is a special PID */
183	if (dvbdmxfeed->pid == 8192) {
184		tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
185		tmp &= ~C8SECTPFE_PID_ENABLE;
186		writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
187
188	} else {
189		bitmap_set(bitmap, dvbdmxfeed->pid, 1);
190	}
191
192	/* manage cache so PID bitmap is visible to HW */
193	dma_sync_single_for_device(fei->dev,
194					channel->pid_buffer_busaddr,
195					PID_TABLE_SIZE,
196					DMA_TO_DEVICE);
197
198	channel->active = 1;
199
200	if (fei->global_feed_count == 0) {
201		fei->timer.expires = jiffies +
202			msecs_to_jiffies(msecs_to_jiffies(POLL_MSECS));
203
204		add_timer(&fei->timer);
205	}
206
207	if (stdemux->running_feed_count == 0) {
208
209		dev_dbg(fei->dev, "Starting channel=%p\n", channel);
210
211		tasklet_init(&channel->tsklet, channel_swdemux_tsklet,
212			     (unsigned long) channel);
213
214		/* Reset the internal inputblock sram pointers */
215		writel(channel->fifo,
216			fei->io + C8SECTPFE_IB_BUFF_STRT(channel->tsin_id));
217		writel(channel->fifo + FIFO_LEN - 1,
218			fei->io + C8SECTPFE_IB_BUFF_END(channel->tsin_id));
219
220		writel(channel->fifo,
221			fei->io + C8SECTPFE_IB_READ_PNT(channel->tsin_id));
222		writel(channel->fifo,
223			fei->io + C8SECTPFE_IB_WRT_PNT(channel->tsin_id));
224
225
226		/* reset read / write memdma ptrs for this channel */
227		writel(channel->back_buffer_busaddr, channel->irec +
228			DMA_PRDS_BUSBASE_TP(0));
229
230		tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
231		writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));
232
233		writel(channel->back_buffer_busaddr, channel->irec +
234			DMA_PRDS_BUSWP_TP(0));
235
236		/* Issue a reset and enable InputBlock */
237		writel(C8SECTPFE_SYS_ENABLE | C8SECTPFE_SYS_RESET
238			, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));
239
240		/* and enable the tp */
241		writel(0x1, channel->irec + DMA_PRDS_TPENABLE);
242
243		dev_dbg(fei->dev, "%s:%d Starting DMA feed on stdemux=%p\n"
244			, __func__, __LINE__, stdemux);
245	}
246
247	stdemux->running_feed_count++;
248	fei->global_feed_count++;
249
250	mutex_unlock(&fei->lock);
251
252	return 0;
253}
254
/*
 * DVB demux stop_feed callback: remove the feed's PID from the hw filter
 * (or re-enable filtering that the wildcard PID 8192 disabled), and when
 * the last feed on a channel stops, quiesce the input block and memdma
 * engine following the TP re-configuration sequence from the functional
 * spec (page 168). When the last feed overall stops, the poll timer is
 * cancelled. Returns 0, or -EINVAL if the firmware is not loaded.
 */
static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{

	struct dvb_demux *demux = dvbdmxfeed->demux;
	struct stdemux *stdemux = (struct stdemux *)demux->priv;
	struct c8sectpfei *fei = stdemux->c8sectpfei;
	struct channel_info *channel;
	int idlereq;
	u32 tmp;
	int ret;
	unsigned long *bitmap;

	/* hw cannot be touched before the SLIM core firmware is running */
	if (!atomic_read(&fei->fw_loaded)) {
		dev_err(fei->dev, "%s: c8sectpfe fw not loaded\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&fei->lock);

	channel = fei->channel_data[stdemux->tsin_index];

	bitmap = (unsigned long *) channel->pid_buffer_aligned;

	/* wildcard PID: start_feed disabled hw filtering, re-enable it */
	if (dvbdmxfeed->pid == 8192) {
		tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
		tmp |= C8SECTPFE_PID_ENABLE;
		writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
	} else {
		bitmap_clear(bitmap, dvbdmxfeed->pid, 1);
	}

	/* manage cache so data is visible to HW */
	dma_sync_single_for_device(fei->dev,
					channel->pid_buffer_busaddr,
					PID_TABLE_SIZE,
					DMA_TO_DEVICE);

	if (--stdemux->running_feed_count == 0) {

		channel = fei->channel_data[stdemux->tsin_index];

		/* TP re-configuration on page 168 of functional spec */

		/* disable IB (prevents more TS data going to memdma) */
		writel(0, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));

		/* disable this channels descriptor */
		writel(0,  channel->irec + DMA_PRDS_TPENABLE);

		tasklet_disable(&channel->tsklet);

		/* now request memdma channel goes idle */
		idlereq = (1 << channel->tsin_id) | IDLEREQ;
		writel(idlereq, fei->io + DMA_IDLE_REQ);

		/* wait for idle irq handler to signal completion */
		ret = wait_for_completion_timeout(&channel->idle_completion,
						msecs_to_jiffies(100));

		/* non-fatal: continue teardown even if the hw never idled */
		if (ret == 0)
			dev_warn(fei->dev,
				"Timeout waiting for idle irq on tsin%d\n",
				channel->tsin_id);

		reinit_completion(&channel->idle_completion);

		/* reset read / write ptrs for this channel */

		writel(channel->back_buffer_busaddr,
			channel->irec + DMA_PRDS_BUSBASE_TP(0));

		tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
		writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));

		writel(channel->back_buffer_busaddr,
			channel->irec + DMA_PRDS_BUSWP_TP(0));

		dev_dbg(fei->dev,
			"%s:%d stopping DMA feed on stdemux=%p channel=%d\n",
			__func__, __LINE__, stdemux, channel->tsin_id);

		/* turn off all PIDS in the bitmap */
		memset((void *)channel->pid_buffer_aligned
			, 0x00, PID_TABLE_SIZE);

		/* manage cache so data is visible to HW */
		dma_sync_single_for_device(fei->dev,
					channel->pid_buffer_busaddr,
					PID_TABLE_SIZE,
					DMA_TO_DEVICE);

		channel->active = 0;
	}

	/* last feed overall: stop polling */
	if (--fei->global_feed_count == 0) {
		dev_dbg(fei->dev, "%s:%d global_feed_count=%d\n"
			, __func__, __LINE__, fei->global_feed_count);

		del_timer(&fei->timer);
	}

	mutex_unlock(&fei->lock);

	return 0;
}
360
361static struct channel_info *find_channel(struct c8sectpfei *fei, int tsin_num)
362{
363	int i;
364
365	for (i = 0; i < C8SECTPFE_MAX_TSIN_CHAN; i++) {
366		if (!fei->channel_data[i])
367			continue;
368
369		if (fei->channel_data[i]->tsin_id == tsin_num)
370			return fei->channel_data[i];
371	}
372
373	return NULL;
374}
375
376static void c8sectpfe_getconfig(struct c8sectpfei *fei)
377{
378	struct c8sectpfe_hw *hw = &fei->hw_stats;
379
380	hw->num_ib = readl(fei->io + SYS_CFG_NUM_IB);
381	hw->num_mib = readl(fei->io + SYS_CFG_NUM_MIB);
382	hw->num_swts = readl(fei->io + SYS_CFG_NUM_SWTS);
383	hw->num_tsout = readl(fei->io + SYS_CFG_NUM_TSOUT);
384	hw->num_ccsc = readl(fei->io + SYS_CFG_NUM_CCSC);
385	hw->num_ram = readl(fei->io + SYS_CFG_NUM_RAM);
386	hw->num_tp = readl(fei->io + SYS_CFG_NUM_TP);
387
388	dev_info(fei->dev, "C8SECTPFE hw supports the following:\n");
389	dev_info(fei->dev, "Input Blocks: %d\n", hw->num_ib);
390	dev_info(fei->dev, "Merged Input Blocks: %d\n", hw->num_mib);
391	dev_info(fei->dev, "Software Transport Stream Inputs: %d\n"
392				, hw->num_swts);
393	dev_info(fei->dev, "Transport Stream Output: %d\n", hw->num_tsout);
394	dev_info(fei->dev, "Cable Card Converter: %d\n", hw->num_ccsc);
395	dev_info(fei->dev, "RAMs supported by C8SECTPFE: %d\n", hw->num_ram);
396	dev_info(fei->dev, "Tango TPs supported by C8SECTPFE: %d\n"
397			, hw->num_tp);
398}
399
400static irqreturn_t c8sectpfe_idle_irq_handler(int irq, void *priv)
401{
402	struct c8sectpfei *fei = priv;
403	struct channel_info *chan;
404	int bit;
405	unsigned long tmp = readl(fei->io + DMA_IDLE_REQ);
406
407	/* page 168 of functional spec: Clear the idle request
408	   by writing 0 to the C8SECTPFE_DMA_IDLE_REQ register. */
409
410	/* signal idle completion */
411	for_each_set_bit(bit, &tmp, fei->hw_stats.num_ib) {
412
413		chan = find_channel(fei, bit);
414
415		if (chan)
416			complete(&chan->idle_completion);
417	}
418
419	writel(0, fei->io + DMA_IDLE_REQ);
420
421	return IRQ_HANDLED;
422}
423
424
425static void free_input_block(struct c8sectpfei *fei, struct channel_info *tsin)
426{
427	if (!fei || !tsin)
428		return;
429
430	if (tsin->back_buffer_busaddr)
431		if (!dma_mapping_error(fei->dev, tsin->back_buffer_busaddr))
432			dma_unmap_single(fei->dev, tsin->back_buffer_busaddr,
433				FEI_BUFFER_SIZE, DMA_BIDIRECTIONAL);
434
435	kfree(tsin->back_buffer_start);
436
437	if (tsin->pid_buffer_busaddr)
438		if (!dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr))
439			dma_unmap_single(fei->dev, tsin->pid_buffer_busaddr,
440				PID_TABLE_SIZE, DMA_BIDIRECTIONAL);
441
442	kfree(tsin->pid_buffer_start);
443}
444
445#define MAX_NAME 20
446
/*
 * Configure one tsin channel end-to-end: allocate and DMA-map the packet
 * back buffer and PID-filter bitmap, select the channel's pinctrl state,
 * program the input block registers (sync/packet format, sram FIFO
 * pointers, PID filter) and the memdma pointer-record block, then init
 * the demux tasklet. Returns 0 or a negative errno; on any failure all
 * buffers allocated so far are released via free_input_block().
 */
static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
				struct channel_info *tsin)
{
	int ret;
	u32 tmp;
	char tsin_pin_name[MAX_NAME];

	if (!fei || !tsin)
		return -EINVAL;

	dev_dbg(fei->dev, "%s:%d Configuring channel=%p tsin=%d\n"
		, __func__, __LINE__, tsin, tsin->tsin_id);

	/* signalled by the idle irq handler when this channel goes idle */
	init_completion(&tsin->idle_completion);

	/* over-allocate so the buffer can be aligned down below */
	tsin->back_buffer_start = kzalloc(FEI_BUFFER_SIZE +
					FEI_ALIGNMENT, GFP_KERNEL);

	if (!tsin->back_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/* Ensure backbuffer is 32byte aligned */
	tsin->back_buffer_aligned = tsin->back_buffer_start
		+ FEI_ALIGNMENT;

	tsin->back_buffer_aligned = (void *)
		(((uintptr_t) tsin->back_buffer_aligned) & ~0x1F);

	tsin->back_buffer_busaddr = dma_map_single(fei->dev,
					(void *)tsin->back_buffer_aligned,
					FEI_BUFFER_SIZE,
					DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->back_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map back_buffer\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/*
	 * The pid buffer can be configured (in hw) for byte or bit
	 * per pid. By powers of deduction we conclude stih407 family
	 * is configured (at SoC design stage) for bit per pid.
	 */
	/* 2048 = PID_TABLE_SIZE * 2, leaving room to align down below */
	tsin->pid_buffer_start = kzalloc(2048, GFP_KERNEL);

	if (!tsin->pid_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * PID buffer needs to be aligned to size of the pid table
	 * which at bit per pid is 1024 bytes (8192 pids / 8).
	 * PIDF_BASE register enforces this alignment when writing
	 * the register.
	 */

	tsin->pid_buffer_aligned = tsin->pid_buffer_start +
		PID_TABLE_SIZE;

	tsin->pid_buffer_aligned = (void *)
		(((uintptr_t) tsin->pid_buffer_aligned) & ~0x3ff);

	tsin->pid_buffer_busaddr = dma_map_single(fei->dev,
						tsin->pid_buffer_aligned,
						PID_TABLE_SIZE,
						DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map pid_bitmap\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/* manage cache so pid bitmap is visible to HW */
	dma_sync_single_for_device(fei->dev,
				tsin->pid_buffer_busaddr,
				PID_TABLE_SIZE,
				DMA_TO_DEVICE);

	/* pinctrl state name, e.g. "tsin0-serial" or "tsin0-parallel" */
	snprintf(tsin_pin_name, MAX_NAME, "tsin%d-%s", tsin->tsin_id,
		(tsin->serial_not_parallel ? "serial" : "parallel"));

	tsin->pstate = pinctrl_lookup_state(fei->pinctrl, tsin_pin_name);
	if (IS_ERR(tsin->pstate)) {
		dev_err(fei->dev, "%s: pinctrl_lookup_state couldn't find %s state\n"
			, __func__, tsin_pin_name);
		ret = PTR_ERR(tsin->pstate);
		goto err_unmap;
	}

	ret = pinctrl_select_state(fei->pinctrl, tsin->pstate);

	if (ret) {
		dev_err(fei->dev, "%s: pinctrl_select_state failed\n"
			, __func__);
		goto err_unmap;
	}

	/* Enable this input block */
	tmp = readl(fei->io + SYS_INPUT_CLKEN);
	tmp |= BIT(tsin->tsin_id);
	writel(tmp, fei->io + SYS_INPUT_CLKEN);

	/* build the input format configuration from the DT properties */
	if (tsin->serial_not_parallel)
		tmp |= C8SECTPFE_SERIAL_NOT_PARALLEL;

	if (tsin->invert_ts_clk)
		tmp |= C8SECTPFE_INVERT_TSCLK;

	if (tsin->async_not_sync)
		tmp |= C8SECTPFE_ASYNC_NOT_SYNC;

	tmp |= C8SECTPFE_ALIGN_BYTE_SOP | C8SECTPFE_BYTE_ENDIANNESS_MSB;

	writel(tmp, fei->io + C8SECTPFE_IB_IP_FMT_CFG(tsin->tsin_id));

	/* sync/drop thresholds and the TS sync byte (0x47) */
	writel(C8SECTPFE_SYNC(0x9) |
		C8SECTPFE_DROP(0x9) |
		C8SECTPFE_TOKEN(0x47),
		fei->io + C8SECTPFE_IB_SYNCLCKDRP_CFG(tsin->tsin_id));

	writel(TS_PKT_SIZE, fei->io + C8SECTPFE_IB_PKT_LEN(tsin->tsin_id));

	/* Place the FIFO's at the end of the irec descriptors */

	tsin->fifo = (tsin->tsin_id * FIFO_LEN);

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id));
	writel(tsin->fifo + FIFO_LEN - 1,
		fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id));

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id));
	writel(tsin->fifo, fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id));

	writel(tsin->pid_buffer_busaddr,
		fei->io + PIDF_BASE(tsin->tsin_id));

	dev_info(fei->dev, "chan=%d PIDF_BASE=0x%x pid_bus_addr=%pad\n",
		tsin->tsin_id, readl(fei->io + PIDF_BASE(tsin->tsin_id)),
		&tsin->pid_buffer_busaddr);

	/* Configure and enable HW PID filtering */

	/*
	 * The PID value is created by assembling the first 8 bytes of
	 * the TS packet into a 64-bit word in big-endian format. A
	 * slice of that 64-bit word is taken from
	 * (PID_OFFSET+PID_NUM_BITS-1) to PID_OFFSET.
	 */
	tmp = (C8SECTPFE_PID_ENABLE | C8SECTPFE_PID_NUMBITS(13)
		| C8SECTPFE_PID_OFFSET(40));

	writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(tsin->tsin_id));

	dev_dbg(fei->dev, "chan=%d setting wp: %d, rp: %d, buf: %d-%d\n",
		tsin->tsin_id,
		readl(fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id)));

	/* Get base address of pointer record block from DMEM */
	tsin->irec = fei->io + DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET +
			readl(fei->io + DMA_PTRREC_BASE);

	/* fill out pointer record data structure */

	/* advance pointer record block to our channel */
	tsin->irec += (tsin->tsin_id * DMA_PRDS_SIZE);

	writel(tsin->fifo, tsin->irec + DMA_PRDS_MEMBASE);

	writel(tsin->fifo + FIFO_LEN - 1, tsin->irec + DMA_PRDS_MEMTOP);

	/* packet size rounded up to the next 8-byte boundary */
	writel((188 + 7)&~7, tsin->irec + DMA_PRDS_PKTSIZE);

	writel(0x1, tsin->irec + DMA_PRDS_TPENABLE);

	/* read/write pointers with physical bus address */

	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSBASE_TP(0));

	tmp = tsin->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
	writel(tmp, tsin->irec + DMA_PRDS_BUSTOP_TP(0));

	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSWP_TP(0));
	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSRP_TP(0));

	/* initialize tasklet */
	tasklet_init(&tsin->tsklet, channel_swdemux_tsklet,
		(unsigned long) tsin);

	return 0;

err_unmap:
	free_input_block(fei, tsin);
	return ret;
}
649
650static irqreturn_t c8sectpfe_error_irq_handler(int irq, void *priv)
651{
652	struct c8sectpfei *fei = priv;
653
654	dev_err(fei->dev, "%s: error handling not yet implemented\n"
655		, __func__);
656
657	/*
658	 * TODO FIXME we should detect some error conditions here
659	 * and ideally so something about them!
660	 */
661
662	return IRQ_HANDLED;
663}
664
665static int c8sectpfe_probe(struct platform_device *pdev)
666{
667	struct device *dev = &pdev->dev;
668	struct device_node *child, *np = dev->of_node;
669	struct c8sectpfei *fei;
670	struct resource *res;
671	int ret, index = 0;
672	struct channel_info *tsin;
673
674	/* Allocate the c8sectpfei structure */
675	fei = devm_kzalloc(dev, sizeof(struct c8sectpfei), GFP_KERNEL);
676	if (!fei)
677		return -ENOMEM;
678
679	fei->dev = dev;
680
681	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "c8sectpfe");
682	fei->io = devm_ioremap_resource(dev, res);
683	if (IS_ERR(fei->io))
684		return PTR_ERR(fei->io);
685
686	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
687					"c8sectpfe-ram");
688	fei->sram = devm_ioremap_resource(dev, res);
689	if (IS_ERR(fei->sram))
690		return PTR_ERR(fei->sram);
691
692	fei->sram_size = res->end - res->start;
693
694	fei->idle_irq = platform_get_irq_byname(pdev, "c8sectpfe-idle-irq");
695	if (fei->idle_irq < 0) {
696		dev_err(dev, "Can't get c8sectpfe-idle-irq\n");
697		return fei->idle_irq;
698	}
699
700	fei->error_irq = platform_get_irq_byname(pdev, "c8sectpfe-error-irq");
701	if (fei->error_irq < 0) {
702		dev_err(dev, "Can't get c8sectpfe-error-irq\n");
703		return fei->error_irq;
704	}
705
706	platform_set_drvdata(pdev, fei);
707
708	fei->c8sectpfeclk = devm_clk_get(dev, "c8sectpfe");
709	if (IS_ERR(fei->c8sectpfeclk)) {
710		dev_err(dev, "c8sectpfe clk not found\n");
711		return PTR_ERR(fei->c8sectpfeclk);
712	}
713
714	ret = clk_prepare_enable(fei->c8sectpfeclk);
715	if (ret) {
716		dev_err(dev, "Failed to enable c8sectpfe clock\n");
717		return ret;
718	}
719
720	/* to save power disable all IP's (on by default) */
721	writel(0, fei->io + SYS_INPUT_CLKEN);
722
723	/* Enable memdma clock */
724	writel(MEMDMAENABLE, fei->io + SYS_OTHER_CLKEN);
725
726	/* clear internal sram */
727	memset_io(fei->sram, 0x0, fei->sram_size);
728
729	c8sectpfe_getconfig(fei);
730
731	ret = devm_request_irq(dev, fei->idle_irq, c8sectpfe_idle_irq_handler,
732			0, "c8sectpfe-idle-irq", fei);
733	if (ret) {
734		dev_err(dev, "Can't register c8sectpfe-idle-irq IRQ.\n");
735		goto err_clk_disable;
736	}
737
738	ret = devm_request_irq(dev, fei->error_irq,
739				c8sectpfe_error_irq_handler, 0,
740				"c8sectpfe-error-irq", fei);
741	if (ret) {
742		dev_err(dev, "Can't register c8sectpfe-error-irq IRQ.\n");
743		goto err_clk_disable;
744	}
745
746	fei->tsin_count = of_get_child_count(np);
747
748	if (fei->tsin_count > C8SECTPFE_MAX_TSIN_CHAN ||
749		fei->tsin_count > fei->hw_stats.num_ib) {
750
751		dev_err(dev, "More tsin declared than exist on SoC!\n");
752		ret = -EINVAL;
753		goto err_clk_disable;
754	}
755
756	fei->pinctrl = devm_pinctrl_get(dev);
757
758	if (IS_ERR(fei->pinctrl)) {
759		dev_err(dev, "Error getting tsin pins\n");
760		ret = PTR_ERR(fei->pinctrl);
761		goto err_clk_disable;
762	}
763
764	for_each_child_of_node(np, child) {
765		struct device_node *i2c_bus;
766
767		fei->channel_data[index] = devm_kzalloc(dev,
768						sizeof(struct channel_info),
769						GFP_KERNEL);
770
771		if (!fei->channel_data[index]) {
772			ret = -ENOMEM;
773			goto err_clk_disable;
774		}
775
776		tsin = fei->channel_data[index];
777
778		tsin->fei = fei;
779
780		ret = of_property_read_u32(child, "tsin-num", &tsin->tsin_id);
781		if (ret) {
782			dev_err(&pdev->dev, "No tsin_num found\n");
783			goto err_clk_disable;
784		}
785
786		/* sanity check value */
787		if (tsin->tsin_id > fei->hw_stats.num_ib) {
788			dev_err(&pdev->dev,
789				"tsin-num %d specified greater than number\n\t"
790				"of input block hw in SoC! (%d)",
791				tsin->tsin_id, fei->hw_stats.num_ib);
792			ret = -EINVAL;
793			goto err_clk_disable;
794		}
795
796		tsin->invert_ts_clk = of_property_read_bool(child,
797							"invert-ts-clk");
798
799		tsin->serial_not_parallel = of_property_read_bool(child,
800							"serial-not-parallel");
801
802		tsin->async_not_sync = of_property_read_bool(child,
803							"async-not-sync");
804
805		ret = of_property_read_u32(child, "dvb-card",
806					&tsin->dvb_card);
807		if (ret) {
808			dev_err(&pdev->dev, "No dvb-card found\n");
809			goto err_clk_disable;
810		}
811
812		i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
813		if (!i2c_bus) {
814			dev_err(&pdev->dev, "No i2c-bus found\n");
815			goto err_clk_disable;
816		}
817		tsin->i2c_adapter =
818			of_find_i2c_adapter_by_node(i2c_bus);
819		if (!tsin->i2c_adapter) {
820			dev_err(&pdev->dev, "No i2c adapter found\n");
821			of_node_put(i2c_bus);
822			goto err_clk_disable;
823		}
824		of_node_put(i2c_bus);
825
826		tsin->rst_gpio = of_get_named_gpio(child, "rst-gpio", 0);
827
828		ret = gpio_is_valid(tsin->rst_gpio);
829		if (!ret) {
830			dev_err(dev,
831				"reset gpio for tsin%d not valid (gpio=%d)\n",
832				tsin->tsin_id, tsin->rst_gpio);
833			goto err_clk_disable;
834		}
835
836		ret = devm_gpio_request_one(dev, tsin->rst_gpio,
837					GPIOF_OUT_INIT_LOW, "NIM reset");
838		if (ret && ret != -EBUSY) {
839			dev_err(dev, "Can't request tsin%d reset gpio\n"
840				, fei->channel_data[index]->tsin_id);
841			goto err_clk_disable;
842		}
843
844		if (!ret) {
845			/* toggle reset lines */
846			gpio_direction_output(tsin->rst_gpio, 0);
847			usleep_range(3500, 5000);
848			gpio_direction_output(tsin->rst_gpio, 1);
849			usleep_range(3000, 5000);
850		}
851
852		tsin->demux_mapping = index;
853
854		dev_dbg(fei->dev,
855			"channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\t"
856			"serial-not-parallel=%d pkt-clk-valid=%d dvb-card=%d\n",
857			fei->channel_data[index], index,
858			tsin->tsin_id, tsin->invert_ts_clk,
859			tsin->serial_not_parallel, tsin->async_not_sync,
860			tsin->dvb_card);
861
862		index++;
863	}
864
865	/* Setup timer interrupt */
866	init_timer(&fei->timer);
867	fei->timer.function = c8sectpfe_timer_interrupt;
868	fei->timer.data = (unsigned long)fei;
869
870	mutex_init(&fei->lock);
871
872	/* Get the configuration information about the tuners */
873	ret = c8sectpfe_tuner_register_frontend(&fei->c8sectpfe[0],
874					(void *)fei,
875					c8sectpfe_start_feed,
876					c8sectpfe_stop_feed);
877	if (ret) {
878		dev_err(dev, "c8sectpfe_tuner_register_frontend failed (%d)\n",
879			ret);
880		goto err_clk_disable;
881	}
882
883	/* ensure all other init has been done before requesting firmware */
884	ret = load_c8sectpfe_fw_step1(fei);
885	if (ret) {
886		dev_err(dev, "Couldn't load slim core firmware\n");
887		goto err_clk_disable;
888	}
889
890	c8sectpfe_debugfs_init(fei);
891
892	return 0;
893
894err_clk_disable:
895	/* TODO uncomment when upstream has taken a reference on this clk */
896	/*clk_disable_unprepare(fei->c8sectpfeclk);*/
897	return ret;
898}
899
900static int c8sectpfe_remove(struct platform_device *pdev)
901{
902	struct c8sectpfei *fei = platform_get_drvdata(pdev);
903	struct channel_info *channel;
904	int i;
905
906	wait_for_completion(&fei->fw_ack);
907
908	c8sectpfe_tuner_unregister_frontend(fei->c8sectpfe[0], fei);
909
910	/*
911	 * Now loop through and un-configure each of the InputBlock resources
912	 */
913	for (i = 0; i < fei->tsin_count; i++) {
914		channel = fei->channel_data[i];
915		free_input_block(fei, channel);
916	}
917
918	c8sectpfe_debugfs_exit(fei);
919
920	dev_info(fei->dev, "Stopping memdma SLIM core\n");
921	if (readl(fei->io + DMA_CPU_RUN))
922		writel(0x0,  fei->io + DMA_CPU_RUN);
923
924	/* unclock all internal IP's */
925	if (readl(fei->io + SYS_INPUT_CLKEN))
926		writel(0, fei->io + SYS_INPUT_CLKEN);
927
928	if (readl(fei->io + SYS_OTHER_CLKEN))
929		writel(0, fei->io + SYS_OTHER_CLKEN);
930
931	/* TODO uncomment when upstream has taken a reference on this clk */
932	/*
933	if (fei->c8sectpfeclk)
934		clk_disable_unprepare(fei->c8sectpfeclk);
935	*/
936
937	return 0;
938}
939
940
941static int configure_channels(struct c8sectpfei *fei)
942{
943	int index = 0, ret;
944	struct channel_info *tsin;
945	struct device_node *child, *np = fei->dev->of_node;
946
947	/* iterate round each tsin and configure memdma descriptor and IB hw */
948	for_each_child_of_node(np, child) {
949
950		tsin = fei->channel_data[index];
951
952		ret = configure_memdma_and_inputblock(fei,
953						fei->channel_data[index]);
954
955		if (ret) {
956			dev_err(fei->dev,
957				"configure_memdma_and_inputblock failed\n");
958			goto err_unmap;
959		}
960		index++;
961	}
962
963	return 0;
964
965err_unmap:
966	for (index = 0; index < fei->tsin_count; index++) {
967		tsin = fei->channel_data[index];
968		free_input_block(fei, tsin);
969	}
970	return ret;
971}
972
973static int
974c8sectpfe_elf_sanity_check(struct c8sectpfei *fei, const struct firmware *fw)
975{
976	struct elf32_hdr *ehdr;
977	char class;
978
979	if (!fw) {
980		dev_err(fei->dev, "failed to load %s\n", FIRMWARE_MEMDMA);
981		return -EINVAL;
982	}
983
984	if (fw->size < sizeof(struct elf32_hdr)) {
985		dev_err(fei->dev, "Image is too small\n");
986		return -EINVAL;
987	}
988
989	ehdr = (struct elf32_hdr *)fw->data;
990
991	/* We only support ELF32 at this point */
992	class = ehdr->e_ident[EI_CLASS];
993	if (class != ELFCLASS32) {
994		dev_err(fei->dev, "Unsupported class: %d\n", class);
995		return -EINVAL;
996	}
997
998	if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
999		dev_err(fei->dev, "Unsupported firmware endianness\n");
1000		return -EINVAL;
1001	}
1002
1003	if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
1004		dev_err(fei->dev, "Image is too small\n");
1005		return -EINVAL;
1006	}
1007
1008	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
1009		dev_err(fei->dev, "Image is corrupted (bad magic)\n");
1010		return -EINVAL;
1011	}
1012
1013	/* Check ELF magic */
1014	ehdr = (Elf32_Ehdr *)fw->data;
1015	if (ehdr->e_ident[EI_MAG0] != ELFMAG0 ||
1016	    ehdr->e_ident[EI_MAG1] != ELFMAG1 ||
1017	    ehdr->e_ident[EI_MAG2] != ELFMAG2 ||
1018	    ehdr->e_ident[EI_MAG3] != ELFMAG3) {
1019		dev_err(fei->dev, "Invalid ELF magic\n");
1020		return -EINVAL;
1021	}
1022
1023	if (ehdr->e_type != ET_EXEC) {
1024		dev_err(fei->dev, "Unsupported ELF header type\n");
1025		return -EINVAL;
1026	}
1027
1028	if (ehdr->e_phoff > fw->size) {
1029		dev_err(fei->dev, "Firmware size is too small\n");
1030		return -EINVAL;
1031	}
1032
1033	return 0;
1034}
1035
1036
/*
 * Copy an executable (IMEM) ELF segment into MEMDMA instruction memory.
 * Each 24-bit (3-byte) instruction in the segment is expanded to 32 bits
 * on the fly by writing a zero pad byte after every third source byte.
 */
static void load_imem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
			const struct firmware *fw, u8 __iomem *dest,
			int seg_num)
{
	const u8 *imem_src = fw->data + phdr->p_offset;
	int i;

	/*
	 * For IMEM segments, the segment contains 24-bit
	 * instructions which must be padded to 32-bit
	 * instructions before being written. The written
	 * segment is padded with NOP instructions.
	 */

	dev_dbg(fei->dev,
		"Loading IMEM segment %d 0x%08x\n\t"
		" (0x%x bytes) -> 0x%p (0x%x bytes)\n", seg_num,
		phdr->p_paddr, phdr->p_filesz,
		dest, phdr->p_memsz + phdr->p_memsz / 3);

	for (i = 0; i < phdr->p_filesz; i++) {

		/* NOTE(review): imem_src points into fw->data (normal RAM),
		 * so the __iomem cast on the readb() side looks spurious;
		 * kept as-is - confirm before changing */
		writeb(readb((void __iomem *)imem_src), (void __iomem *)dest);

		/* Every 3 bytes, add an additional
		 * padding zero in destination */
		if (i % 3 == 2) {
			dest++;
			writeb(0x00, (void __iomem *)dest);
		}

		dest++;
		imem_src++;
	}
}
1072
/*
 * Copy a data (DMEM) ELF segment into MEMDMA data memory verbatim, then
 * zero-fill the remainder up to p_memsz (the bss portion).
 */
static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
			const struct firmware *fw, u8 __iomem *dst, int seg_num)
{
	/*
	 * For DMEM segments copy the segment data from the ELF
	 * file and pad segment with zeroes
	 */

	dev_dbg(fei->dev,
		"Loading DMEM segment %d 0x%08x\n\t"
		"(0x%x bytes) -> 0x%p (0x%x bytes)\n",
		seg_num, phdr->p_paddr, phdr->p_filesz,
		dst, phdr->p_memsz);

	memcpy((void __force *)dst, (void *)fw->data + phdr->p_offset,
		phdr->p_filesz);

	/* zero the bss portion beyond the file data */
	memset((void __force *)dst + phdr->p_filesz, 0,
		phdr->p_memsz - phdr->p_filesz);
}
1093
/*
 * Walk the firmware's ELF program headers and load every PT_LOAD segment
 * into the MEMDMA SLIM core: segments flagged executable (PF_X) go to
 * IMEM, all others to DMEM. Always releases the firmware before
 * returning. Returns 0, or -EINVAL if a segment lies outside the
 * firmware image or the arguments are missing.
 */
static int load_slim_core_fw(const struct firmware *fw, void *context)
{
	struct c8sectpfei *fei = context;
	Elf32_Ehdr *ehdr;
	Elf32_Phdr *phdr;
	u8 __iomem *dst;
	int err = 0, i;

	if (!fw || !context)
		return -EINVAL;

	ehdr = (Elf32_Ehdr *)fw->data;
	phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff);

	/* go through the available ELF segments */
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {

		/* Only consider LOAD segments */
		if (phdr->p_type != PT_LOAD)
			continue;

		/*
		 * Check segment is contained within the fw->data buffer
		 */
		if (phdr->p_offset + phdr->p_filesz > fw->size) {
			dev_err(fei->dev,
				"Segment %d is outside of firmware file\n", i);
			err = -EINVAL;
			break;
		}

		/*
		 * MEMDMA IMEM has executable flag set, otherwise load
		 * this segment into DMEM.
		 *
		 */

		if (phdr->p_flags & PF_X) {
			dst = (u8 __iomem *) fei->io + DMA_MEMDMA_IMEM;
			/*
			 * The Slim ELF file uses 32-bit word addressing for
			 * load offsets.
			 */
			dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
			load_imem_segment(fei, phdr, fw, dst, i);
		} else {
			dst = (u8 __iomem *) fei->io + DMA_MEMDMA_DMEM;
			/*
			 * The Slim ELF file uses 32-bit word addressing for
			 * load offsets.
			 */
			dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
			load_dmem_segment(fei, phdr, fw, dst, i);
		}
	}

	/* the firmware is consumed here regardless of outcome */
	release_firmware(fw);
	return err;
}
1153
/*
 * Async firmware-load callback (from request_firmware_nowait): sanity
 * check the ELF image, load it into the SLIM core, configure the input
 * blocks, boot the memdma core and mark the firmware as loaded. In all
 * cases fw_ack is completed so c8sectpfe_remove() never blocks forever.
 */
static void load_c8sectpfe_fw_cb(const struct firmware *fw, void *context)
{
	struct c8sectpfei *fei = context;
	int err;

	err = c8sectpfe_elf_sanity_check(fei, fw);
	if (err) {
		dev_err(fei->dev, "c8sectpfe_elf_sanity_check failed err=(%d)\n"
			, err);
		goto err;
	}

	/* consumes (releases) fw */
	err = load_slim_core_fw(fw, context);
	if (err) {
		dev_err(fei->dev, "load_slim_core_fw failed err=(%d)\n", err);
		goto err;
	}

	/* now the firmware is loaded configure the input blocks */
	err = configure_channels(fei);
	if (err) {
		dev_err(fei->dev, "configure_channels failed err=(%d)\n", err);
		goto err;
	}

	/*
	 * STBus target port can access IMEM and DMEM ports
	 * without waiting for CPU
	 */
	writel(0x1, fei->io + DMA_PER_STBUS_SYNC);

	dev_info(fei->dev, "Boot the memdma SLIM core\n");
	writel(0x1,  fei->io + DMA_CPU_RUN);

	/* feeds may start now - see c8sectpfe_start_feed() */
	atomic_set(&fei->fw_loaded, 1);
err:
	complete_all(&fei->fw_ack);
}
1192
1193static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei)
1194{
1195	int err;
1196
1197	dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);
1198
1199	init_completion(&fei->fw_ack);
1200	atomic_set(&fei->fw_loaded, 0);
1201
1202	err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
1203				FIRMWARE_MEMDMA, fei->dev, GFP_KERNEL, fei,
1204				load_c8sectpfe_fw_cb);
1205
1206	if (err) {
1207		dev_err(fei->dev, "request_firmware_nowait err: %d.\n", err);
1208		complete_all(&fei->fw_ack);
1209		return err;
1210	}
1211
1212	return 0;
1213}
1214
/* OF match table: binds this driver to stih407-family c8sectpfe nodes */
static const struct of_device_id c8sectpfe_match[] = {
	{ .compatible = "st,stih407-c8sectpfe" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, c8sectpfe_match);

static struct platform_driver c8sectpfe_driver = {
	.driver = {
		.name = "c8sectpfe",
		.of_match_table = of_match_ptr(c8sectpfe_match),
	},
	.probe	= c8sectpfe_probe,
	.remove	= c8sectpfe_remove,
};

/* generates module init/exit that (un)register the platform driver */
module_platform_driver(c8sectpfe_driver);

MODULE_AUTHOR("Peter Bennett <peter.bennett@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
MODULE_DESCRIPTION("C8SECTPFE STi DVB Driver");
MODULE_LICENSE("GPL");
1236