#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

/**
 *	config_drive_for_dma	-	attempt to activate IDE DMA
 *	@drive: the drive to place in DMA mode
 *
 *	If the drive supports at least mode 2 DMA or UDMA of any kind
 *	then attempt to place it into DMA mode. Drives that are known to
 *	support DMA but predate the DMA properties or that are known
 *	to have DMA handling bugs are also set up appropriately based
 *	on the good/bad drive lists.
 */

int config_drive_for_dma(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u16 *id = drive->id;

	if (drive->media != ide_disk) {
		if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
			return 0;
	}

	/*
	 * Enable DMA on any drive that has
	 * UltraDMA (mode 0/1/2/3/4/5/6) enabled
	 */
	if ((id[ATA_ID_FIELD_VALID] & 4) &&
	    ((id[ATA_ID_UDMA_MODES] >> 8) & 0x7f))
		return 1;

	/*
	 * Enable DMA on any drive that has mode2 DMA
	 * (multi or single) enabled
	 */
	if ((id[ATA_ID_MWDMA_MODES] & 0x404) == 0x404 ||
	    (id[ATA_ID_SWDMA_MODES] & 0x404) == 0x404)
		return 1;

	/* Consult the list of known "good" drives */
	if (ide_dma_good_drive(drive))
		return 1;

	return 0;
}
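
/*
 * Reading aid for the identify-data words consulted above (layouts per
 * the ATA spec, word indices from <linux/ata.h>):
 *
 *	word 53 (ATA_ID_FIELD_VALID)  bit 2: word 88 is valid
 *	word 62 (ATA_ID_SWDMA_MODES)  bits 0-2 supported, bits 8-10 selected
 *	word 63 (ATA_ID_MWDMA_MODES)  bits 0-2 supported, bits 8-10 selected
 *	word 88 (ATA_ID_UDMA_MODES)   bits 0-6 supported, bits 8-14 selected
 *
 * Hence the 0x404 mask: mode 2 supported (bit 2) and mode 2 selected
 * (bit 10) must both be set.
 */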

u8 ide_dma_sff_read_status(ide_hwif_t *hwif)
{
	unsigned long addr = hwif->dma_base + ATA_DMA_STATUS;

	if (hwif->host_flags & IDE_HFLAG_MMIO)
		return readb((void __iomem *)addr);
	else
		return inb(addr);
}
EXPORT_SYMBOL_GPL(ide_dma_sff_read_status);
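
/*
 * Register map used by the accessors above and below (per the SFF-8038i
 * bus-master programming interface; offsets relative to hwif->dma_base):
 *
 *	+0  ATA_DMA_CMD        ATA_DMA_START (bit 0), ATA_DMA_WR (bit 3)
 *	+2  ATA_DMA_STATUS     ATA_DMA_ACTIVE (bit 0), ATA_DMA_ERR (bit 1),
 *	                       ATA_DMA_INTR (bit 2), drive 0/1 DMA capable
 *	                       (bits 5/6)
 *	+4  ATA_DMA_TABLE_OFS  physical address of the PRD table
 *
 * These helpers hide whether the block is port I/O or MMIO mapped.
 */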

static void ide_dma_sff_write_status(ide_hwif_t *hwif, u8 val)
{
	unsigned long addr = hwif->dma_base + ATA_DMA_STATUS;

	if (hwif->host_flags & IDE_HFLAG_MMIO)
		writeb(val, (void __iomem *)addr);
	else
		outb(val, addr);
}

/**
 *	ide_dma_host_set	-	Enable/disable DMA on a host
 *	@drive: drive to control
 *	@on: 1 to enable DMA, 0 to disable it
 *
 *	Enable/disable DMA on an IDE controller following generic
 *	bus-mastering IDE controller behaviour.
 */

void ide_dma_host_set(ide_drive_t *drive, int on)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 unit = drive->dn & 1;
	u8 dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);

	/*
	 * Bits 5 and 6 of the status register are the drive 0/1
	 * "DMA capable" flags.
	 */
	if (on)
		dma_stat |= (1 << (5 + unit));
	else
		dma_stat &= ~(1 << (5 + unit));

	ide_dma_sff_write_status(hwif, dma_stat);
}
EXPORT_SYMBOL_GPL(ide_dma_host_set);

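/*
 * PRD (Physical Region Descriptor) table format, per SFF-8038i: each
 * entry is two little-endian 32-bit words -- the physical base address
 * of a memory region, then the byte count in the low 16 bits (0x0000
 * meaning 64KiB) with the end-of-table flag in bit 31 of the final
 * entry. No region may cross a 64KiB boundary. As a worked example
 * (hypothetical addresses), a 12KiB segment at physical 0x0001f000
 * becomes two entries:
 *
 *	0x0001f000, 0x00001000	 4KiB, up to the 64KiB boundary
 *	0x00020000, 0x80002000	 8KiB, EOT set if this is the last entry
 */
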
/**
 *	ide_build_dmatable	-	build IDE DMA table
 *	@drive: the drive to build the DMA table for
 *	@cmd: command to map
 *
 *	ide_build_dmatable() prepares a dma request. We map the command
 *	to get the pci bus addresses of the buffers and then build up
 *	the PRD table that the IDE layer wants to be fed.
 *
 *	Most chipsets correctly interpret a length of 0x0000 as 64KB,
 *	but at least one (e.g. CS5530) misinterprets it as zero (!).
 *	So we break the 64KB entry into two 32KB entries instead.
 *
 *	Returns the number of built PRD entries if all went okay,
 *	returns 0 otherwise.
 *
 *	May also be invoked from trm290.c
 */

int ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	__le32 *table = (__le32 *)hwif->dmatable_cpu;
	unsigned int count = 0;
	int i;
	struct scatterlist *sg;
	u8 is_trm290 = !!(hwif->host_flags & IDE_HFLAG_TRM290);

	for_each_sg(hwif->sg_table, sg, cmd->sg_nents, i) {
		u32 cur_addr, cur_len, xcount, bcount;

		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		/*
		 * Fill in the DMA table, without crossing any 64kB boundaries.
		 * Most hardware requires 16-bit alignment of all blocks,
		 * but the trm290 requires 32-bit alignment.
		 */

		while (cur_len) {
			if (count++ >= PRD_ENTRIES)
				goto use_pio_instead;

			/* bytes remaining up to the next 64kB boundary */
			bcount = 0x10000 - (cur_addr & 0xffff);
			if (bcount > cur_len)
				bcount = cur_len;
			*table++ = cpu_to_le32(cur_addr);
			xcount = bcount & 0xffff;
			if (is_trm290)
				xcount = ((xcount >> 2) - 1) << 16;
			else if (xcount == 0x0000) {
				/*
				 * A full 64kB: split it into two 32kB
				 * entries for chips (e.g. CS5530) that
				 * mishandle a count of 0x0000.
				 */
				if (count++ >= PRD_ENTRIES)
					goto use_pio_instead;
				*table++ = cpu_to_le32(0x8000);
				*table++ = cpu_to_le32(cur_addr + 0x8000);
				xcount = 0x8000;
			}
			*table++ = cpu_to_le32(xcount);
			cur_addr += bcount;
			cur_len -= bcount;
		}
	}

	if (count) {
		/* terminate the table: set the EOT flag on the last entry */
		if (!is_trm290)
			*--table |= cpu_to_le32(0x80000000);
		return count;
	}

use_pio_instead:
	printk(KERN_ERR "%s: %s\n", drive->name,
		count ? "DMA table too small" : "empty DMA table?");

	return 0; /* revert to PIO for this request */
}
EXPORT_SYMBOL_GPL(ide_build_dmatable);

/**
 *	ide_dma_setup	-	begin a DMA phase
 *	@drive: target device
 *	@cmd: command
 *
 *	Build an IDE DMA PRD (IDE speak for scatter gather table)
 *	and then set up the DMA transfer registers for a device
 *	that follows generic IDE PCI DMA behaviour. Controllers can
 *	override this function if they need to.
 *
 *	Returns 0 on success. If a PIO fallback is required then 1
 *	is returned.
 */

int ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
	/*
	 * ATA_DMA_WR makes the bus master write to memory, i.e. the
	 * drive is being read; clear it for writes to the drive.
	 */
	u8 rw = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 0 : ATA_DMA_WR;
	u8 dma_stat;

	/* fall back to PIO if we cannot build a PRD table */
	if (ide_build_dmatable(drive, cmd) == 0) {
		ide_map_sg(drive, cmd);
		return 1;
	}

	/* PRD table */
	if (mmio)
		writel(hwif->dmatable_dma,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_TABLE_OFS));
	else
		outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS);

	/* specify r/w */
	if (mmio)
		writeb(rw, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
	else
		outb(rw, hwif->dma_base + ATA_DMA_CMD);

	/* read DMA status for INTR & ERROR flags */
	dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);

	/* clear INTR & ERROR flags (write-1-to-clear) */
	ide_dma_sff_write_status(hwif, dma_stat | ATA_DMA_ERR | ATA_DMA_INTR);

	return 0;
}
EXPORT_SYMBOL_GPL(ide_dma_setup);
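
/*
 * For reference, the sequence the IDE core drives for a bus-master
 * transfer looks roughly like this (a sketch, not a full call graph):
 *
 *	dma_setup()	build the PRD table, set direction, clear status
 *	  (the ATA command itself is then issued to the drive)
 *	dma_start()	set ATA_DMA_START in the command register
 *	  (interrupt)	dma_test_irq() checks ATA_DMA_INTR
 *	dma_end()	stop the engine and collect the final status
 */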

/**
 *	ide_dma_sff_timer_expiry	-	handle a DMA timeout
 *	@drive: Drive that timed out
 *
 *	An IDE DMA transfer timed out. In the event of an error we ask
 *	the driver to resolve the problem; if a DMA transfer is still
 *	in progress we continue to wait (arguably we need to add a
 *	secondary 'I don't care what the drive thinks' timeout here).
 *	Finally, if we have an interrupt we let it complete the I/O.
 *	But only one time: we clear the expiry handler, and if the I/O
 *	is still not complete after WAIT_CMD jiffies, we error and retry
 *	in PIO. This can occur if an interrupt is lost, or due to a hang
 *	or a bug.
 *
 *	Returns a fresh timeout in jiffies to keep waiting, -1 on a DMA
 *	error, or 0 when the status is unknown and the bus gets reset.
 */

int ide_dma_sff_timer_expiry(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);

	printk(KERN_WARNING "%s: %s: DMA status (0x%02x)\n",
		drive->name, __func__, dma_stat);

	if ((dma_stat & 0x18) == 0x18)	/* BUSY Stupid Early Timer !! */
		return WAIT_CMD;

	hwif->expiry = NULL;	/* one free ride for now */

	if (dma_stat & ATA_DMA_ERR)	/* ERROR */
		return -1;

	if (dma_stat & ATA_DMA_ACTIVE)	/* DMAing */
		return WAIT_CMD;

	if (dma_stat & ATA_DMA_INTR)	/* Got an Interrupt */
		return WAIT_CMD;

	return 0;	/* Status is unknown -- reset the bus */
}
EXPORT_SYMBOL_GPL(ide_dma_sff_timer_expiry);

void ide_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_cmd;

	/* Note that this is done *after* the cmd has
	 * been issued to the drive, as per the BM-IDE spec.
	 * The Promise Ultra33 doesn't work correctly when
	 * we do this part before issuing the drive cmd.
	 */
	if (hwif->host_flags & IDE_HFLAG_MMIO) {
		dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
		writeb(dma_cmd | ATA_DMA_START,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
	} else {
		dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
		outb(dma_cmd | ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
	}
}
EXPORT_SYMBOL_GPL(ide_dma_start);

/* returns non-zero on error (0x10 | raw DMA status), 0 otherwise */
int ide_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_stat = 0, dma_cmd = 0;

	/* stop DMA */
	if (hwif->host_flags & IDE_HFLAG_MMIO) {
		dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
		writeb(dma_cmd & ~ATA_DMA_START,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
	} else {
		dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
		outb(dma_cmd & ~ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
	}

	/* get DMA status */
	dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);

	/* clear INTR & ERROR bits */
	ide_dma_sff_write_status(hwif, dma_stat | ATA_DMA_ERR | ATA_DMA_INTR);

#define CHECK_DMA_MASK (ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR)

	/* verify good DMA status: only INTR set, no ERR, engine idle */
	if ((dma_stat & CHECK_DMA_MASK) != ATA_DMA_INTR)
		return 0x10 | dma_stat;
	return 0;
}
EXPORT_SYMBOL_GPL(ide_dma_end);

/* returns 1 if dma irq issued, 0 otherwise */
int ide_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);

	return (dma_stat & ATA_DMA_INTR) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(ide_dma_test_irq);

const struct ide_dma_ops sff_dma_ops = {
	.dma_host_set		= ide_dma_host_set,
	.dma_setup		= ide_dma_setup,
	.dma_start		= ide_dma_start,
	.dma_end		= ide_dma_end,
	.dma_test_irq		= ide_dma_test_irq,
	.dma_lost_irq		= ide_dma_lost_irq,
	.dma_timer_expiry	= ide_dma_sff_timer_expiry,
	.dma_sff_read_status	= ide_dma_sff_read_status,
};
EXPORT_SYMBOL_GPL(sff_dma_ops);

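/*
 * Illustrative wiring (a sketch; the "foo" names are hypothetical): a
 * host driver whose controller follows the generic SFF behaviour can
 * point its port info at this table instead of rolling its own methods:
 *
 *	static const struct ide_port_info foo_port_info = {
 *		.name		= "foo",
 *		.dma_ops	= &sff_dma_ops,
 *		...
 *	};
 *
 * Drivers with a single quirky method typically copy this table and
 * override just that entry.
 */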