/*
 *  sata_via.c - VIA Serial ATA controllers
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 * 		   Please ALWAYS copy linux-ide@vger.kernel.org
 *		   on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available under NDA.
 *
 *
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_via"
#define DRV_VERSION	"2.6"

/*
 * vt8251 is different from the other VIA SATA controllers.  It has two
 * channels, and each channel has both a Master and a Slave slot.
 */
enum board_ids_enum {
	vt6420,
	vt6421,
	vt8251,
};

enum {
	SATA_CHAN_ENAB		= 0x40, /* SATA channel enable */
	SATA_INT_GATE		= 0x41, /* SATA interrupt gating */
	SATA_NATIVE_MODE	= 0x42, /* Native mode enable */
	PATA_UDMA_TIMING	= 0xB3, /* PATA timing for DMA/cable detect */
	PATA_PIO_TIMING		= 0xAB, /* PATA timing register */

	PORT0			= (1 << 1),
	PORT1			= (1 << 0),
	ALL_PORTS		= PORT0 | PORT1,

	NATIVE_MODE_ALL		= (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),

	SATA_EXT_PHY		= (1 << 6), /* 0==use PATA, 1==ext phy */
};

static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val);
static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val);
static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
static void svia_noop_freeze(struct ata_port *ap);
static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
static void vt6420_bmdma_start(struct ata_queued_cmd *qc);
static int vt6421_pata_cable_detect(struct ata_port *ap);
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);

static const struct pci_device_id svia_pci_tbl[] = {
	{ PCI_VDEVICE(VIA, 0x5337), vt6420 },
	{ PCI_VDEVICE(VIA, 0x0591), vt6420 }, /* 2 sata chnls (Master) */
	{ PCI_VDEVICE(VIA, 0x3149), vt6420 }, /* 2 sata chnls (Master) */
	{ PCI_VDEVICE(VIA, 0x3249), vt6421 }, /* 2 sata chnls, 1 pata chnl */
	{ PCI_VDEVICE(VIA, 0x5372), vt6420 },
	{ PCI_VDEVICE(VIA, 0x7372), vt6420 },
	{ PCI_VDEVICE(VIA, 0x5287), vt8251 }, /* 2 sata chnls (Master/Slave) */
	{ PCI_VDEVICE(VIA, 0x9000), vt8251 },

	{ }	/* terminate list */
};

static struct pci_driver svia_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= svia_pci_tbl,
	.probe			= svia_init_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= ata_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template svia_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations svia_base_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.sff_tf_load		= svia_tf_load,
};

static struct ata_port_operations vt6420_sata_ops = {
	.inherits		= &svia_base_ops,
	.freeze			= svia_noop_freeze,
	.prereset		= vt6420_prereset,
	.bmdma_start		= vt6420_bmdma_start,
};

static struct ata_port_operations vt6421_pata_ops = {
	.inherits		= &svia_base_ops,
	.cable_detect		= vt6421_pata_cable_detect,
	.set_piomode		= vt6421_set_pio_mode,
	.set_dmamode		= vt6421_set_dma_mode,
};

static struct ata_port_operations vt6421_sata_ops = {
	.inherits		= &svia_base_ops,
	.scr_read		= svia_scr_read,
	.scr_write		= svia_scr_write,
};

static struct ata_port_operations vt8251_ops = {
	.inherits		= &svia_base_ops,
	.hardreset		= sata_std_hardreset,
	.scr_read		= vt8251_scr_read,
	.scr_write		= vt8251_scr_write,
};

static const struct ata_port_info vt6420_port_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6420_sata_ops,
};

static struct ata_port_info vt6421_sport_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_sata_ops,
};

static struct ata_port_info vt6421_pport_info = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	/* No MWDMA */
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_pata_ops,
};

static struct ata_port_info vt8251_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt8251_ops,
};

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
MODULE_VERSION(DRV_VERSION);

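/*
 * Generic SCR access used by the vt6420/vt6421 SATA ports: each SCR is a
 * 32-bit register in the memory-mapped window set up by svia_scr_addr()
 * or vt6421_scr_addr().
 */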
static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	*val = ioread32(link->ap->ioaddr.scr_addr + (4 * sc_reg));
	return 0;
}

static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	iowrite32(val, link->ap->ioaddr.scr_addr + (4 * sc_reg));
	return 0;
}

static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
{
	static const u8 ipm_tbl[] = { 1, 2, 6, 0 };
	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
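	/* each channel has a Master and a Slave slot (see the vt8251 note
	 * above), so per-slot config registers are indexed by
	 * 2 * port_no + pmp
	 */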
	int slot = 2 * link->ap->port_no + link->pmp;
	u32 v = 0;
	u8 raw;

	switch (scr) {
	case SCR_STATUS:
		pci_read_config_byte(pdev, 0xA0 + slot, &raw);

		/* read the DET field, bits 0 and 1 of the config byte */
		v |= raw & 0x03;

		/* read the SPD field, bit 4 of the config byte */
		if (raw & (1 << 4))
			v |= 0x02 << 4;
		else
			v |= 0x01 << 4;

		/* read the IPM field, bits 2 and 3 of the config byte */
		v |= ipm_tbl[(raw >> 2) & 0x3];
		break;

	case SCR_ERROR:
		/* devices other than the 5287 use 0xA8 as the base */
		WARN_ON(pdev->device != 0x5287);
		pci_read_config_dword(pdev, 0xB0 + slot * 4, &v);
		break;

	case SCR_CONTROL:
		pci_read_config_byte(pdev, 0xA4 + slot, &raw);

		/* read the DET field, bits 0 and 1 */
		v |= ((raw & 0x02) << 1) | (raw & 0x01);

		/* read the IPM field, bits 2 and 3 */
		v |= ((raw >> 2) & 0x03) << 8;
		break;

	default:
		return -EINVAL;
	}

	*val = v;
	return 0;
}

static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val)
{
	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
	int slot = 2 * link->ap->port_no + link->pmp;
	u32 v = 0;

	switch (scr) {
	case SCR_ERROR:
		/* devices other than the 5287 use 0xA8 as the base */
		WARN_ON(pdev->device != 0x5287);
		pci_write_config_dword(pdev, 0xB0 + slot * 4, val);
		return 0;

	case SCR_CONTROL:
		/* set the DET field */
		v |= ((val & 0x4) >> 1) | (val & 0x1);

		/* set the IPM field */
		v |= ((val >> 8) & 0x3) << 2;

		pci_write_config_byte(pdev, 0xA4 + slot, v);
		return 0;

	default:
		return -EINVAL;
	}
}

/**
 *	svia_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller.
 *
 *	This works around a bug in VIA chipsets: the device register is
 *	reset after the IEN bit in the ctl register is changed, so the
 *	device register must be rewritten whenever ctl changes.
 */
static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_taskfile ttf;

	if (tf->ctl != ap->last_ctl) {
		ttf = *tf;
		ttf.flags |= ATA_TFLAG_DEVICE;
		tf = &ttf;
	}
	ata_sff_tf_load(ap, tf);
}

static void svia_noop_freeze(struct ata_port *ap)
{
	/* Some VIA controllers choke if ATA_NIEN is manipulated in
	 * certain ways.  Leave it alone and just clear the pending IRQ.
	 */
	ap->ops->sff_check_status(ap);
	ata_bmdma_irq_clear(ap);
}

/**
 *	vt6420_prereset - prereset for vt6420
 *	@link: target ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	SCR registers on the vt6420 are unreliable and may hang the whole
 *	machine if accessed with the wrong timing.  To avoid such a
 *	catastrophe, the vt6420 doesn't provide generic SCR access
 *	operations; instead, SStatus and SControl are touched only during
 *	boot probing, in a controlled way.
 *
 *	As the old (pre-EH-update) probing code is proven to work, we
 *	strictly follow its access pattern.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &ap->link.eh_context;
	unsigned long timeout = jiffies + (HZ * 5);
	u32 sstatus, scontrol;
	int online;

	/* don't do any SCR stuff if we're not loading */
	if (!(ap->pflags & ATA_PFLAG_LOADING))
		goto skip_scr;

	/* Resume phy.  This is the old SATA resume sequence */
	svia_scr_write(link, SCR_CONTROL, 0x300);
	svia_scr_read(link, SCR_CONTROL, &scontrol); /* flush */

	/* wait for phy to become ready, if necessary */
	do {
		ata_msleep(link->ap, 200);
		svia_scr_read(link, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* open code sata_print_link_status() */
	svia_scr_read(link, SCR_STATUS, &sstatus);
	svia_scr_read(link, SCR_CONTROL, &scontrol);

	online = (sstatus & 0xf) == 0x3;

	ata_port_info(ap,
		      "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
		      online ? "up" : "down", sstatus, scontrol);

	/* SStatus is read one more time */
	svia_scr_read(link, SCR_STATUS, &sstatus);

	if (!online) {
		/* tell EH to bail */
		ehc->i.action &= ~ATA_EH_RESET;
		return 0;
	}

 skip_scr:
	/* wait for !BSY */
	ata_sff_wait_ready(link, deadline);

	return 0;
}

static void vt6420_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	if ((qc->tf.command == ATA_CMD_PACKET) &&
	    (qc->scsicmd->sc_data_direction == DMA_TO_DEVICE)) {
		/* Prevents corruption on some ATAPI burners */
		ata_sff_pause(ap);
	}
	ata_bmdma_start(qc);
}

static int vt6421_pata_cable_detect(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 tmp;

	pci_read_config_byte(pdev, PATA_UDMA_TIMING, &tmp);
	if (tmp & 0x10)
		return ATA_CBL_PATA40;
	return ATA_CBL_PATA80;
}

static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const u8 pio_bits[] = { 0xA8, 0x65, 0x65, 0x31, 0x20 };
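	/* per-device timing: the slave's PIO timing byte sits one below the master's */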
	pci_write_config_byte(pdev, PATA_PIO_TIMING - adev->devno,
			      pio_bits[adev->pio_mode - XFER_PIO_0]);
}

static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0 };
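	/* likewise, the slave's UDMA timing byte sits one below the master's */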
	pci_write_config_byte(pdev, PATA_UDMA_TIMING - adev->devno,
			      udma_bits[adev->dma_mode - XFER_UDMA_0]);
}

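/* minimum PCI BAR sizes; probing in svia_init_one() fails if a BAR is smaller */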
static const unsigned int svia_bar_sizes[] = {
	8, 4, 8, 4, 16, 256
};

static const unsigned int vt6421_bar_sizes[] = {
	16, 16, 16, 16, 32, 128
};

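/* SCR registers live in BAR 5; each port owns a fixed-stride slice of it */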
static void __iomem *svia_scr_addr(void __iomem *addr, unsigned int port)
{
	return addr + (port * 128);
}

static void __iomem *vt6421_scr_addr(void __iomem *addr, unsigned int port)
{
	return addr + (port * 64);
}

static void vt6421_init_addrs(struct ata_port *ap)
{
	void __iomem * const * iomap = ap->host->iomap;
	void __iomem *reg_addr = iomap[ap->port_no];
	void __iomem *bmdma_addr = iomap[4] + (ap->port_no * 8);
	struct ata_ioports *ioaddr = &ap->ioaddr;

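	/* command block in the per-port BAR, BMDMA in BAR 4 (8 bytes per
	 * port), SCRs in BAR 5
	 */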
	ioaddr->cmd_addr = reg_addr;
	ioaddr->altstatus_addr =
	ioaddr->ctl_addr = (void __iomem *)
		((unsigned long)(reg_addr + 8) | ATA_PCI_CTL_OFS);
	ioaddr->bmdma_addr = bmdma_addr;
	ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no);

	ata_sff_std_ports(ioaddr);

	ata_port_pbar_desc(ap, ap->port_no, -1, "port");
	ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma");
}

static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt6420_port_info, NULL };
	struct ata_host *host;
	int rc;

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	host->ports[0]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 0);
	host->ports[1]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 1);

	return 0;
}

static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] =
		{ &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info };
	struct ata_host *host;
	int i, rc;

	*r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
	if (!host) {
		dev_err(&pdev->dev, "failed to allocate host\n");
		return -ENOMEM;
	}

	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to request/iomap PCI BARs (errno=%d)\n",
			rc);
		return rc;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < host->n_ports; i++)
		vt6421_init_addrs(host->ports[i]);

	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;

	return 0;
}

static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt8251_port_info, NULL };
	struct ata_host *host;
	int i, rc;

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	/* the 8251 exposes four SATA ports as Master/Slave devices on its two channels */
	for (i = 0; i < host->n_ports; i++)
		ata_slave_link_init(host->ports[i]);

	return 0;
}

static void svia_configure(struct pci_dev *pdev, int board_id)
{
	u8 tmp8;

	pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
	dev_info(&pdev->dev, "routed to hard irq line %d\n",
		 (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);

	/* make sure SATA channels are enabled */
	pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_dbg(&pdev->dev, "enabling SATA channels (0x%x)\n",
			(int)tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
	}

	/* make sure interrupts for each channel are sent to us */
	pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_dbg(&pdev->dev, "enabling SATA channel interrupts (0x%x)\n",
			(int) tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
	}

	/* make sure native mode is enabled */
	pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
	if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
		dev_dbg(&pdev->dev,
			"enabling SATA channel native mode (0x%x)\n",
			(int) tmp8);
		tmp8 |= NATIVE_MODE_ALL;
		pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
	}

	/*
	 * vt6420/1 has problems talking to some drives.  The following
	 * is the fix from Joseph Chan <JosephChan@via.com.tw>.
	 *
	 * When the host issues HOLD, the device may send up to 20DW of
	 * data before acknowledging it with HOLDA, and the host should
	 * be able to buffer them in its FIFO.  Unfortunately, some WD
	 * drives send up to 40DW before acknowledging HOLD and, in the
	 * default configuration, this ends up overflowing the vt6421's
	 * FIFO, making the controller abort the transaction with
	 * R_ERR.
	 *
	 * Rx52[2] is the enable bit for the internal 128DW FIFO flow
	 * control watermark adjusting mechanism.  The default value 0
	 * means the host will issue HOLD to the device when the
	 * remaining FIFO space goes below 32DW; setting it to 1 moves
	 * the watermark to 64DW.
	 *
	 * https://bugzilla.kernel.org/show_bug.cgi?id=15173
	 * http://article.gmane.org/gmane.linux.ide/46352
	 * http://thread.gmane.org/gmane.linux.kernel/1062139
	 */
	if (board_id == vt6420 || board_id == vt6421) {
		pci_read_config_byte(pdev, 0x52, &tmp8);
		tmp8 |= 1 << 2;
		pci_write_config_byte(pdev, 0x52, tmp8);
	}
}

static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned int i;
	int rc;
	struct ata_host *host = NULL;
	int board_id = (int) ent->driver_data;
	const unsigned *bar_sizes;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if (board_id == vt6421)
		bar_sizes = &vt6421_bar_sizes[0];
	else
		bar_sizes = &svia_bar_sizes[0];

	for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
		if ((pci_resource_start(pdev, i) == 0) ||
		    (pci_resource_len(pdev, i) < bar_sizes[i])) {
			dev_err(&pdev->dev,
				"invalid PCI BAR %u (start 0x%llx, len 0x%llx)\n",
				i,
				(unsigned long long)pci_resource_start(pdev, i),
				(unsigned long long)pci_resource_len(pdev, i));
			return -ENODEV;
		}

	switch (board_id) {
	case vt6420:
		rc = vt6420_prepare_host(pdev, &host);
		break;
	case vt6421:
		rc = vt6421_prepare_host(pdev, &host);
		break;
	case vt8251:
		rc = vt8251_prepare_host(pdev, &host);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	svia_configure(pdev, board_id);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
				 IRQF_SHARED, &svia_sht);
}

module_pci_driver(svia_pci_driver);
