This source file includes the following definitions:
- svia_scr_read
- svia_scr_write
- vt8251_scr_read
- vt8251_scr_write
- svia_tf_load
- svia_noop_freeze
- vt6420_prereset
- vt6420_bmdma_start
- vt6421_pata_cable_detect
- vt6421_set_pio_mode
- vt6421_set_dma_mode
- svia_scr_addr
- vt6421_scr_addr
- vt6421_init_addrs
- vt6420_prepare_host
- vt6421_prepare_host
- vt8251_prepare_host
- svia_wd_fix
- vt642x_interrupt
- vt6421_error_handler
- svia_configure
- svia_init_one
- svia_pci_device_resume
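/*
 * sata_via - SCSI low-level driver for VIA Serial ATA controllers
 */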
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_via"
#define DRV_VERSION	"2.6"

enum board_ids_enum {
	vt6420,
	vt6421,
	vt8251,
};

enum {
	SATA_CHAN_ENAB		= 0x40, /* SATA channel enable */
	SATA_INT_GATE		= 0x41, /* SATA interrupt gating */
	SATA_NATIVE_MODE	= 0x42, /* native mode enable */
	SVIA_MISC_3		= 0x46, /* miscellaneous control III */
	PATA_UDMA_TIMING	= 0xB3, /* PATA timing for DMA and cable detect */
	PATA_PIO_TIMING		= 0xAB, /* PATA timing register */

	PORT0			= (1 << 1),
	PORT1			= (1 << 0),
	ALL_PORTS		= PORT0 | PORT1,

	NATIVE_MODE_ALL		= (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),

	SATA_EXT_PHY		= (1 << 6), /* 0==use PATA, 1==ext phy */

	SATA_HOTPLUG		= (1 << 5), /* enable IRQ on hotplug */
};

struct svia_priv {
	bool			wd_workaround;
};

static int vt6420_hotplug;
module_param_named(vt6420_hotplug, vt6420_hotplug, int, 0644);
MODULE_PARM_DESC(vt6420_hotplug, "Enable hot-plug support for VT6420 (0=Don't support, 1=support)");
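/*
 * Note: with 0644 permissions the flag is also visible under
 * /sys/module/sata_via/parameters/, but it is only consulted while a
 * controller is being probed.
 */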

static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int svia_pci_device_resume(struct pci_dev *pdev);
#endif
static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val);
static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val);
static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
static void svia_noop_freeze(struct ata_port *ap);
static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
static void vt6420_bmdma_start(struct ata_queued_cmd *qc);
static int vt6421_pata_cable_detect(struct ata_port *ap);
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_error_handler(struct ata_port *ap);

static const struct pci_device_id svia_pci_tbl[] = {
	{ PCI_VDEVICE(VIA, 0x5337), vt6420 },
	{ PCI_VDEVICE(VIA, 0x0591), vt6420 },
	{ PCI_VDEVICE(VIA, 0x3149), vt6420 },
	{ PCI_VDEVICE(VIA, 0x3249), vt6421 },
	{ PCI_VDEVICE(VIA, 0x5372), vt6420 },
	{ PCI_VDEVICE(VIA, 0x7372), vt6420 },
	{ PCI_VDEVICE(VIA, 0x5287), vt8251 },
	{ PCI_VDEVICE(VIA, 0x9000), vt8251 },

	{ }	/* terminate list */
};

static struct pci_driver svia_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= svia_pci_tbl,
	.probe			= svia_init_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= svia_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template svia_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations svia_base_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.sff_tf_load		= svia_tf_load,
};

static struct ata_port_operations vt6420_sata_ops = {
	.inherits		= &svia_base_ops,
	.freeze			= svia_noop_freeze,
	.prereset		= vt6420_prereset,
	.bmdma_start		= vt6420_bmdma_start,
};

static struct ata_port_operations vt6421_pata_ops = {
	.inherits		= &svia_base_ops,
	.cable_detect		= vt6421_pata_cable_detect,
	.set_piomode		= vt6421_set_pio_mode,
	.set_dmamode		= vt6421_set_dma_mode,
};

static struct ata_port_operations vt6421_sata_ops = {
	.inherits		= &svia_base_ops,
	.scr_read		= svia_scr_read,
	.scr_write		= svia_scr_write,
	.error_handler		= vt6421_error_handler,
};

static struct ata_port_operations vt8251_ops = {
	.inherits		= &svia_base_ops,
	.hardreset		= sata_std_hardreset,
	.scr_read		= vt8251_scr_read,
	.scr_write		= vt8251_scr_write,
};

static const struct ata_port_info vt6420_port_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6420_sata_ops,
};

static const struct ata_port_info vt6421_sport_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_sata_ops,
};

static const struct ata_port_info vt6421_pport_info = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	/* No MWDMA */
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_pata_ops,
};

static const struct ata_port_info vt8251_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt8251_ops,
};

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	*val = ioread32(link->ap->ioaddr.scr_addr + (4 * sc_reg));
	return 0;
}

static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	iowrite32(val, link->ap->ioaddr.scr_addr + (4 * sc_reg));
	return 0;
}

static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
{
	static const u8 ipm_tbl[] = { 1, 2, 6, 0 };
	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
	int slot = 2 * link->ap->port_no + link->pmp;
	u32 v = 0;
	u8 raw;

	switch (scr) {
	case SCR_STATUS:
		pci_read_config_byte(pdev, 0xA0 + slot, &raw);

		/* read the DET field, bit0 and 1 of the config byte */
		v |= raw & 0x03;

		/* read the SPD field, bit4 of the config byte */
		if (raw & (1 << 4))
			v |= 0x02 << 4;
		else
			v |= 0x01 << 4;

		/* read the IPM field, bit2 and 3 of the config byte */
		v |= ipm_tbl[(raw >> 2) & 0x3];
		break;

	case SCR_ERROR:
		/* devices other than 5287 use 0xA8 as base */
		WARN_ON(pdev->device != 0x5287);
		pci_read_config_dword(pdev, 0xB0 + slot * 4, &v);
		break;

	case SCR_CONTROL:
		pci_read_config_byte(pdev, 0xA4 + slot, &raw);

		/* read the DET field, bit0 and bit1 */
		v |= ((raw & 0x02) << 1) | (raw & 0x01);

		/* read the IPM field, bit2 and bit3 */
		v |= ((raw >> 2) & 0x03) << 8;
		break;

	default:
		return -EINVAL;
	}

	*val = v;
	return 0;
}

static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val)
{
	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
	int slot = 2 * link->ap->port_no + link->pmp;
	u32 v = 0;

	switch (scr) {
	case SCR_ERROR:
		/* devices other than 5287 use 0xA8 as base */
		WARN_ON(pdev->device != 0x5287);
		pci_write_config_dword(pdev, 0xB0 + slot * 4, val);
		return 0;

	case SCR_CONTROL:
		/* set the DET field */
		v |= ((val & 0x4) >> 1) | (val & 0x1);

		/* set the IPM field */
		v |= ((val >> 8) & 0x3) << 2;

		pci_write_config_byte(pdev, 0xA4 + slot, v);
		return 0;

	default:
		return -EINVAL;
	}
}

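/**
 *	svia_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs the ATA taskfile via the standard SFF helper, but forces
 *	the device register to be rewritten whenever the control register
 *	changes; VIA chips otherwise reset the device register when the
 *	IEN bit in the control register is toggled.
 */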
static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_taskfile ttf;

	if (tf->ctl != ap->last_ctl) {
		ttf = *tf;
		ttf.flags |= ATA_TFLAG_DEVICE;
		tf = &ttf;
	}
	ata_sff_tf_load(ap, tf);
}

static void svia_noop_freeze(struct ata_port *ap)
{
	/* Some VIA controllers choke if ATA_NIEN is manipulated in
	 * certain ways.  Leave it alone and just clear pending IRQ.
	 */
	ap->ops->sff_check_status(ap);
	ata_bmdma_irq_clear(ap);
}

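/**
 *	vt6420_prereset - prereset for vt6420
 *	@link: target ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	SCR register access on the vt6420 is unreliable and can hang the
 *	machine, so the SCRs are only touched while the port is being
 *	probed (ATA_PFLAG_LOADING).  The rest of the time the prereset
 *	simply waits for the device to become ready.
 */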
static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &ap->link.eh_context;
	unsigned long timeout = jiffies + (HZ * 5);
	u32 sstatus, scontrol;
	int online;

	/* don't do any SCR stuff if we're not loading */
	if (!(ap->pflags & ATA_PFLAG_LOADING))
		goto skip_scr;

	/* Resume phy.  This is the old SATA resume sequence */
	svia_scr_write(link, SCR_CONTROL, 0x300);
	svia_scr_read(link, SCR_CONTROL, &scontrol); /* flush */

	/* wait for phy to become ready, if necessary */
	do {
		ata_msleep(link->ap, 200);
		svia_scr_read(link, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* open code sata_print_link_status() */
	svia_scr_read(link, SCR_STATUS, &sstatus);
	svia_scr_read(link, SCR_CONTROL, &scontrol);

	online = (sstatus & 0xf) == 0x3;

	ata_port_info(ap,
		      "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
		      online ? "up" : "down", sstatus, scontrol);

	/* SStatus is read one more time */
	svia_scr_read(link, SCR_STATUS, &sstatus);

	if (!online) {
		/* tell EH to bail */
		ehc->i.action &= ~ATA_EH_RESET;
		return 0;
	}

 skip_scr:
	/* wait for !BSY */
	ata_sff_wait_ready(link, deadline);

	return 0;
}

static void vt6420_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	if ((qc->tf.command == ATA_CMD_PACKET) &&
	    (qc->scsicmd->sc_data_direction == DMA_TO_DEVICE)) {
		/* Prevents corruption on some ATAPI burners */
		ata_sff_pause(ap);
	}
	ata_bmdma_start(qc);
}

static int vt6421_pata_cable_detect(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 tmp;

	pci_read_config_byte(pdev, PATA_UDMA_TIMING, &tmp);
	if (tmp & 0x10)
		return ATA_CBL_PATA40;
	return ATA_CBL_PATA80;
}

static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const u8 pio_bits[] = { 0xA8, 0x65, 0x65, 0x31, 0x20 };
	pci_write_config_byte(pdev, PATA_PIO_TIMING - adev->devno,
			      pio_bits[adev->pio_mode - XFER_PIO_0]);
}

static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0 };
	pci_write_config_byte(pdev, PATA_UDMA_TIMING - adev->devno,
			      udma_bits[adev->dma_mode - XFER_UDMA_0]);
}

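/* minimum PCI BAR sizes the driver expects, checked in svia_init_one() */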
static const unsigned int svia_bar_sizes[] = {
	8, 4, 8, 4, 16, 256
};

static const unsigned int vt6421_bar_sizes[] = {
	16, 16, 16, 16, 32, 128
};

static void __iomem *svia_scr_addr(void __iomem *addr, unsigned int port)
{
	return addr + (port * 128);
}

static void __iomem *vt6421_scr_addr(void __iomem *addr, unsigned int port)
{
	return addr + (port * 64);
}

static void vt6421_init_addrs(struct ata_port *ap)
{
	void __iomem * const *iomap = ap->host->iomap;
	void __iomem *reg_addr = iomap[ap->port_no];
	void __iomem *bmdma_addr = iomap[4] + (ap->port_no * 8);
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = reg_addr;
	ioaddr->altstatus_addr =
	ioaddr->ctl_addr = (void __iomem *)
		((unsigned long)(reg_addr + 8) | ATA_PCI_CTL_OFS);
	ioaddr->bmdma_addr = bmdma_addr;
	ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no);

	ata_sff_std_ports(ioaddr);

	ata_port_pbar_desc(ap, ap->port_no, -1, "port");
	ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma");
}

static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt6420_port_info, NULL };
	struct ata_host *host;
	int rc;

	if (vt6420_hotplug) {
		ppi[0]->port_ops->scr_read = svia_scr_read;
		ppi[0]->port_ops->scr_write = svia_scr_write;
	}

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	host->ports[0]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 0);
	host->ports[1]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 1);

	return 0;
}

static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] =
		{ &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info };
	struct ata_host *host;
	int i, rc;

	*r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
	if (!host) {
		dev_err(&pdev->dev, "failed to allocate host\n");
		return -ENOMEM;
	}

	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to request/iomap PCI BARs (errno=%d)\n",
			rc);
		return rc;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < host->n_ports; i++)
		vt6421_init_addrs(host->ports[i]);

	return dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
}

static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt8251_port_info, NULL };
	struct ata_host *host;
	int i, rc;

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	/* 8251 hosts four sata ports as M/S of the two channels */
	for (i = 0; i < host->n_ports; i++)
		ata_slave_link_init(host->ports[i]);

	return 0;
}

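/*
 * svia_wd_fix - work around transfer aborts seen with some drives by
 * setting bit 2 of PCI config register 0x52.  Applied unconditionally
 * on vt6420 and on demand from vt6421_error_handler(); see the comment
 * near the end of svia_configure().
 */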
static void svia_wd_fix(struct pci_dev *pdev)
{
	u8 tmp8;

	pci_read_config_byte(pdev, 0x52, &tmp8);
	pci_write_config_byte(pdev, 0x52, tmp8 | BIT(2));
}

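/*
 * Interrupt handler used when hotplug interrupts are enabled (vt6421,
 * and vt6420 with vt6420_hotplug): if the normal BMDMA handler does not
 * claim the IRQ, check both ports' SError for a PHY-ready change.
 */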
static irqreturn_t vt642x_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	irqreturn_t rc = ata_bmdma_interrupt(irq, dev_instance);

	/* if the IRQ was not handled, it might be a hotplug IRQ */
	if (rc != IRQ_HANDLED) {
		u32 serror;
		unsigned long flags;

		spin_lock_irqsave(&host->lock, flags);

		/* check for hotplug on port 0 */
		svia_scr_read(&host->ports[0]->link, SCR_ERROR, &serror);
		if (serror & SERR_PHYRDY_CHG) {
			ata_ehi_hotplugged(&host->ports[0]->link.eh_info);
			ata_port_freeze(host->ports[0]);
			rc = IRQ_HANDLED;
		}

		/* check for hotplug on port 1 */
		svia_scr_read(&host->ports[1]->link, SCR_ERROR, &serror);
		if (serror & SERR_PHYRDY_CHG) {
			ata_ehi_hotplugged(&host->ports[1]->link.eh_info);
			ata_port_freeze(host->ports[1]);
			rc = IRQ_HANDLED;
		}
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return rc;
}

static void vt6421_error_handler(struct ata_port *ap)
{
	struct svia_priv *hpriv = ap->host->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u32 serror;

	/* see svia_configure() for description */
	if (!hpriv->wd_workaround) {
		svia_scr_read(&ap->link, SCR_ERROR, &serror);
		if (serror == 0x1000500) {
			ata_port_warn(ap, "Incompatible drive: enabling workaround. This slows down transfer rate to ~60 MB/s");
			svia_wd_fix(pdev);
			hpriv->wd_workaround = true;
			ap->link.eh_context.i.flags |= ATA_EHI_QUIET;
		}
	}

	ata_sff_error_handler(ap);
}

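/*
 * svia_configure - one-time PCI config space setup at probe: report the
 * IRQ routing and make sure the SATA channels, their interrupts, native
 * mode and (where used) hotplug interrupts are all enabled.
 */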
static void svia_configure(struct pci_dev *pdev, int board_id,
			   struct svia_priv *hpriv)
{
	u8 tmp8;

	pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
	dev_info(&pdev->dev, "routed to hard irq line %d\n",
		 (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);

	/* make sure SATA channels are enabled */
	pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_dbg(&pdev->dev, "enabling SATA channels (0x%x)\n",
			(int)tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
	}

	/* make sure interrupts for each channel are sent to us */
	pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_dbg(&pdev->dev, "enabling SATA channel interrupts (0x%x)\n",
			(int) tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
	}

	/* make sure native mode is enabled */
	pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
	if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
		dev_dbg(&pdev->dev,
			"enabling SATA channel native mode (0x%x)\n",
			(int) tmp8);
		tmp8 |= NATIVE_MODE_ALL;
		pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
	}

	if ((board_id == vt6420 && vt6420_hotplug) || board_id == vt6421) {
		/* enable IRQ on hotplug */
		pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
		if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
			dev_dbg(&pdev->dev,
				"enabling SATA hotplug (0x%x)\n",
				(int) tmp8);
			tmp8 |= SATA_HOTPLUG;
			pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
		}
	}

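	/*
	 * vt6420/1 controllers can choke on certain (notably some WD)
	 * drives, aborting transfers with SError 0x1000500.  Setting
	 * bit 2 of PCI config register 0x52 (see svia_wd_fix()) works
	 * around this at the cost of throughput (roughly 60 MB/s).
	 *
	 * Apply it only when the error actually shows up on vt6421
	 * (see vt6421_error_handler()), but always on vt6420, where
	 * SCR_ERROR cannot be read safely.
	 */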
	if (board_id == vt6420) {
		svia_wd_fix(pdev);
		hpriv->wd_workaround = true;
	}
}

static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned int i;
	int rc;
	struct ata_host *host = NULL;
	int board_id = (int) ent->driver_data;
	const unsigned *bar_sizes;
	struct svia_priv *hpriv;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if (board_id == vt6421)
		bar_sizes = &vt6421_bar_sizes[0];
	else
		bar_sizes = &svia_bar_sizes[0];

	for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
		if ((pci_resource_start(pdev, i) == 0) ||
		    (pci_resource_len(pdev, i) < bar_sizes[i])) {
			dev_err(&pdev->dev,
				"invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n",
				i,
				(unsigned long long)pci_resource_start(pdev, i),
				(unsigned long long)pci_resource_len(pdev, i));
			return -ENODEV;
		}

	switch (board_id) {
	case vt6420:
		rc = vt6420_prepare_host(pdev, &host);
		break;
	case vt6421:
		rc = vt6421_prepare_host(pdev, &host);
		break;
	case vt8251:
		rc = vt8251_prepare_host(pdev, &host);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	svia_configure(pdev, board_id, hpriv);

	pci_set_master(pdev);
	if ((board_id == vt6420 && vt6420_hotplug) || board_id == vt6421)
		return ata_host_activate(host, pdev->irq, vt642x_interrupt,
					 IRQF_SHARED, &svia_sht);
	else
		return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
					 IRQF_SHARED, &svia_sht);
}

#ifdef CONFIG_PM_SLEEP
static int svia_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	struct svia_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	/* re-apply the FIFO workaround if it was enabled before suspend */
	if (hpriv->wd_workaround)
		svia_wd_fix(pdev);
	ata_host_resume(host);

	return 0;
}
#endif

module_pci_driver(svia_pci_driver);