This source file includes the following definitions:
- kauai_lookup_timing
- pmac_ide_apply_timings
- pmac_ide_kauai_apply_timings
- pmac_ide_do_update_timings
- pmac_dev_select
- pmac_kauai_dev_select
- pmac_exec_command
- pmac_write_devctl
- pmac_ide_set_pio_mode
- set_timings_udma_ata4
- set_timings_udma_ata6
- set_timings_udma_shasta
- set_timings_mdma
- pmac_ide_set_dma_mode
- sanitize_timings
- on_media_bay
- pmac_ide_do_suspend
- pmac_ide_do_resume
- pmac_ide_cable_detect
- pmac_ide_init_dev
- pmac_ide_setup_device
- pmac_ide_init_ports
- pmac_ide_macio_attach
- pmac_ide_macio_suspend
- pmac_ide_macio_resume
- pmac_ide_pci_attach
- pmac_ide_pci_suspend
- pmac_ide_pci_resume
- pmac_ide_macio_mb_event
- pmac_ide_probe
- pmac_ide_build_dmatable
- pmac_ide_dma_setup
- pmac_ide_dma_start
- pmac_ide_dma_end
- pmac_ide_dma_test_irq
- pmac_ide_dma_host_set
- pmac_ide_dma_lost_irq
- pmac_ide_init_dma
1 /*
2  * Support for IDE interfaces on PowerMacs.
3  *
4  * These IDE interfaces are memory-mapped and have a DBDMA channel
5  * for doing DMA.
6  *
7  * Copyright (C) 1998-2003 Paul Mackerras & Ben. Herrenschmidt
8  */
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/delay.h>
24 #include <linux/ide.h>
25 #include <linux/notifier.h>
26 #include <linux/module.h>
27 #include <linux/reboot.h>
28 #include <linux/pci.h>
29 #include <linux/adb.h>
30 #include <linux/pmu.h>
31 #include <linux/scatterlist.h>
32 #include <linux/slab.h>
33
34 #include <asm/prom.h>
35 #include <asm/io.h>
36 #include <asm/dbdma.h>
37 #include <asm/ide.h>
38 #include <asm/machdep.h>
39 #include <asm/pmac_feature.h>
40 #include <asm/sections.h>
41 #include <asm/irq.h>
42 #include <asm/mediabay.h>
43
44 #define DRV_NAME "ide-pmac"
45
46 #undef IDE_PMAC_DEBUG
47
48 #define DMA_WAIT_TIMEOUT 50
49
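/*
 * Per-channel driver state: MMIO register base, interrupt, controller
 * type ("kind"), Apple bus ID, cached timing register values and the
 * DBDMA register block / descriptor table used for DMA transfers.
 */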
50 typedef struct pmac_ide_hwif {
51 unsigned long regbase;
52 int irq;
53 int kind;
54 int aapl_bus_id;
55 unsigned broken_dma : 1;
56 unsigned broken_dma_warn : 1;
57 struct device_node* node;
58 struct macio_dev *mdev;
59 u32 timings[4];
60 volatile u32 __iomem * kauai_fcr;
61 ide_hwif_t *hwif;
62
63
64
65
66
67
68 volatile struct dbdma_regs __iomem * dma_regs;
69 struct dbdma_cmd* dma_table_cpu;
70 } pmac_ide_hwif_t;
71
72 enum {
73 controller_ohare,
74 controller_heathrow,
75 controller_kl_ata3,
76 controller_kl_ata4,
77 controller_un_ata6,
78 controller_k2_ata6,
79 controller_sh_ata6,
80 };
81
82 static const char* model_name[] = {
83 "OHare ATA",
84 "Heathrow ATA",
85 "KeyLargo ATA-3",
86 "KeyLargo ATA-4",
87 "UniNorth ATA-6",
88 "K2 ATA-6",
89 "Shasta ATA-6",
90 };
91
92
93
94
95 #define IDE_TIMING_CONFIG 0x200
96 #define IDE_INTERRUPT 0x300
97
98
99 #define IDE_KAUAI_PIO_CONFIG 0x200
100 #define IDE_KAUAI_ULTRA_CONFIG 0x210
101 #define IDE_KAUAI_POLL_CONFIG 0x220
102
103
104
105
106
107
108 #define SYSCLK_TICKS(t) (((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
109 #define SYSCLK_TICKS_66(t) (((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
110 #define IDE_SYSCLK_NS 30
111 #define IDE_SYSCLK_66_NS 15
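/*
 * Both macros round a time in nanoseconds up to a whole number of clock
 * ticks of the cell, e.g. SYSCLK_TICKS_66(100) = (100 + 15 - 1) / 15 = 7
 * ticks of the 15 ns (66 MHz) clock.
 */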
112
113
114
115
116
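/*
 * Shasta (ATA/133) cell: the PIO and MDMA fields share the PIO config
 * register (note that the two masks overlap slightly), while the UDMA
 * fields live in the separate ultra config register.
 */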
117 #define TR_133_PIOREG_PIO_MASK 0xff000fff
118 #define TR_133_PIOREG_MDMA_MASK 0x00fff800
119 #define TR_133_UDMAREG_UDMA_MASK 0x0003ffff
120 #define TR_133_UDMAREG_UDMA_EN 0x00000001
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138 #define TR_100_PIOREG_PIO_MASK 0xff000fff
139 #define TR_100_PIOREG_MDMA_MASK 0x00fff000
140 #define TR_100_UDMAREG_UDMA_MASK 0x0000ffff
141 #define TR_100_UDMAREG_UDMA_EN 0x00000001
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
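/*
 * KeyLargo ATA-4 (66 MHz) cell: a single timing register carries the
 * UDMA, MDMA and PIO fields below.
 */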
160 #define TR_66_UDMA_MASK 0xfff00000
161 #define TR_66_UDMA_EN 0x00100000
162 #define TR_66_UDMA_ADDRSETUP_MASK 0xe0000000
163 #define TR_66_UDMA_ADDRSETUP_SHIFT 29
164 #define TR_66_UDMA_RDY2PAUS_MASK 0x1e000000
165 #define TR_66_UDMA_RDY2PAUS_SHIFT 25
166 #define TR_66_UDMA_WRDATASETUP_MASK 0x01e00000
167 #define TR_66_UDMA_WRDATASETUP_SHIFT 21
168 #define TR_66_MDMA_MASK 0x000ffc00
169 #define TR_66_MDMA_RECOVERY_MASK 0x000f8000
170 #define TR_66_MDMA_RECOVERY_SHIFT 15
171 #define TR_66_MDMA_ACCESS_MASK 0x00007c00
172 #define TR_66_MDMA_ACCESS_SHIFT 10
173 #define TR_66_PIO_MASK 0x000003ff
174 #define TR_66_PIO_RECOVERY_MASK 0x000003e0
175 #define TR_66_PIO_RECOVERY_SHIFT 5
176 #define TR_66_PIO_ACCESS_MASK 0x0000001f
177 #define TR_66_PIO_ACCESS_SHIFT 0
178
179
180
181
182
183
184
185
186
187
188
189
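/*
 * 33 MHz cells (OHare, Heathrow, KeyLargo ATA-3): MDMA and PIO fields
 * only; the HALFTICK and "E" bits are fine-tuning flags applied by the
 * timing code below.
 */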
190 #define TR_33_MDMA_MASK 0x003ff800
191 #define TR_33_MDMA_RECOVERY_MASK 0x001f0000
192 #define TR_33_MDMA_RECOVERY_SHIFT 16
193 #define TR_33_MDMA_ACCESS_MASK 0x0000f800
194 #define TR_33_MDMA_ACCESS_SHIFT 11
195 #define TR_33_MDMA_HALFTICK 0x00200000
196 #define TR_33_PIO_MASK 0x000007ff
197 #define TR_33_PIO_E 0x00000400
198 #define TR_33_PIO_RECOVERY_MASK 0x000003e0
199 #define TR_33_PIO_RECOVERY_SHIFT 5
200 #define TR_33_PIO_ACCESS_MASK 0x0000001f
201 #define TR_33_PIO_ACCESS_SHIFT 0
202
203
204
205
206 #define IDE_INTR_DMA 0x80000000
207 #define IDE_INTR_DEVICE 0x40000000
208
209
210
211
212 #define KAUAI_FCR_UATA_MAGIC 0x00000004
213 #define KAUAI_FCR_UATA_RESET_N 0x00000002
214 #define KAUAI_FCR_UATA_ENABLE 0x00000001
215
216
217
218
219
220
221
222 struct mdma_timings_t {
223 int accessTime;
224 int recoveryTime;
225 int cycleTime;
226 };
227
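/*
 * Candidate MultiWord DMA timings in nanoseconds (access, recovery, full
 * cycle), ordered from slowest to fastest and terminated by an all-zero
 * entry; set_timings_mdma() picks the entry with the smallest cycle time
 * that is still at least the requested one.
 */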
228 struct mdma_timings_t mdma_timings_33[] =
229 {
230 { 240, 240, 480 },
231 { 180, 180, 360 },
232 { 135, 135, 270 },
233 { 120, 120, 240 },
234 { 105, 105, 210 },
235 { 90, 90, 180 },
236 { 75, 75, 150 },
237 { 75, 45, 120 },
238 { 0, 0, 0 }
239 };
240
241 struct mdma_timings_t mdma_timings_33k[] =
242 {
243 { 240, 240, 480 },
244 { 180, 180, 360 },
245 { 150, 150, 300 },
246 { 120, 120, 240 },
247 { 90, 120, 210 },
248 { 90, 90, 180 },
249 { 90, 60, 150 },
250 { 90, 30, 120 },
251 { 0, 0, 0 }
252 };
253
254 struct mdma_timings_t mdma_timings_66[] =
255 {
256 { 240, 240, 480 },
257 { 180, 180, 360 },
258 { 135, 135, 270 },
259 { 120, 120, 240 },
260 { 105, 105, 210 },
261 { 90, 90, 180 },
262 { 90, 75, 165 },
263 { 75, 45, 120 },
264 { 0, 0, 0 }
265 };
266
267
268 struct {
269 int addrSetup;
270 int rdy2pause;
271 int wrDataSetup;
272 } kl66_udma_timings[] =
273 {
274 { 0, 180, 120 },
275 { 0, 150, 90 },
276 { 0, 120, 60 },
277 { 0, 90, 45 },
278 { 0, 90, 30 }
279 };
280
281
282 struct kauai_timing {
283 int cycle_time;
284 u32 timing_reg;
285 };
286
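/*
 * Kauai-style lookup tables: each entry maps a minimum cycle time (in ns)
 * to a pre-computed timing register value. Entries are sorted by
 * decreasing cycle time and zero-terminated; they are searched by
 * kauai_lookup_timing() below.
 */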
287 static struct kauai_timing kauai_pio_timings[] =
288 {
289 { 930 , 0x08000fff },
290 { 600 , 0x08000a92 },
291 { 383 , 0x0800060f },
292 { 360 , 0x08000492 },
293 { 330 , 0x0800048f },
294 { 300 , 0x080003cf },
295 { 270 , 0x080003cc },
296 { 240 , 0x0800038b },
297 { 239 , 0x0800030c },
298 { 180 , 0x05000249 },
299 { 120 , 0x04000148 },
300 { 0 , 0 },
301 };
302
303 static struct kauai_timing kauai_mdma_timings[] =
304 {
305 { 1260 , 0x00fff000 },
306 { 480 , 0x00618000 },
307 { 360 , 0x00492000 },
308 { 270 , 0x0038e000 },
309 { 240 , 0x0030c000 },
310 { 210 , 0x002cb000 },
311 { 180 , 0x00249000 },
312 { 150 , 0x00209000 },
313 { 120 , 0x00148000 },
314 { 0 , 0 },
315 };
316
317 static struct kauai_timing kauai_udma_timings[] =
318 {
319 { 120 , 0x000070c0 },
320 { 90 , 0x00005d80 },
321 { 60 , 0x00004a60 },
322 { 45 , 0x00003a50 },
323 { 30 , 0x00002a30 },
324 { 20 , 0x00002921 },
325 { 0 , 0 },
326 };
327
328 static struct kauai_timing shasta_pio_timings[] =
329 {
330 { 930 , 0x08000fff },
331 { 600 , 0x0A000c97 },
332 { 383 , 0x07000712 },
333 { 360 , 0x040003cd },
334 { 330 , 0x040003cd },
335 { 300 , 0x040003cd },
336 { 270 , 0x040003cd },
337 { 240 , 0x040003cd },
338 { 239 , 0x040003cd },
339 { 180 , 0x0400028b },
340 { 120 , 0x0400010a },
341 { 0 , 0 },
342 };
343
344 static struct kauai_timing shasta_mdma_timings[] =
345 {
346 { 1260 , 0x00fff000 },
347 { 480 , 0x00820800 },
348 { 360 , 0x00820800 },
349 { 270 , 0x00820800 },
350 { 240 , 0x00820800 },
351 { 210 , 0x00820800 },
352 { 180 , 0x00820800 },
353 { 150 , 0x0028b000 },
354 { 120 , 0x001ca000 },
355 { 0 , 0 },
356 };
357
358 static struct kauai_timing shasta_udma133_timings[] =
359 {
360 { 120 , 0x00035901, },
361 { 90 , 0x000348b1, },
362 { 60 , 0x00033881, },
363 { 45 , 0x00033861, },
364 { 30 , 0x00033841, },
365 { 20 , 0x00033031, },
366 { 15 , 0x00033021, },
367 { 0 , 0 },
368 };
369
370
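/*
 * Return the register value for the smallest listed cycle time that is
 * still >= the requested one (or the slowest entry if the request exceeds
 * the table), e.g. a 400 ns request against kauai_pio_timings yields the
 * 600 ns entry since 383 ns would be faster than allowed. The BUG() is
 * unreachable with the zero-terminated tables above.
 */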
371 static inline u32
372 kauai_lookup_timing(struct kauai_timing* table, int cycle_time)
373 {
374 int i;
375
376 for (i=0; table[i].cycle_time; i++)
377 if (cycle_time > table[i+1].cycle_time)
378 return table[i].timing_reg;
379 BUG();
380 return 0;
381 }
382
383
384 #define MAX_DCMDS 256
385
386
387
388
389
390
391
392
393
394
395
396
397
398 #define IDE_WAKEUP_DELAY (1*HZ)
399
400 static int pmac_ide_init_dma(ide_hwif_t *, const struct ide_port_info *);
401
402 #define PMAC_IDE_REG(x) \
403 ((void __iomem *)((drive)->hwif->io_ports.data_addr + (x)))
404
405
406
407
408
409
410 static void pmac_ide_apply_timings(ide_drive_t *drive)
411 {
412 ide_hwif_t *hwif = drive->hwif;
413 pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
414
415 if (drive->dn & 1)
416 writel(pmif->timings[1], PMAC_IDE_REG(IDE_TIMING_CONFIG));
417 else
418 writel(pmif->timings[0], PMAC_IDE_REG(IDE_TIMING_CONFIG));
419 (void)readl(PMAC_IDE_REG(IDE_TIMING_CONFIG));
420 }
421
422
423
424
425
426
427 static void pmac_ide_kauai_apply_timings(ide_drive_t *drive)
428 {
429 ide_hwif_t *hwif = drive->hwif;
430 pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
431
432 if (drive->dn & 1) {
433 writel(pmif->timings[1], PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
434 writel(pmif->timings[3], PMAC_IDE_REG(IDE_KAUAI_ULTRA_CONFIG));
435 } else {
436 writel(pmif->timings[0], PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
437 writel(pmif->timings[2], PMAC_IDE_REG(IDE_KAUAI_ULTRA_CONFIG));
438 }
439 (void)readl(PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
440 }
441
442
443
444
445 static void
446 pmac_ide_do_update_timings(ide_drive_t *drive)
447 {
448 ide_hwif_t *hwif = drive->hwif;
449 pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
450
451 if (pmif->kind == controller_sh_ata6 ||
452 pmif->kind == controller_un_ata6 ||
453 pmif->kind == controller_k2_ata6)
454 pmac_ide_kauai_apply_timings(drive);
455 else
456 pmac_ide_apply_timings(drive);
457 }
458
459 static void pmac_dev_select(ide_drive_t *drive)
460 {
461 pmac_ide_apply_timings(drive);
462
463 writeb(drive->select | ATA_DEVICE_OBS,
464 (void __iomem *)drive->hwif->io_ports.device_addr);
465 }
466
467 static void pmac_kauai_dev_select(ide_drive_t *drive)
468 {
469 pmac_ide_kauai_apply_timings(drive);
470
471 writeb(drive->select | ATA_DEVICE_OBS,
472 (void __iomem *)drive->hwif->io_ports.device_addr);
473 }
474
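/*
 * The register writes below are each followed by a throw-away readl() of
 * the memory-mapped timing register, which only serves to post (flush)
 * the MMIO write.
 */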
475 static void pmac_exec_command(ide_hwif_t *hwif, u8 cmd)
476 {
477 writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
478 (void)readl((void __iomem *)(hwif->io_ports.data_addr
479 + IDE_TIMING_CONFIG));
480 }
481
482 static void pmac_write_devctl(ide_hwif_t *hwif, u8 ctl)
483 {
484 writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
485 (void)readl((void __iomem *)(hwif->io_ports.data_addr
486 + IDE_TIMING_CONFIG));
487 }
488
489
490
491
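/*
 * PIO tuning: translate the requested PIO mode's cycle time into
 * access/recovery tick counts (or a canned Kauai register value), merge
 * the result into the cached timing word for the drive's unit and push it
 * to the hardware via pmac_ide_do_update_timings().
 */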
492 static void pmac_ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
493 {
494 pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
495 const u8 pio = drive->pio_mode - XFER_PIO_0;
496 struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio);
497 u32 *timings, t;
498 unsigned accessTicks, recTicks;
499 unsigned accessTime, recTime;
500 unsigned int cycle_time;
501
502
503 timings = &pmif->timings[drive->dn & 1];
504 t = *timings;
505
506 cycle_time = ide_pio_cycle_time(drive, pio);
507
508 switch (pmif->kind) {
509 case controller_sh_ata6: {
510
511 u32 tr = kauai_lookup_timing(shasta_pio_timings, cycle_time);
512 t = (t & ~TR_133_PIOREG_PIO_MASK) | tr;
513 break;
514 }
515 case controller_un_ata6:
516 case controller_k2_ata6: {
517
518 u32 tr = kauai_lookup_timing(kauai_pio_timings, cycle_time);
519 t = (t & ~TR_100_PIOREG_PIO_MASK) | tr;
520 break;
521 }
522 case controller_kl_ata4:
523
524 recTime = cycle_time - tim->active - tim->setup;
525 recTime = max(recTime, 150U);
526 accessTime = tim->active;
527 accessTime = max(accessTime, 150U);
528 accessTicks = SYSCLK_TICKS_66(accessTime);
529 accessTicks = min(accessTicks, 0x1fU);
530 recTicks = SYSCLK_TICKS_66(recTime);
531 recTicks = min(recTicks, 0x1fU);
532 t = (t & ~TR_66_PIO_MASK) |
533 (accessTicks << TR_66_PIO_ACCESS_SHIFT) |
534 (recTicks << TR_66_PIO_RECOVERY_SHIFT);
535 break;
536 default: {
537
538 int ebit = 0;
539 recTime = cycle_time - tim->active - tim->setup;
540 recTime = max(recTime, 150U);
541 accessTime = tim->active;
542 accessTime = max(accessTime, 150U);
543 accessTicks = SYSCLK_TICKS(accessTime);
544 accessTicks = min(accessTicks, 0x1fU);
545 accessTicks = max(accessTicks, 4U);
546 recTicks = SYSCLK_TICKS(recTime);
547 recTicks = min(recTicks, 0x1fU);
548 recTicks = max(recTicks, 5U) - 4;
549 if (recTicks > 9) {
550 recTicks--;
551 ebit = 1;
552 }
553 t = (t & ~TR_33_PIO_MASK) |
554 (accessTicks << TR_33_PIO_ACCESS_SHIFT) |
555 (recTicks << TR_33_PIO_RECOVERY_SHIFT);
556 if (ebit)
557 t |= TR_33_PIO_E;
558 break;
559 }
560 }
561
562 #ifdef IDE_PMAC_DEBUG
563 printk(KERN_ERR "%s: Set PIO timing for mode %d, reg: 0x%08x\n",
564 drive->name, pio, *timings);
565 #endif
566
567 *timings = t;
568 pmac_ide_do_update_timings(drive);
569 }
570
571
572
573
574 static int
575 set_timings_udma_ata4(u32 *timings, u8 speed)
576 {
577 unsigned rdyToPauseTicks, wrDataSetupTicks, addrTicks;
578
579 if (speed > XFER_UDMA_4)
580 return 1;
581
582 rdyToPauseTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].rdy2pause);
583 wrDataSetupTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].wrDataSetup);
584 addrTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].addrSetup);
585
586 *timings = ((*timings) & ~(TR_66_UDMA_MASK | TR_66_MDMA_MASK)) |
587 (wrDataSetupTicks << TR_66_UDMA_WRDATASETUP_SHIFT) |
588 (rdyToPauseTicks << TR_66_UDMA_RDY2PAUS_SHIFT) |
589 (addrTicks <<TR_66_UDMA_ADDRSETUP_SHIFT) |
590 TR_66_UDMA_EN;
591 #ifdef IDE_PMAC_DEBUG
592 printk(KERN_ERR "ide_pmac: Set UDMA timing for mode %d, reg: 0x%08x\n",
593 speed & 0xf, *timings);
594 #endif
595
596 return 0;
597 }
598
599
600
601
602 static int
603 set_timings_udma_ata6(u32 *pio_timings, u32 *ultra_timings, u8 speed)
604 {
605 struct ide_timing *t = ide_timing_find_mode(speed);
606 u32 tr;
607
608 if (speed > XFER_UDMA_5 || t == NULL)
609 return 1;
610 tr = kauai_lookup_timing(kauai_udma_timings, (int)t->udma);
611 *ultra_timings = ((*ultra_timings) & ~TR_100_UDMAREG_UDMA_MASK) | tr;
612 *ultra_timings = (*ultra_timings) | TR_100_UDMAREG_UDMA_EN;
613
614 return 0;
615 }
616
617
618
619
620 static int
621 set_timings_udma_shasta(u32 *pio_timings, u32 *ultra_timings, u8 speed)
622 {
623 struct ide_timing *t = ide_timing_find_mode(speed);
624 u32 tr;
625
626 if (speed > XFER_UDMA_6 || t == NULL)
627 return 1;
628 tr = kauai_lookup_timing(shasta_udma133_timings, (int)t->udma);
629 *ultra_timings = ((*ultra_timings) & ~TR_133_UDMAREG_UDMA_MASK) | tr;
630 *ultra_timings = (*ultra_timings) | TR_133_UDMAREG_UDMA_EN;
631
632 return 0;
633 }
634
635
636
637
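/*
 * MDMA tuning: derive a target cycle time from the mode and the drive's
 * IDENTIFY data (ATA_ID_EIDE_DMA_TIME may slow it down), look it up in
 * the per-cell tables and encode the result into the timing word(s). On
 * Kauai-family cells this also clears the UDMA enable bit.
 */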
638 static void
639 set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
640 u8 speed)
641 {
642 u16 *id = drive->id;
643 int cycleTime, accessTime = 0, recTime = 0;
644 unsigned accessTicks, recTicks;
645 struct mdma_timings_t* tm = NULL;
646 int i;
647
648
649 switch(speed & 0xf) {
650 case 0: cycleTime = 480; break;
651 case 1: cycleTime = 150; break;
652 case 2: cycleTime = 120; break;
653 default:
654 BUG();
655 break;
656 }
657
658
659 if ((id[ATA_ID_FIELD_VALID] & 2) && id[ATA_ID_EIDE_DMA_TIME])
660 cycleTime = max_t(int, id[ATA_ID_EIDE_DMA_TIME], cycleTime);
661
662
663 if ((intf_type == controller_ohare) && (cycleTime < 150))
664 cycleTime = 150;
665
666 switch(intf_type) {
667 case controller_sh_ata6:
668 case controller_un_ata6:
669 case controller_k2_ata6:
670 break;
671 case controller_kl_ata4:
672 tm = mdma_timings_66;
673 break;
674 case controller_kl_ata3:
675 tm = mdma_timings_33k;
676 break;
677 default:
678 tm = mdma_timings_33;
679 break;
680 }
681 if (tm != NULL) {
682
683 i = -1;
684 for (;;) {
685 if (tm[i+1].cycleTime < cycleTime)
686 break;
687 i++;
688 }
689 cycleTime = tm[i].cycleTime;
690 accessTime = tm[i].accessTime;
691 recTime = tm[i].recoveryTime;
692
693 #ifdef IDE_PMAC_DEBUG
694 printk(KERN_ERR "%s: MDMA, cycleTime: %d, accessTime: %d, recTime: %d\n",
695 drive->name, cycleTime, accessTime, recTime);
696 #endif
697 }
698 switch(intf_type) {
699 case controller_sh_ata6: {
700
701 u32 tr = kauai_lookup_timing(shasta_mdma_timings, cycleTime);
702 *timings = ((*timings) & ~TR_133_PIOREG_MDMA_MASK) | tr;
703 *timings2 = (*timings2) & ~TR_133_UDMAREG_UDMA_EN;
704 }
705 break;
706 case controller_un_ata6:
707 case controller_k2_ata6: {
708
709 u32 tr = kauai_lookup_timing(kauai_mdma_timings, cycleTime);
710 *timings = ((*timings) & ~TR_100_PIOREG_MDMA_MASK) | tr;
711 *timings2 = (*timings2) & ~TR_100_UDMAREG_UDMA_EN;
712 }
713 break;
714 case controller_kl_ata4:
715
716 accessTicks = SYSCLK_TICKS_66(accessTime);
717 accessTicks = min(accessTicks, 0x1fU);
718 accessTicks = max(accessTicks, 0x1U);
719 recTicks = SYSCLK_TICKS_66(recTime);
720 recTicks = min(recTicks, 0x1fU);
721 recTicks = max(recTicks, 0x3U);
722
723 *timings = ((*timings) & ~(TR_66_MDMA_MASK | TR_66_UDMA_MASK)) |
724 (accessTicks << TR_66_MDMA_ACCESS_SHIFT) |
725 (recTicks << TR_66_MDMA_RECOVERY_SHIFT);
726 break;
727 case controller_kl_ata3:
728
729 accessTicks = SYSCLK_TICKS(accessTime);
730 accessTicks = max(accessTicks, 1U);
731 accessTicks = min(accessTicks, 0x1fU);
732 accessTime = accessTicks * IDE_SYSCLK_NS;
733 recTicks = SYSCLK_TICKS(recTime);
734 recTicks = max(recTicks, 1U);
735 recTicks = min(recTicks, 0x1fU);
736 *timings = ((*timings) & ~TR_33_MDMA_MASK) |
737 (accessTicks << TR_33_MDMA_ACCESS_SHIFT) |
738 (recTicks << TR_33_MDMA_RECOVERY_SHIFT);
739 break;
740 default: {
741
742 int halfTick = 0;
743 int origAccessTime = accessTime;
744 int origRecTime = recTime;
745
746 accessTicks = SYSCLK_TICKS(accessTime);
747 accessTicks = max(accessTicks, 1U);
748 accessTicks = min(accessTicks, 0x1fU);
749 accessTime = accessTicks * IDE_SYSCLK_NS;
750 recTicks = SYSCLK_TICKS(recTime);
751 recTicks = max(recTicks, 2U) - 1;
752 recTicks = min(recTicks, 0x1fU);
753 recTime = (recTicks + 1) * IDE_SYSCLK_NS;
754 if ((accessTicks > 1) &&
755 ((accessTime - IDE_SYSCLK_NS/2) >= origAccessTime) &&
756 ((recTime - IDE_SYSCLK_NS/2) >= origRecTime)) {
757 halfTick = 1;
758 accessTicks--;
759 }
760 *timings = ((*timings) & ~TR_33_MDMA_MASK) |
761 (accessTicks << TR_33_MDMA_ACCESS_SHIFT) |
762 (recTicks << TR_33_MDMA_RECOVERY_SHIFT);
763 if (halfTick)
764 *timings |= TR_33_MDMA_HALFTICK;
765 }
766 }
767 #ifdef IDE_PMAC_DEBUG
768 printk(KERN_ERR "%s: Set MDMA timing for mode %d, reg: 0x%08x\n",
769 drive->name, speed & 0xf, *timings);
770 #endif
771 }
772
773 static void pmac_ide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
774 {
775 pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
776 int ret = 0;
777 u32 *timings, *timings2, tl[2];
778 u8 unit = drive->dn & 1;
779 const u8 speed = drive->dma_mode;
780
781 timings = &pmif->timings[unit];
782 timings2 = &pmif->timings[unit+2];
783
784
785 tl[0] = *timings;
786 tl[1] = *timings2;
787
788 if (speed >= XFER_UDMA_0) {
789 if (pmif->kind == controller_kl_ata4)
790 ret = set_timings_udma_ata4(&tl[0], speed);
791 else if (pmif->kind == controller_un_ata6
792 || pmif->kind == controller_k2_ata6)
793 ret = set_timings_udma_ata6(&tl[0], &tl[1], speed);
794 else if (pmif->kind == controller_sh_ata6)
795 ret = set_timings_udma_shasta(&tl[0], &tl[1], speed);
796 else
797 ret = -1;
798 } else
799 set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed);
800
801 if (ret)
802 return;
803
804
805 *timings = tl[0];
806 *timings2 = tl[1];
807
808 pmac_ide_do_update_timings(drive);
809 }
810
811
812
813
814
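/*
 * Load conservative, known-good values into the cached timing words;
 * called at init time and on resume, before real timings are
 * recalculated.
 */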
815 static void
816 sanitize_timings(pmac_ide_hwif_t *pmif)
817 {
818 unsigned int value, value2 = 0;
819
820 switch(pmif->kind) {
821 case controller_sh_ata6:
822 value = 0x0a820c97;
823 value2 = 0x00033031;
824 break;
825 case controller_un_ata6:
826 case controller_k2_ata6:
827 value = 0x08618a92;
828 value2 = 0x00002921;
829 break;
830 case controller_kl_ata4:
831 value = 0x0008438c;
832 break;
833 case controller_kl_ata3:
834 value = 0x00084526;
835 break;
836 case controller_heathrow:
837 case controller_ohare:
838 default:
839 value = 0x00074526;
840 break;
841 }
842 pmif->timings[0] = pmif->timings[1] = value;
843 pmif->timings[2] = pmif->timings[3] = value2;
844 }
845
846 static int on_media_bay(pmac_ide_hwif_t *pmif)
847 {
848 return pmif->mdev && pmif->mdev->media_bay != NULL;
849 }
850
851
852
853
854 static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif)
855 {
856
857 pmif->timings[0] = 0;
858 pmif->timings[1] = 0;
859
860 disable_irq(pmif->irq);
861
862
863 if (on_media_bay(pmif))
864 return 0;
865
866
867 if (pmif->kauai_fcr) {
868 u32 fcr = readl(pmif->kauai_fcr);
869 fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE);
870 writel(fcr, pmif->kauai_fcr);
871 }
872
873
874 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id,
875 0);
876
877 return 0;
878 }
879
880
881
882
883 static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif)
884 {
885
886 if (!on_media_bay(pmif)) {
887 ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1);
888 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 1);
889 msleep(10);
890 ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 0);
891
892
893 if (pmif->kauai_fcr) {
894 u32 fcr = readl(pmif->kauai_fcr);
895 fcr |= KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE;
896 writel(fcr, pmif->kauai_fcr);
897 }
898
899 msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
900 }
901
902
903 sanitize_timings(pmif);
904
905 enable_irq(pmif->irq);
906
907 return 0;
908 }
909
910 static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
911 {
912 pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
913 struct device_node *np = pmif->node;
914 const char *cable = of_get_property(np, "cable-type", NULL);
915 struct device_node *root = of_find_node_by_path("/");
916 const char *model = of_get_property(root, "model", NULL);
917
918 of_node_put(root);
919
920 if (cable && !strncmp(cable, "80-", 3)) {
921
922
923 if (!strncmp(model, "PowerBook", 9))
924 return ATA_CBL_PATA40_SHORT;
925 else
926 return ATA_CBL_PATA80;
927 }
928
929
930
931
932
933
934 if (of_device_is_compatible(np, "K2-UATA") ||
935 of_device_is_compatible(np, "shasta-ata"))
936 return ATA_CBL_PATA80;
937
938 return ATA_CBL_PATA40;
939 }
940
941 static void pmac_ide_init_dev(ide_drive_t *drive)
942 {
943 ide_hwif_t *hwif = drive->hwif;
944 pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
945
946 if (on_media_bay(pmif)) {
947 if (check_media_bay(pmif->mdev->media_bay) == MB_CD) {
948 drive->dev_flags &= ~IDE_DFLAG_NOPROBE;
949 return;
950 }
951 drive->dev_flags |= IDE_DFLAG_NOPROBE;
952 }
953 }
954
955 static const struct ide_tp_ops pmac_tp_ops = {
956 .exec_command = pmac_exec_command,
957 .read_status = ide_read_status,
958 .read_altstatus = ide_read_altstatus,
959 .write_devctl = pmac_write_devctl,
960
961 .dev_select = pmac_dev_select,
962 .tf_load = ide_tf_load,
963 .tf_read = ide_tf_read,
964
965 .input_data = ide_input_data,
966 .output_data = ide_output_data,
967 };
968
969 static const struct ide_tp_ops pmac_ata6_tp_ops = {
970 .exec_command = pmac_exec_command,
971 .read_status = ide_read_status,
972 .read_altstatus = ide_read_altstatus,
973 .write_devctl = pmac_write_devctl,
974
975 .dev_select = pmac_kauai_dev_select,
976 .tf_load = ide_tf_load,
977 .tf_read = ide_tf_read,
978
979 .input_data = ide_input_data,
980 .output_data = ide_output_data,
981 };
982
983 static const struct ide_port_ops pmac_ide_ata4_port_ops = {
984 .init_dev = pmac_ide_init_dev,
985 .set_pio_mode = pmac_ide_set_pio_mode,
986 .set_dma_mode = pmac_ide_set_dma_mode,
987 .cable_detect = pmac_ide_cable_detect,
988 };
989
990 static const struct ide_port_ops pmac_ide_port_ops = {
991 .init_dev = pmac_ide_init_dev,
992 .set_pio_mode = pmac_ide_set_pio_mode,
993 .set_dma_mode = pmac_ide_set_dma_mode,
994 };
995
996 static const struct ide_dma_ops pmac_dma_ops;
997
998 static const struct ide_port_info pmac_port_info = {
999 .name = DRV_NAME,
1000 .init_dma = pmac_ide_init_dma,
1001 .chipset = ide_pmac,
1002 .tp_ops = &pmac_tp_ops,
1003 .port_ops = &pmac_ide_port_ops,
1004 .dma_ops = &pmac_dma_ops,
1005 .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
1006 IDE_HFLAG_POST_SET_MODE |
1007 IDE_HFLAG_MMIO |
1008 IDE_HFLAG_UNMASK_IRQS,
1009 .pio_mask = ATA_PIO4,
1010 .mwdma_mask = ATA_MWDMA2,
1011 };
1012
1013
1014
1015
1016
1017 static int pmac_ide_setup_device(pmac_ide_hwif_t *pmif, struct ide_hw *hw)
1018 {
1019 struct device_node *np = pmif->node;
1020 const int *bidp;
1021 struct ide_host *host;
1022 ide_hwif_t *hwif;
1023 struct ide_hw *hws[] = { hw };
1024 struct ide_port_info d = pmac_port_info;
1025 int rc;
1026
1027 pmif->broken_dma = pmif->broken_dma_warn = 0;
1028 if (of_device_is_compatible(np, "shasta-ata")) {
1029 pmif->kind = controller_sh_ata6;
1030 d.tp_ops = &pmac_ata6_tp_ops;
1031 d.port_ops = &pmac_ide_ata4_port_ops;
1032 d.udma_mask = ATA_UDMA6;
1033 } else if (of_device_is_compatible(np, "kauai-ata")) {
1034 pmif->kind = controller_un_ata6;
1035 d.tp_ops = &pmac_ata6_tp_ops;
1036 d.port_ops = &pmac_ide_ata4_port_ops;
1037 d.udma_mask = ATA_UDMA5;
1038 } else if (of_device_is_compatible(np, "K2-UATA")) {
1039 pmif->kind = controller_k2_ata6;
1040 d.tp_ops = &pmac_ata6_tp_ops;
1041 d.port_ops = &pmac_ide_ata4_port_ops;
1042 d.udma_mask = ATA_UDMA5;
1043 } else if (of_device_is_compatible(np, "keylargo-ata")) {
1044 if (of_node_name_eq(np, "ata-4")) {
1045 pmif->kind = controller_kl_ata4;
1046 d.port_ops = &pmac_ide_ata4_port_ops;
1047 d.udma_mask = ATA_UDMA4;
1048 } else
1049 pmif->kind = controller_kl_ata3;
1050 } else if (of_device_is_compatible(np, "heathrow-ata")) {
1051 pmif->kind = controller_heathrow;
1052 } else {
1053 pmif->kind = controller_ohare;
1054 pmif->broken_dma = 1;
1055 }
1056
1057 bidp = of_get_property(np, "AAPL,bus-id", NULL);
1058 pmif->aapl_bus_id = bidp ? *bidp : 0;
1059
1060
1061 if (pmif->kauai_fcr)
1062 writel(KAUAI_FCR_UATA_MAGIC |
1063 KAUAI_FCR_UATA_RESET_N |
1064 KAUAI_FCR_UATA_ENABLE, pmif->kauai_fcr);
1065
1066
1067 sanitize_timings(pmif);
1068
1069
1070 if (pmif->mdev)
1071 lock_media_bay(pmif->mdev->media_bay);
1072
1073 host = ide_host_alloc(&d, hws, 1);
1074 if (host == NULL) {
1075 rc = -ENOMEM;
1076 goto bail;
1077 }
1078 hwif = pmif->hwif = host->ports[0];
1079
1080 if (on_media_bay(pmif)) {
1081
1082 if (!bidp)
1083 pmif->aapl_bus_id = 1;
1084 } else if (pmif->kind == controller_ohare) {
1085
1086
1087
1088
1089 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, 0, 1);
1090 } else {
1091
1092 ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1);
1093 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1);
1094 msleep(10);
1095 ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 0);
1096 msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
1097 }
1098
1099 printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), "
1100 "bus ID %d%s, irq %d\n", model_name[pmif->kind],
1101 pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id,
1102 on_media_bay(pmif) ? " (mediabay)" : "", hw->irq);
1103
1104 rc = ide_host_register(host, &d, hws);
1105 if (rc)
1106 pmif->hwif = NULL;
1107
1108 if (pmif->mdev)
1109 unlock_media_bay(pmif->mdev->media_bay);
1110
1111 bail:
1112 if (rc && host)
1113 ide_host_free(host);
1114 return rc;
1115 }
1116
1117 static void pmac_ide_init_ports(struct ide_hw *hw, unsigned long base)
1118 {
1119 int i;
1120
1121 for (i = 0; i < 8; ++i)
1122 hw->io_ports_array[i] = base + i * 0x10;
1123
1124 hw->io_ports.ctl_addr = base + 0x160;
1125 }
1126
1127
1128
1129
1130 static int pmac_ide_macio_attach(struct macio_dev *mdev,
1131 const struct of_device_id *match)
1132 {
1133 void __iomem *base;
1134 unsigned long regbase;
1135 pmac_ide_hwif_t *pmif;
1136 int irq, rc;
1137 struct ide_hw hw;
1138
1139 pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
1140 if (pmif == NULL)
1141 return -ENOMEM;
1142
1143 if (macio_resource_count(mdev) == 0) {
1144 printk(KERN_WARNING "ide-pmac: no address for %pOF\n",
1145 mdev->ofdev.dev.of_node);
1146 rc = -ENXIO;
1147 goto out_free_pmif;
1148 }
1149
1150
1151 if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) {
1152 printk(KERN_ERR "ide-pmac: can't request MMIO resource for "
1153 "%pOF!\n", mdev->ofdev.dev.of_node);
1154 rc = -EBUSY;
1155 goto out_free_pmif;
1156 }
1157
1158
1159
1160
1161
1162
1163 if (macio_irq_count(mdev) == 0) {
1164 printk(KERN_WARNING "ide-pmac: no intrs for device %pOF, using "
1165 "13\n", mdev->ofdev.dev.of_node);
1166 irq = irq_create_mapping(NULL, 13);
1167 } else
1168 irq = macio_irq(mdev, 0);
1169
1170 base = ioremap(macio_resource_start(mdev, 0), 0x400);
1171 regbase = (unsigned long) base;
1172
1173 pmif->mdev = mdev;
1174 pmif->node = mdev->ofdev.dev.of_node;
1175 pmif->regbase = regbase;
1176 pmif->irq = irq;
1177 pmif->kauai_fcr = NULL;
1178
1179 if (macio_resource_count(mdev) >= 2) {
1180 if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
1181 printk(KERN_WARNING "ide-pmac: can't request DMA "
1182 "resource for %pOF!\n",
1183 mdev->ofdev.dev.of_node);
1184 else
1185 pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
1186 } else
1187 pmif->dma_regs = NULL;
1188
1189 dev_set_drvdata(&mdev->ofdev.dev, pmif);
1190
1191 memset(&hw, 0, sizeof(hw));
1192 pmac_ide_init_ports(&hw, pmif->regbase);
1193 hw.irq = irq;
1194 hw.dev = &mdev->bus->pdev->dev;
1195 hw.parent = &mdev->ofdev.dev;
1196
1197 rc = pmac_ide_setup_device(pmif, &hw);
1198 if (rc != 0) {
1199
1200 dev_set_drvdata(&mdev->ofdev.dev, NULL);
1201 iounmap(base);
1202 if (pmif->dma_regs) {
1203 iounmap(pmif->dma_regs);
1204 macio_release_resource(mdev, 1);
1205 }
1206 macio_release_resource(mdev, 0);
1207 kfree(pmif);
1208 }
1209
1210 return rc;
1211
1212 out_free_pmif:
1213 kfree(pmif);
1214 return rc;
1215 }
1216
1217 static int
1218 pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
1219 {
1220 pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev);
1221 int rc = 0;
1222
1223 if (mesg.event != mdev->ofdev.dev.power.power_state.event
1224 && (mesg.event & PM_EVENT_SLEEP)) {
1225 rc = pmac_ide_do_suspend(pmif);
1226 if (rc == 0)
1227 mdev->ofdev.dev.power.power_state = mesg;
1228 }
1229
1230 return rc;
1231 }
1232
1233 static int
1234 pmac_ide_macio_resume(struct macio_dev *mdev)
1235 {
1236 pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev);
1237 int rc = 0;
1238
1239 if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) {
1240 rc = pmac_ide_do_resume(pmif);
1241 if (rc == 0)
1242 mdev->ofdev.dev.power.power_state = PMSG_ON;
1243 }
1244
1245 return rc;
1246 }
1247
1248
1249
1250
1251 static int pmac_ide_pci_attach(struct pci_dev *pdev,
1252 const struct pci_device_id *id)
1253 {
1254 struct device_node *np;
1255 pmac_ide_hwif_t *pmif;
1256 void __iomem *base;
1257 unsigned long rbase, rlen;
1258 int rc;
1259 struct ide_hw hw;
1260
1261 np = pci_device_to_OF_node(pdev);
1262 if (np == NULL) {
1263 printk(KERN_ERR "ide-pmac: cannot find MacIO node for Kauai ATA interface\n");
1264 return -ENODEV;
1265 }
1266
1267 pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
1268 if (pmif == NULL)
1269 return -ENOMEM;
1270
1271 if (pci_enable_device(pdev)) {
1272 printk(KERN_WARNING "ide-pmac: Can't enable PCI device for "
1273 "%pOF\n", np);
1274 rc = -ENXIO;
1275 goto out_free_pmif;
1276 }
1277 pci_set_master(pdev);
1278
1279 if (pci_request_regions(pdev, "Kauai ATA")) {
1280 printk(KERN_ERR "ide-pmac: Cannot obtain PCI resources for "
1281 "%pOF\n", np);
1282 rc = -ENXIO;
1283 goto out_free_pmif;
1284 }
1285
1286 pmif->mdev = NULL;
1287 pmif->node = np;
1288
1289 rbase = pci_resource_start(pdev, 0);
1290 rlen = pci_resource_len(pdev, 0);
1291
1292 base = ioremap(rbase, rlen);
1293 pmif->regbase = (unsigned long) base + 0x2000;
1294 pmif->dma_regs = base + 0x1000;
1295 pmif->kauai_fcr = base;
1296 pmif->irq = pdev->irq;
1297
1298 pci_set_drvdata(pdev, pmif);
1299
1300 memset(&hw, 0, sizeof(hw));
1301 pmac_ide_init_ports(&hw, pmif->regbase);
1302 hw.irq = pdev->irq;
1303 hw.dev = &pdev->dev;
1304
1305 rc = pmac_ide_setup_device(pmif, &hw);
1306 if (rc != 0) {
1307
1308 iounmap(base);
1309 pci_release_regions(pdev);
1310 kfree(pmif);
1311 }
1312
1313 return rc;
1314
1315 out_free_pmif:
1316 kfree(pmif);
1317 return rc;
1318 }
1319
1320 static int
1321 pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
1322 {
1323 pmac_ide_hwif_t *pmif = pci_get_drvdata(pdev);
1324 int rc = 0;
1325
1326 if (mesg.event != pdev->dev.power.power_state.event
1327 && (mesg.event & PM_EVENT_SLEEP)) {
1328 rc = pmac_ide_do_suspend(pmif);
1329 if (rc == 0)
1330 pdev->dev.power.power_state = mesg;
1331 }
1332
1333 return rc;
1334 }
1335
1336 static int
1337 pmac_ide_pci_resume(struct pci_dev *pdev)
1338 {
1339 pmac_ide_hwif_t *pmif = pci_get_drvdata(pdev);
1340 int rc = 0;
1341
1342 if (pdev->dev.power.power_state.event != PM_EVENT_ON) {
1343 rc = pmac_ide_do_resume(pmif);
1344 if (rc == 0)
1345 pdev->dev.power.power_state = PMSG_ON;
1346 }
1347
1348 return rc;
1349 }
1350
1351 #ifdef CONFIG_PMAC_MEDIABAY
1352 static void pmac_ide_macio_mb_event(struct macio_dev* mdev, int mb_state)
1353 {
1354 pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev);
1355
1356 switch(mb_state) {
1357 case MB_CD:
1358 if (!pmif->hwif->present)
1359 ide_port_scan(pmif->hwif);
1360 break;
1361 default:
1362 if (pmif->hwif->present)
1363 ide_port_unregister_devices(pmif->hwif);
1364 }
1365 }
1366 #endif
1367
1368
1369 static struct of_device_id pmac_ide_macio_match[] =
1370 {
1371 {
1372 .name = "IDE",
1373 },
1374 {
1375 .name = "ATA",
1376 },
1377 {
1378 .type = "ide",
1379 },
1380 {
1381 .type = "ata",
1382 },
1383 {},
1384 };
1385
1386 static struct macio_driver pmac_ide_macio_driver =
1387 {
1388 .driver = {
1389 .name = "ide-pmac",
1390 .owner = THIS_MODULE,
1391 .of_match_table = pmac_ide_macio_match,
1392 },
1393 .probe = pmac_ide_macio_attach,
1394 .suspend = pmac_ide_macio_suspend,
1395 .resume = pmac_ide_macio_resume,
1396 #ifdef CONFIG_PMAC_MEDIABAY
1397 .mediabay_event = pmac_ide_macio_mb_event,
1398 #endif
1399 };
1400
1401 static const struct pci_device_id pmac_ide_pci_match[] = {
1402 { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA), 0 },
1403 { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100), 0 },
1404 { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100), 0 },
1405 { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA), 0 },
1406 { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA), 0 },
1407 {},
1408 };
1409
1410 static struct pci_driver pmac_ide_pci_driver = {
1411 .name = "ide-pmac",
1412 .id_table = pmac_ide_pci_match,
1413 .probe = pmac_ide_pci_attach,
1414 .suspend = pmac_ide_pci_suspend,
1415 .resume = pmac_ide_pci_resume,
1416 };
1417 MODULE_DEVICE_TABLE(pci, pmac_ide_pci_match);
1418
1419 int __init pmac_ide_probe(void)
1420 {
1421 int error;
1422
1423 if (!machine_is(powermac))
1424 return -ENODEV;
1425
1426 #ifdef CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST
1427 error = pci_register_driver(&pmac_ide_pci_driver);
1428 if (error)
1429 goto out;
1430 error = macio_register_driver(&pmac_ide_macio_driver);
1431 if (error) {
1432 pci_unregister_driver(&pmac_ide_pci_driver);
1433 goto out;
1434 }
1435 #else
1436 error = macio_register_driver(&pmac_ide_macio_driver);
1437 if (error)
1438 goto out;
1439 error = pci_register_driver(&pmac_ide_pci_driver);
1440 if (error) {
1441 macio_unregister_driver(&pmac_ide_macio_driver);
1442 goto out;
1443 }
1444 #endif
1445 out:
1446 return error;
1447 }
1448
1449
1450
1451
1452
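/*
 * Build the DBDMA command list for a transfer: stop the channel, emit one
 * INPUT_MORE/OUTPUT_MORE descriptor per chunk of up to 0xfe00 bytes of
 * each scatterlist segment, turn the last data descriptor into *_LAST and
 * append a DBDMA_STOP, then point the channel at the table. Returns 1 on
 * success, 0 to make the caller fall back to PIO.
 */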
1453 static int pmac_ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
1454 {
1455 ide_hwif_t *hwif = drive->hwif;
1456 pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
1457 struct dbdma_cmd *table;
1458 volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
1459 struct scatterlist *sg;
1460 int wr = !!(cmd->tf_flags & IDE_TFLAG_WRITE);
1461 int i = cmd->sg_nents, count = 0;
1462
1463
1464 table = (struct dbdma_cmd *) pmif->dma_table_cpu;
1465
1466
1467 writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma->control);
1468 while (readl(&dma->status) & RUN)
1469 udelay(1);
1470
1471
1472 sg = hwif->sg_table;
1473 while (i && sg_dma_len(sg)) {
1474 u32 cur_addr;
1475 u32 cur_len;
1476
1477 cur_addr = sg_dma_address(sg);
1478 cur_len = sg_dma_len(sg);
1479
1480 if (pmif->broken_dma && cur_addr & (L1_CACHE_BYTES - 1)) {
1481 if (pmif->broken_dma_warn == 0) {
1482 printk(KERN_WARNING "%s: DMA on non aligned address, "
1483 "switching to PIO on Ohare chipset\n", drive->name);
1484 pmif->broken_dma_warn = 1;
1485 }
1486 return 0;
1487 }
1488 while (cur_len) {
1489 unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00;
1490
1491 if (count++ >= MAX_DCMDS) {
1492 printk(KERN_WARNING "%s: DMA table too small\n",
1493 drive->name);
1494 return 0;
1495 }
1496 table->command = cpu_to_le16(wr? OUTPUT_MORE: INPUT_MORE);
1497 table->req_count = cpu_to_le16(tc);
1498 table->phy_addr = cpu_to_le32(cur_addr);
1499 table->cmd_dep = 0;
1500 table->xfer_status = 0;
1501 table->res_count = 0;
1502 cur_addr += tc;
1503 cur_len -= tc;
1504 ++table;
1505 }
1506 sg = sg_next(sg);
1507 i--;
1508 }
1509
1510
1511 if (count) {
1512 table[-1].command = cpu_to_le16(wr? OUTPUT_LAST: INPUT_LAST);
1513
1514 memset(table, 0, sizeof(struct dbdma_cmd));
1515 table->command = cpu_to_le16(DBDMA_STOP);
1516 mb();
1517 writel(hwif->dmatable_dma, &dma->cmdptr);
1518 return 1;
1519 }
1520
1521 printk(KERN_DEBUG "%s: empty DMA table?\n", drive->name);
1522
1523 return 0;
1524 }
1525
1526
1527
1528
1529
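/*
 * dma_setup hook: build the descriptor table and, on the KeyLargo ATA-4
 * cell with UDMA enabled, reprogram the timing register with the read- or
 * write-specific value. Returns 0 on success, non-zero to fall back to
 * PIO.
 */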
1530 static int pmac_ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
1531 {
1532 ide_hwif_t *hwif = drive->hwif;
1533 pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
1534 u8 unit = drive->dn & 1, ata4 = (pmif->kind == controller_kl_ata4);
1535 u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE);
1536
1537 if (pmac_ide_build_dmatable(drive, cmd) == 0)
1538 return 1;
1539
1540
1541 if (ata4 && (pmif->timings[unit] & TR_66_UDMA_EN)) {
1542 writel(pmif->timings[unit] + (write ? 0 : 0x00800000UL),
1543 PMAC_IDE_REG(IDE_TIMING_CONFIG));
1544 (void)readl(PMAC_IDE_REG(IDE_TIMING_CONFIG));
1545 }
1546
1547 return 0;
1548 }
1549
1550
1551
1552
1553
1554 static void
1555 pmac_ide_dma_start(ide_drive_t *drive)
1556 {
1557 ide_hwif_t *hwif = drive->hwif;
1558 pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
1559 volatile struct dbdma_regs __iomem *dma;
1560
1561 dma = pmif->dma_regs;
1562
1563 writel((RUN << 16) | RUN, &dma->control);
1564
1565 (void)readl(&dma->control);
1566 }
1567
1568
1569
1570
1571 static int
1572 pmac_ide_dma_end (ide_drive_t *drive)
1573 {
1574 ide_hwif_t *hwif = drive->hwif;
1575 pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
1576 volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
1577 u32 dstat;
1578
1579 dstat = readl(&dma->status);
1580 writel(((RUN|WAKE|DEAD) << 16), &dma->control);
1581
1582
1583
1584
1585
1586 return (dstat & (RUN|DEAD)) != RUN;
1587 }
1588
1589
1590
1591
1592
1593
1594
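/*
 * dma_test_irq hook: if the DBDMA channel has gone idle (ACTIVE clear)
 * the transfer is done; otherwise force a FLUSH and busy-wait (bounded)
 * for it to complete so residual data is pushed out before the drive
 * status is read. The interrupt is claimed in both cases.
 */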
1595 static int
1596 pmac_ide_dma_test_irq (ide_drive_t *drive)
1597 {
1598 ide_hwif_t *hwif = drive->hwif;
1599 pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
1600 volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
1601 unsigned long status, timeout;
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618 status = readl(&dma->status);
1619 if (!(status & ACTIVE))
1620 return 1;
1621
1622
1623
1624
1625
1626
1627
1628 udelay(1);
1629 writel((FLUSH << 16) | FLUSH, &dma->control);
1630 timeout = 0;
1631 for (;;) {
1632 udelay(1);
1633 status = readl(&dma->status);
1634 if ((status & FLUSH) == 0)
1635 break;
1636 if (++timeout > 100) {
1637 printk(KERN_WARNING "ide%d, ide_dma_test_irq timeout flushing channel\n",
1638 hwif->index);
1639 break;
1640 }
1641 }
1642 return 1;
1643 }
1644
1645 static void pmac_ide_dma_host_set(ide_drive_t *drive, int on)
1646 {
1647 }
1648
1649 static void
1650 pmac_ide_dma_lost_irq (ide_drive_t *drive)
1651 {
1652 ide_hwif_t *hwif = drive->hwif;
1653 pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
1654 volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
1655 unsigned long status = readl(&dma->status);
1656
1657 printk(KERN_ERR "ide-pmac lost interrupt, dma status: %lx\n", status);
1658 }
1659
1660 static const struct ide_dma_ops pmac_dma_ops = {
1661 .dma_host_set = pmac_ide_dma_host_set,
1662 .dma_setup = pmac_ide_dma_setup,
1663 .dma_start = pmac_ide_dma_start,
1664 .dma_end = pmac_ide_dma_end,
1665 .dma_test_irq = pmac_ide_dma_test_irq,
1666 .dma_lost_irq = pmac_ide_dma_lost_irq,
1667 };
1668
1669
1670
1671
1672
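/*
 * Per-port DMA init: requires a PCI parent device and DBDMA registers,
 * allocates a coherent descriptor buffer sized for MAX_DCMDS data
 * commands plus two spare slots, and caps the scatterlist at MAX_DCMDS
 * entries.
 */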
1673 static int pmac_ide_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
1674 {
1675 pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
1676 struct pci_dev *dev = to_pci_dev(hwif->dev);
1677
1678
1679
1680
1681 if (dev == NULL || pmif->dma_regs == 0)
1682 return -ENODEV;
1683
1684
1685
1686
1687
1688 pmif->dma_table_cpu = dma_alloc_coherent(&dev->dev,
1689 (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
1690 &hwif->dmatable_dma, GFP_KERNEL);
1691 if (pmif->dma_table_cpu == NULL) {
1692 printk(KERN_ERR "%s: unable to allocate DMA command list\n",
1693 hwif->name);
1694 return -ENOMEM;
1695 }
1696
1697 hwif->sg_max_nents = MAX_DCMDS;
1698
1699 return 0;
1700 }
1701
1702 module_init(pmac_ide_probe);
1703
1704 MODULE_LICENSE("GPL");