This source file includes the following definitions:
- pata_icside_irqenable_arcin_v5
- pata_icside_irqdisable_arcin_v5
- pata_icside_irqenable_arcin_v6
- pata_icside_irqdisable_arcin_v6
- pata_icside_irqpending_arcin_v6
- pata_icside_set_dmamode
- pata_icside_bmdma_setup
- pata_icside_bmdma_start
- pata_icside_bmdma_stop
- pata_icside_bmdma_status
- icside_dma_init
- pata_icside_postreset
- pata_icside_setup_ioaddr
- pata_icside_register_v5
- pata_icside_register_v6
- pata_icside_add_ports
- pata_icside_probe
- pata_icside_shutdown
- pata_icside_remove
- pata_icside_init
- pata_icside_exit
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
#include <linux/libata.h>

#include <asm/dma.h>
#include <asm/ecard.h>

#define DRV_NAME "pata_icside"

#define ICS_IDENT_OFFSET 0x2280

#define ICS_ARCIN_V5_INTRSTAT 0x0000
#define ICS_ARCIN_V5_INTROFFSET 0x0004

#define ICS_ARCIN_V6_INTROFFSET_1 0x2200
#define ICS_ARCIN_V6_INTRSTAT_1 0x2290
#define ICS_ARCIN_V6_INTROFFSET_2 0x3200
#define ICS_ARCIN_V6_INTRSTAT_2 0x3290

struct portinfo {
        unsigned int dataoffset;
        unsigned int ctrloffset;
        unsigned int stepping;
};

static const struct portinfo pata_icside_portinfo_v5 = {
        .dataoffset = 0x2800,
        .ctrloffset = 0x2b80,
        .stepping = 6,
};

static const struct portinfo pata_icside_portinfo_v6_1 = {
        .dataoffset = 0x2000,
        .ctrloffset = 0x2380,
        .stepping = 6,
};

static const struct portinfo pata_icside_portinfo_v6_2 = {
        .dataoffset = 0x3000,
        .ctrloffset = 0x3380,
        .stepping = 6,
};

struct pata_icside_state {
        void __iomem *irq_port;
        void __iomem *ioc_base;
        unsigned int type;
        unsigned int dma;
        struct {
                u8 port_sel;
                u8 disabled;
                unsigned int speed[ATA_MAX_DEVICES];
        } port[2];
};

struct pata_icside_info {
        struct pata_icside_state *state;
        struct expansion_card *ec;
        void __iomem *base;
        void __iomem *irqaddr;
        unsigned int irqmask;
        const expansioncard_ops_t *irqops;
        unsigned int mwdma_mask;
        unsigned int nr_ports;
        const struct portinfo *port[2];
        unsigned long raw_base;
        unsigned long raw_ioc_base;
};

#define ICS_TYPE_A3IN 0
#define ICS_TYPE_A3USER 1
#define ICS_TYPE_V6 3
#define ICS_TYPE_V5 15
#define ICS_TYPE_NOTYPE ((unsigned int)-1)

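/*
 * v5 card interrupt control: writing to the card's interrupt enable
 * register enables the card IRQ; reading it disables the IRQ.
 * This hook enables interrupts from the card.
 */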
static void pata_icside_irqenable_arcin_v5(struct expansion_card *ec, int irqnr)
{
        struct pata_icside_state *state = ec->irq_data;

        writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}

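/*
 * Disable interrupts from a v5 card by reading its interrupt
 * enable register.
 */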
static void pata_icside_irqdisable_arcin_v5(struct expansion_card *ec, int irqnr)
{
        struct pata_icside_state *state = ec->irq_data;

        readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}

static const expansioncard_ops_t pata_icside_ops_arcin_v5 = {
        .irqenable = pata_icside_irqenable_arcin_v5,
        .irqdisable = pata_icside_irqdisable_arcin_v5,
};

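/*
 * v6 cards carry two IDE interfaces, each with its own interrupt
 * enable register.  Re-enable interrupts for every port that has not
 * been marked disabled.
 */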
static void pata_icside_irqenable_arcin_v6(struct expansion_card *ec, int irqnr)
{
        struct pata_icside_state *state = ec->irq_data;
        void __iomem *base = state->irq_port;

        if (!state->port[0].disabled)
                writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
        if (!state->port[1].disabled)
                writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
}

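/*
 * Disable interrupts from both interfaces of a v6 card by reading
 * their interrupt enable registers.
 */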
static void pata_icside_irqdisable_arcin_v6(struct expansion_card *ec, int irqnr)
{
        struct pata_icside_state *state = ec->irq_data;

        readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
        readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
}

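/*
 * Report whether either interface of a v6 card has an interrupt pending.
 */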
static int pata_icside_irqpending_arcin_v6(struct expansion_card *ec)
{
        struct pata_icside_state *state = ec->irq_data;

        return readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 ||
               readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1;
}

static const expansioncard_ops_t pata_icside_ops_arcin_v6 = {
        .irqenable = pata_icside_irqenable_arcin_v6,
        .irqdisable = pata_icside_irqdisable_arcin_v6,
        .irqpending = pata_icside_irqpending_arcin_v6,
};

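/*
 * SG-DMA support: transfers are driven by the machine's IOMD DMA
 * controller (note IOMD_DMA_BOUNDARY below) rather than a PCI-style
 * bus-master engine, so the bmdma hooks program that controller directly.
 *
 * Translate the device's DMA timing requirements into one of the four
 * IOMD cycle types (A-D) and remember the resulting cycle speed for use
 * when the DMA channel is programmed in pata_icside_bmdma_setup().
 */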
static void pata_icside_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
        struct pata_icside_state *state = ap->host->private_data;
        struct ata_timing t;
        unsigned int cycle;
        char iomd_type;

        /*
         * Compute the timing for the device's DMA mode; give up if
         * libata cannot provide one.
         */
        if (ata_timing_compute(adev, adev->dma_mode, &t, 1000, 1))
                return;

        /*
         * Pick the fastest IOMD cycle type whose active, recovery and
         * cycle times still satisfy the computed timing.
         */
        if (t.active <= 50 && t.recover <= 375 && t.cycle <= 425)
                iomd_type = 'D', cycle = 187;
        else if (t.active <= 125 && t.recover <= 375 && t.cycle <= 500)
                iomd_type = 'C', cycle = 250;
        else if (t.active <= 200 && t.recover <= 550 && t.cycle <= 750)
                iomd_type = 'B', cycle = 437;
        else
                iomd_type = 'A', cycle = 562;

        ata_dev_info(adev, "timings: act %dns rec %dns cyc %dns (%c)\n",
                     t.active, t.recover, t.cycle, iomd_type);

        state->port[ap->port_no].speed[adev->devno] = cycle;
}

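/*
 * Prepare a DMA transfer: route the DMA signals to the right interface,
 * program the channel's speed, scatterlist and direction, and issue the
 * taskfile command.  The transfer itself is started in bmdma_start.
 */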
static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct pata_icside_state *state = ap->host->private_data;
        unsigned int write = qc->tf.flags & ATA_TFLAG_WRITE;

        /*
         * We are simplex; BUG if we try to fiddle with the DMA
         * channel while it is still active.
         */
        BUG_ON(dma_channel_active(state->dma));

        /*
         * Route the DMA signals to the correct interface.
         */
        writeb(state->port[ap->port_no].port_sel, state->ioc_base);

        set_dma_speed(state->dma, state->port[ap->port_no].speed[qc->dev->devno]);
        set_dma_sg(state->dma, qc->sg, qc->n_elem);
        set_dma_mode(state->dma, write ? DMA_MODE_WRITE : DMA_MODE_READ);

        /* issue the read/write command to the drive */
        ap->ops->sff_exec_command(ap, &qc->tf);
}

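/*
 * Start the DMA transfer set up in pata_icside_bmdma_setup().
 */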
static void pata_icside_bmdma_start(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct pata_icside_state *state = ap->host->private_data;

        BUG_ON(dma_channel_active(state->dma));
        enable_dma(state->dma);
}

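/*
 * Stop the DMA channel and let the interface settle.
 */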
static void pata_icside_bmdma_stop(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct pata_icside_state *state = ap->host->private_data;

        disable_dma(state->dma);

        /* pause the interface, as ata_bmdma_stop() does */
        ata_sff_dma_pause(ap);
}

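/*
 * Report DMA interrupt status by reading the interrupt status register
 * of the interface this port belongs to (only the v6 code path enables
 * DMA).
 */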
static u8 pata_icside_bmdma_status(struct ata_port *ap)
{
        struct pata_icside_state *state = ap->host->private_data;
        void __iomem *irq_port;

        irq_port = state->irq_port + (ap->port_no ? ICS_ARCIN_V6_INTRSTAT_2 :
                                                    ICS_ARCIN_V6_INTRSTAT_1);

        return readb(irq_port) & 1 ? ATA_DMA_INTR : 0;
}

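/*
 * Claim the card's DMA channel, if it has one, and set a default cycle
 * speed for each device; a successful claim enables MWDMA2.
 */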
static int icside_dma_init(struct pata_icside_info *info)
{
        struct pata_icside_state *state = info->state;
        struct expansion_card *ec = info->ec;
        int i;

        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                state->port[0].speed[i] = 480;
                state->port[1].speed[i] = 480;
        }

        if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
                state->dma = ec->dma;
                info->mwdma_mask = ATA_MWDMA2;
        }

        return 0;
}

static struct scsi_host_template pata_icside_sht = {
        ATA_BASE_SHT(DRV_NAME),
        .sg_tablesize = SG_MAX_SEGMENTS,
        .dma_boundary = IOMD_DMA_BOUNDARY,
};

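/*
 * After a reset, if nothing was found on either device of this port,
 * mark the port disabled and (on v6 hardware) mask its interrupt so a
 * floating IRQ line cannot raise spurious interrupts.
 */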
static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
{
        struct ata_port *ap = link->ap;
        struct pata_icside_state *state = ap->host->private_data;

        if (classes[0] != ATA_DEV_NONE || classes[1] != ATA_DEV_NONE)
                return ata_sff_postreset(link, classes);

        state->port[ap->port_no].disabled = 1;

        if (state->type == ICS_TYPE_V6) {
                /*
                 * Disable interrupts from this port: a read of the
                 * interrupt enable register masks the port's IRQ.
                 */
                void __iomem *irq_port = state->irq_port +
                        (ap->port_no ? ICS_ARCIN_V6_INTROFFSET_2 :
                                       ICS_ARCIN_V6_INTROFFSET_1);
                readb(irq_port);
        }
}

static struct ata_port_operations pata_icside_port_ops = {
        .inherits = &ata_bmdma_port_ops,
        /* DMA uses the IOMD scatterlist directly, so no PRD setup is needed */
        .qc_prep = ata_noop_qc_prep,
        .sff_data_xfer = ata_sff_data_xfer32,
        .bmdma_setup = pata_icside_bmdma_setup,
        .bmdma_start = pata_icside_bmdma_start,
        .bmdma_stop = pata_icside_bmdma_stop,
        .bmdma_status = pata_icside_bmdma_status,

        .cable_detect = ata_cable_40wire,
        .set_dmamode = pata_icside_set_dmamode,
        .postreset = pata_icside_postreset,

        .port_start = ATA_OP_NULL,
};

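/*
 * Fill in the taskfile register addresses for a port.  The registers
 * are spaced 1 << stepping bytes apart from the data register base.
 */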
static void pata_icside_setup_ioaddr(struct ata_port *ap, void __iomem *base,
                                     struct pata_icside_info *info,
                                     const struct portinfo *port)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        void __iomem *cmd = base + port->dataoffset;

        ioaddr->cmd_addr = cmd;
        ioaddr->data_addr = cmd + (ATA_REG_DATA << port->stepping);
        ioaddr->error_addr = cmd + (ATA_REG_ERR << port->stepping);
        ioaddr->feature_addr = cmd + (ATA_REG_FEATURE << port->stepping);
        ioaddr->nsect_addr = cmd + (ATA_REG_NSECT << port->stepping);
        ioaddr->lbal_addr = cmd + (ATA_REG_LBAL << port->stepping);
        ioaddr->lbam_addr = cmd + (ATA_REG_LBAM << port->stepping);
        ioaddr->lbah_addr = cmd + (ATA_REG_LBAH << port->stepping);
        ioaddr->device_addr = cmd + (ATA_REG_DEVICE << port->stepping);
        ioaddr->status_addr = cmd + (ATA_REG_STATUS << port->stepping);
        ioaddr->command_addr = cmd + (ATA_REG_CMD << port->stepping);

        ioaddr->ctl_addr = base + port->ctrloffset;
        ioaddr->altstatus_addr = ioaddr->ctl_addr;

        ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
                      info->raw_base + port->dataoffset,
                      info->raw_base + port->ctrloffset);

        if (info->raw_ioc_base)
                ata_port_desc(ap, "iocbase 0x%lx", info->raw_ioc_base);
}

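/*
 * Set up a v5 card: a single interface in the MEMC space, with the
 * interrupt status readable at ICS_ARCIN_V5_INTRSTAT.
 */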
static int pata_icside_register_v5(struct pata_icside_info *info)
{
        struct pata_icside_state *state = info->state;
        void __iomem *base;

        base = ecardm_iomap(info->ec, ECARD_RES_MEMC, 0, 0);
        if (!base)
                return -ENOMEM;

        state->irq_port = base;

        info->base = base;
        info->irqaddr = base + ICS_ARCIN_V5_INTRSTAT;
        info->irqmask = 1;
        info->irqops = &pata_icside_ops_arcin_v5;
        info->nr_ports = 1;
        info->port[0] = &pata_icside_portinfo_v5;

        info->raw_base = ecard_resource_start(info->ec, ECARD_RES_MEMC);

        return 0;
}

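/*
 * Set up a v6 card: two interfaces, addressed through the EASI region
 * when the card provides one, with DMA support where available.
 */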
static int pata_icside_register_v6(struct pata_icside_info *info)
{
        struct pata_icside_state *state = info->state;
        struct expansion_card *ec = info->ec;
        void __iomem *ioc_base, *easi_base;
        unsigned int sel = 0;

        ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
        if (!ioc_base)
                return -ENOMEM;

        easi_base = ioc_base;

        if (ecard_resource_flags(ec, ECARD_RES_EASI)) {
                easi_base = ecardm_iomap(ec, ECARD_RES_EASI, 0, 0);
                if (!easi_base)
                        return -ENOMEM;

                /*
                 * Enable access to the EASI region.
                 */
                sel = 1 << 5;
        }

        writeb(sel, ioc_base);

        state->irq_port = easi_base;
        state->ioc_base = ioc_base;
        state->port[0].port_sel = sel;
        state->port[1].port_sel = sel | 1;

        info->base = easi_base;
        info->irqops = &pata_icside_ops_arcin_v6;
        info->nr_ports = 2;
        info->port[0] = &pata_icside_portinfo_v6_1;
        info->port[1] = &pata_icside_portinfo_v6_2;

        info->raw_base = ecard_resource_start(ec, ECARD_RES_EASI);
        info->raw_ioc_base = ecard_resource_start(ec, ECARD_RES_IOCFAST);

        return icside_dma_init(info);
}

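/*
 * Common port registration: hook up the card's interrupt handling,
 * allocate the ATA host and ports, and activate the host.
 */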
static int pata_icside_add_ports(struct pata_icside_info *info)
{
        struct expansion_card *ec = info->ec;
        struct ata_host *host;
        int i;

        if (info->irqaddr) {
                ec->irqaddr = info->irqaddr;
                ec->irqmask = info->irqmask;
        }
        if (info->irqops)
                ecard_setirq(ec, info->irqops, info->state);

        /*
         * Be on the safe side - disable interrupts until the host
         * is activated.
         */
        ec->ops->irqdisable(ec, ec->irq);

        host = ata_host_alloc(&ec->dev, info->nr_ports);
        if (!host)
                return -ENOMEM;

        host->private_data = info->state;
        host->flags = ATA_HOST_SIMPLEX;

        for (i = 0; i < info->nr_ports; i++) {
                struct ata_port *ap = host->ports[i];

                ap->pio_mask = ATA_PIO4;
                ap->mwdma_mask = info->mwdma_mask;
                ap->flags |= ATA_FLAG_SLAVE_POSS;
                ap->ops = &pata_icside_port_ops;

                pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]);
        }

        return ata_host_activate(host, ec->irq, ata_bmdma_interrupt, 0,
                                 &pata_icside_sht);
}

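/*
 * Probe: identify the card type from its ID bits and register the
 * appropriate interface(s).
 */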
static int pata_icside_probe(struct expansion_card *ec,
                             const struct ecard_id *id)
{
        struct pata_icside_state *state;
        struct pata_icside_info info;
        void __iomem *idmem;
        int ret;

        ret = ecard_request_resources(ec);
        if (ret)
                goto out;

        state = devm_kzalloc(&ec->dev, sizeof(*state), GFP_KERNEL);
        if (!state) {
                ret = -ENOMEM;
                goto release;
        }

        state->type = ICS_TYPE_NOTYPE;
        state->dma = NO_DMA;

        idmem = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
        if (idmem) {
                unsigned int type;

                type = readb(idmem + ICS_IDENT_OFFSET) & 1;
                type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1;
                type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2;
                type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3;
                ecardm_iounmap(ec, idmem);

                state->type = type;
        }

        memset(&info, 0, sizeof(info));
        info.state = state;
        info.ec = ec;

        switch (state->type) {
        case ICS_TYPE_A3IN:
                dev_warn(&ec->dev, "A3IN unsupported\n");
                ret = -ENODEV;
                break;

        case ICS_TYPE_A3USER:
                dev_warn(&ec->dev, "A3USER unsupported\n");
                ret = -ENODEV;
                break;

        case ICS_TYPE_V5:
                ret = pata_icside_register_v5(&info);
                break;

        case ICS_TYPE_V6:
                ret = pata_icside_register_v6(&info);
                break;

        default:
                dev_warn(&ec->dev, "unknown interface type\n");
                ret = -ENODEV;
                break;
        }

        if (ret == 0)
                ret = pata_icside_add_ports(&info);

        if (ret == 0)
                goto out;

 release:
        ecard_release_resources(ec);
 out:
        return ret;
}

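/*
 * Shutdown: quiesce the card so the machine can reboot cleanly.
 */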
static void pata_icside_shutdown(struct expansion_card *ec)
{
        struct ata_host *host = ecard_get_drvdata(ec);
        unsigned long flags;

        /*
         * Disable interrupts from this card before we reprogram it
         * below.
         */
        local_irq_save(flags);
        ec->ops->irqdisable(ec, ec->irq);
        local_irq_restore(flags);

        /*
         * Reset the interface select register so the card ROM can be
         * read after a soft reboot.  This also disables access to the
         * IDE taskfile via the EASI region.
         */
        if (host) {
                struct pata_icside_state *state = host->private_data;
                if (state->ioc_base)
                        writeb(0, state->ioc_base);
        }
}

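/*
 * Remove: detach the ATA host, quiesce the card and release its DMA
 * channel and resources.
 */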
static void pata_icside_remove(struct expansion_card *ec)
{
        struct ata_host *host = ecard_get_drvdata(ec);
        struct pata_icside_state *state = host->private_data;

        ata_host_detach(host);

        pata_icside_shutdown(ec);

        /*
         * Release the DMA channel claimed in icside_dma_init().
         */
        if (state->dma != NO_DMA)
                free_dma(state->dma);

        ecard_release_resources(ec);
}

static const struct ecard_id pata_icside_ids[] = {
        { MANU_ICS, PROD_ICS_IDE },
        { MANU_ICS2, PROD_ICS2_IDE },
        { 0xffff, 0xffff }
};

static struct ecard_driver pata_icside_driver = {
        .probe = pata_icside_probe,
        .remove = pata_icside_remove,
        .shutdown = pata_icside_shutdown,
        .id_table = pata_icside_ids,
        .drv = {
                .name = DRV_NAME,
        },
};

static int __init pata_icside_init(void)
{
        return ecard_register_driver(&pata_icside_driver);
}

static void __exit pata_icside_exit(void)
{
        ecard_remove_driver(&pata_icside_driver);
}

MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ICS PATA driver");

module_init(pata_icside_init);
module_exit(pata_icside_exit);