/*
 * Copyright (c) 1996-2004 Russell King.
 *
 * Please note that this platform does not support 32-bit IDE IO.
 */

#include <linux/string.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/ide.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/io.h>

#include <asm/dma.h>
#include <asm/ecard.h>

#define DRV_NAME "icside"

#define ICS_IDENT_OFFSET		0x2280

#define ICS_ARCIN_V5_INTRSTAT		0x0000
#define ICS_ARCIN_V5_INTROFFSET		0x0004
#define ICS_ARCIN_V5_IDEOFFSET		0x2800
#define ICS_ARCIN_V5_IDEALTOFFSET	0x2b80
#define ICS_ARCIN_V5_IDESTEPPING	6

#define ICS_ARCIN_V6_IDEOFFSET_1	0x2000
#define ICS_ARCIN_V6_INTROFFSET_1	0x2200
#define ICS_ARCIN_V6_INTRSTAT_1		0x2290
#define ICS_ARCIN_V6_IDEALTOFFSET_1	0x2380
#define ICS_ARCIN_V6_IDEOFFSET_2	0x3000
#define ICS_ARCIN_V6_INTROFFSET_2	0x3200
#define ICS_ARCIN_V6_INTRSTAT_2		0x3290
#define ICS_ARCIN_V6_IDEALTOFFSET_2	0x3380
#define ICS_ARCIN_V6_IDESTEPPING	6

struct cardinfo {
	unsigned int dataoffset;
	unsigned int ctrloffset;
	unsigned int stepping;
};

static struct cardinfo icside_cardinfo_v5 = {
	.dataoffset	= ICS_ARCIN_V5_IDEOFFSET,
	.ctrloffset	= ICS_ARCIN_V5_IDEALTOFFSET,
	.stepping	= ICS_ARCIN_V5_IDESTEPPING,
};

static struct cardinfo icside_cardinfo_v6_1 = {
	.dataoffset	= ICS_ARCIN_V6_IDEOFFSET_1,
	.ctrloffset	= ICS_ARCIN_V6_IDEALTOFFSET_1,
	.stepping	= ICS_ARCIN_V6_IDESTEPPING,
};

static struct cardinfo icside_cardinfo_v6_2 = {
	.dataoffset	= ICS_ARCIN_V6_IDEOFFSET_2,
	.ctrloffset	= ICS_ARCIN_V6_IDEALTOFFSET_2,
	.stepping	= ICS_ARCIN_V6_IDESTEPPING,
};

struct icside_state {
	unsigned int channel;
	unsigned int enabled;
	void __iomem *irq_port;
	void __iomem *ioc_base;
	unsigned int sel;
	unsigned int type;
	struct ide_host *host;
};

#define ICS_TYPE_A3IN	0
#define ICS_TYPE_A3USER	1
#define ICS_TYPE_V6	3
#define ICS_TYPE_V5	15
#define ICS_TYPE_NOTYPE	((unsigned int)-1)

/* ---------------- Version 5 PCB Support Functions --------------------- */
/* Prototype: icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
 * Purpose  : enable interrupts from card
 */
static void icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
{
	struct icside_state *state = ec->irq_data;

	writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}

/* Prototype: icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
 * Purpose  : disable interrupts from card
 */
static void icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
{
	struct icside_state *state = ec->irq_data;

	readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}

static const expansioncard_ops_t icside_ops_arcin_v5 = {
	.irqenable	= icside_irqenable_arcin_v5,
	.irqdisable	= icside_irqdisable_arcin_v5,
};


/* ---------------- Version 6 PCB Support Functions --------------------- */
/* Prototype: icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
 * Purpose  : enable interrupts from card
 */
static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
	struct icside_state *state = ec->irq_data;
	void __iomem *base = state->irq_port;

	state->enabled = 1;

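	/*
	 * A write to a channel's INTROFFSET register unmasks its IRQ and
	 * a read masks it (the same convention as the v5 handlers above),
	 * so enable the active channel and mask the other one.
	 */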
	switch (state->channel) {
	case 0:
		writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
		readb(base + ICS_ARCIN_V6_INTROFFSET_2);
		break;
	case 1:
		writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
		readb(base + ICS_ARCIN_V6_INTROFFSET_1);
		break;
	}
}

/* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
 * Purpose  : disable interrupts from card
 */
static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
	struct icside_state *state = ec->irq_data;

	state->enabled = 0;

	readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
	readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
}

/* Prototype: icside_irqpending_arcin_v6 (struct expansion_card *ec)
 * Purpose  : detect an active interrupt from card
 */
static int icside_irqpending_arcin_v6(struct expansion_card *ec)
{
	struct icside_state *state = ec->irq_data;

	return readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 ||
	       readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1;
}

static const expansioncard_ops_t icside_ops_arcin_v6 = {
	.irqenable	= icside_irqenable_arcin_v6,
	.irqdisable	= icside_irqdisable_arcin_v6,
	.irqpending	= icside_irqpending_arcin_v6,
};

/*
 * Handle routing of interrupts.  This is called before
 * we write the command to the drive.
 */
static void icside_maskproc(ide_drive_t *drive, int mask)
{
	ide_hwif_t *hwif = drive->hwif;
	struct expansion_card *ec = ECARD_DEV(hwif->dev);
	struct icside_state *state = ecard_get_drvdata(ec);
	unsigned long flags;

	local_irq_save(flags);

	state->channel = hwif->channel;

	if (state->enabled && !mask) {
		switch (hwif->channel) {
		case 0:
			writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
			readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
			break;
		case 1:
			writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
			readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
			break;
		}
	} else {
		readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
		readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
	}

	local_irq_restore(flags);
}

static const struct ide_port_ops icside_v6_no_dma_port_ops = {
	.maskproc	= icside_maskproc,
};

#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
/*
 * SG-DMA support.
 *
 * Similar to the BM-DMA, but we use the RiscPC's IOMD DMA controllers.
 * There is only one DMA controller per card, which means that only
 * one drive can be accessed at one time.  NOTE! We do not enforce that
 * here, but we rely on the main IDE driver spotting that both
 * interfaces use the same IRQ, which should guarantee this.
 */

/*
 * Configure the IOMD to give the appropriate timings for the transfer
 * mode being requested.  We take the advice of the ATA standards, and
 * calculate the cycle time based on the transfer mode, and the EIDE
 * MW DMA specs that the drive provides in the IDENTIFY command.
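 *
 * The cycle time worked out here is stashed with ide_set_drivedata()
 * and later handed to set_dma_speed() in icside_dma_setup(), which
 * programs the IOMD for the transfer.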
 *
 * We have the following IOMD DMA modes to choose from:
 *
 *	Type	Active		Recovery	Cycle
 *	A	250 (250)	312 (550)	562 (800)
 *	B	187		250		437
 *	C	125 (125)	125 (375)	250 (500)
 *	D	62		125		187
 *
 * (figures in brackets are actual measured timings)
 *
 * However, we also need to take care of the read/write active and
 * recovery timings:
 *
 *		Read	Write
 *	Mode	Active -- Recovery --	Cycle	IOMD type
 *	MW0	215	50	215	480	A
 *	MW1	80	50	50	150	C
 *	MW2	70	25	25	120	C
 */
static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	unsigned long cycle_time = 0;
	int use_dma_info = 0;
	const u8 xfer_mode = drive->dma_mode;

	switch (xfer_mode) {
	case XFER_MW_DMA_2:
		cycle_time = 250;
		use_dma_info = 1;
		break;

	case XFER_MW_DMA_1:
		cycle_time = 250;
		use_dma_info = 1;
		break;

	case XFER_MW_DMA_0:
		cycle_time = 480;
		break;

	case XFER_SW_DMA_2:
	case XFER_SW_DMA_1:
	case XFER_SW_DMA_0:
		cycle_time = 480;
		break;
	}

	/*
	 * If we're going to be doing MW_DMA_1 or MW_DMA_2, we should
	 * take care to note the values in the ID...
	 */
	if (use_dma_info && drive->id[ATA_ID_EIDE_DMA_TIME] > cycle_time)
		cycle_time = drive->id[ATA_ID_EIDE_DMA_TIME];

	ide_set_drivedata(drive, (void *)cycle_time);

	printk(KERN_INFO "%s: %s selected (peak %luMB/s)\n",
	       drive->name, ide_xfer_verbose(xfer_mode),
	       2000 / (cycle_time ? cycle_time : (unsigned long) -1));
}

static const struct ide_port_ops icside_v6_port_ops = {
	.set_dma_mode	= icside_set_dma_mode,
	.maskproc	= icside_maskproc,
};

static void icside_dma_host_set(ide_drive_t *drive, int on)
{
}

static int icside_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct expansion_card *ec = ECARD_DEV(hwif->dev);

	disable_dma(ec->dma);

	return get_dma_residue(ec->dma) != 0;
}

static void icside_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct expansion_card *ec = ECARD_DEV(hwif->dev);

	/* We can not enable DMA on both channels simultaneously. */
	BUG_ON(dma_channel_active(ec->dma));
	enable_dma(ec->dma);
}

static int icside_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	struct expansion_card *ec = ECARD_DEV(hwif->dev);
	struct icside_state *state = ecard_get_drvdata(ec);
	unsigned int dma_mode;

	if (cmd->tf_flags & IDE_TFLAG_WRITE)
		dma_mode = DMA_MODE_WRITE;
	else
		dma_mode = DMA_MODE_READ;

	/*
	 * We can not enable DMA on both channels.
	 */
	BUG_ON(dma_channel_active(ec->dma));

	/*
	 * Ensure that we have the right interrupt routed.
	 */
	icside_maskproc(drive, 0);

	/*
	 * Route the DMA signals to the correct interface.
	 */
	writeb(state->sel | hwif->channel, state->ioc_base);

	/*
	 * Select the correct timing for this drive.
	 */
	set_dma_speed(ec->dma, (unsigned long)ide_get_drivedata(drive));

	/*
	 * Tell the DMA engine about the SG table and
	 * data direction.
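	 * (hwif->sg_table and cmd->sg_nents describe the scatterlist the
	 * IDE core has set up for this command.)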
	 */
	set_dma_sg(ec->dma, hwif->sg_table, cmd->sg_nents);
	set_dma_mode(ec->dma, dma_mode);

	return 0;
}

static int icside_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct expansion_card *ec = ECARD_DEV(hwif->dev);
	struct icside_state *state = ecard_get_drvdata(ec);

	return readb(state->irq_port +
		     (hwif->channel ?
			ICS_ARCIN_V6_INTRSTAT_2 :
			ICS_ARCIN_V6_INTRSTAT_1)) & 1;
}

static int icside_dma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
	hwif->dmatable_cpu = NULL;
	hwif->dmatable_dma = 0;

	return 0;
}

static const struct ide_dma_ops icside_v6_dma_ops = {
	.dma_host_set	= icside_dma_host_set,
	.dma_setup	= icside_dma_setup,
	.dma_start	= icside_dma_start,
	.dma_end	= icside_dma_end,
	.dma_test_irq	= icside_dma_test_irq,
	.dma_lost_irq	= ide_dma_lost_irq,
};
#endif

static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
	return -EOPNOTSUPP;
}

static void icside_setup_ports(struct ide_hw *hw, void __iomem *base,
			       struct cardinfo *info, struct expansion_card *ec)
{
	unsigned long port = (unsigned long)base + info->dataoffset;

	hw->io_ports.data_addr	 = port;
	hw->io_ports.error_addr	 = port + (1 << info->stepping);
	hw->io_ports.nsect_addr	 = port + (2 << info->stepping);
	hw->io_ports.lbal_addr	 = port + (3 << info->stepping);
	hw->io_ports.lbam_addr	 = port + (4 << info->stepping);
	hw->io_ports.lbah_addr	 = port + (5 << info->stepping);
	hw->io_ports.device_addr = port + (6 << info->stepping);
	hw->io_ports.status_addr = port + (7 << info->stepping);
	hw->io_ports.ctl_addr	 = (unsigned long)base + info->ctrloffset;

	hw->irq = ec->irq;
	hw->dev = &ec->dev;
}

static const struct ide_port_info icside_v5_port_info = {
	.host_flags		= IDE_HFLAG_NO_DMA,
	.chipset		= ide_acorn,
};

static int icside_register_v5(struct icside_state *state,
			      struct expansion_card *ec)
{
	void __iomem *base;
	struct ide_host *host;
	struct ide_hw hw, *hws[] = { &hw };
	int ret;

	base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
	if (!base)
		return -ENOMEM;

	state->irq_port = base;

	ec->irqaddr = base + ICS_ARCIN_V5_INTRSTAT;
	ec->irqmask = 1;

	ecard_setirq(ec, &icside_ops_arcin_v5, state);

	/*
	 * Be on the safe side - disable interrupts
	 */
	icside_irqdisable_arcin_v5(ec, 0);

	icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec);

	host = ide_host_alloc(&icside_v5_port_info, hws, 1);
	if (host == NULL)
		return -ENODEV;

	state->host = host;

	ecard_set_drvdata(ec, state);

	ret = ide_host_register(host, &icside_v5_port_info, hws);
	if (ret)
		goto err_free;

	return 0;
err_free:
	ide_host_free(host);
	ecard_set_drvdata(ec, NULL);
	return ret;
}

static const struct ide_port_info icside_v6_port_info __initconst = {
	.init_dma		= icside_dma_off_init,
	.port_ops		= &icside_v6_no_dma_port_ops,
	.host_flags		= IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
	.mwdma_mask		= ATA_MWDMA2,
	.swdma_mask		= ATA_SWDMA2,
	.chipset		= ide_acorn,
};

static int icside_register_v6(struct icside_state *state,
			      struct expansion_card *ec)
{
	void __iomem *ioc_base, *easi_base;
	struct ide_host *host;
	unsigned int sel = 0;
	int ret;
	struct ide_hw hw[2], *hws[] = { &hw[0], &hw[1] };
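	/*
	 * Local copy of the port info: DMA ops are patched into it below
	 * if the card's DMA channel can be claimed.
	 */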
	struct ide_port_info d = icside_v6_port_info;

	ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
	if (!ioc_base) {
		ret = -ENOMEM;
		goto out;
	}

	easi_base = ioc_base;

	if (ecard_resource_flags(ec, ECARD_RES_EASI)) {
		easi_base = ecardm_iomap(ec, ECARD_RES_EASI, 0, 0);
		if (!easi_base) {
			ret = -ENOMEM;
			goto out;
		}

		/*
		 * Enable access to the EASI region.
		 */
		sel = 1 << 5;
	}

	writeb(sel, ioc_base);

	ecard_setirq(ec, &icside_ops_arcin_v6, state);

	state->irq_port = easi_base;
	state->ioc_base = ioc_base;
	state->sel = sel;

	/*
	 * Be on the safe side - disable interrupts
	 */
	icside_irqdisable_arcin_v6(ec, 0);

	icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec);
	icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec);

	host = ide_host_alloc(&d, hws, 2);
	if (host == NULL)
		return -ENODEV;

	state->host = host;

	ecard_set_drvdata(ec, state);

#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
	if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
		d.init_dma = icside_dma_init;
		d.port_ops = &icside_v6_port_ops;
		d.dma_ops = &icside_v6_dma_ops;
	}
#endif

	ret = ide_host_register(host, &d, hws);
	if (ret)
		goto err_free;

	return 0;
err_free:
	ide_host_free(host);
	if (d.dma_ops)
		free_dma(ec->dma);
	ecard_set_drvdata(ec, NULL);
out:
	return ret;
}

static int icside_probe(struct expansion_card *ec, const struct ecard_id *id)
{
	struct icside_state *state;
	void __iomem *idmem;
	int ret;

	ret = ecard_request_resources(ec);
	if (ret)
		goto out;

	state = kzalloc(sizeof(struct icside_state), GFP_KERNEL);
	if (!state) {
		ret = -ENOMEM;
		goto release;
	}

	state->type = ICS_TYPE_NOTYPE;

	idmem = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
	if (idmem) {
		unsigned int type;

		type = readb(idmem + ICS_IDENT_OFFSET) & 1;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3;
		ecardm_iounmap(ec, idmem);

		state->type = type;
	}

	switch (state->type) {
	case ICS_TYPE_A3IN:
		dev_warn(&ec->dev, "A3IN unsupported\n");
		ret = -ENODEV;
		break;

	case ICS_TYPE_A3USER:
		dev_warn(&ec->dev, "A3USER unsupported\n");
		ret = -ENODEV;
		break;

	case ICS_TYPE_V5:
		ret = icside_register_v5(state, ec);
		break;

	case ICS_TYPE_V6:
		ret = icside_register_v6(state, ec);
		break;

	default:
		dev_warn(&ec->dev, "unknown interface type\n");
		ret = -ENODEV;
		break;
	}

	if (ret == 0)
		goto out;

	kfree(state);
 release:
	ecard_release_resources(ec);
 out:
	return ret;
}

static void icside_remove(struct expansion_card *ec)
{
	struct icside_state *state = ecard_get_drvdata(ec);

	switch (state->type) {
	case ICS_TYPE_V5:
		/* FIXME: tell IDE to stop using the interface */

		/* Disable interrupts */
		icside_irqdisable_arcin_v5(ec, 0);
		break;

	case ICS_TYPE_V6:
		/* FIXME: tell IDE to stop using the interface */
		if (ec->dma != NO_DMA)
			free_dma(ec->dma);

		/* Disable interrupts */
		icside_irqdisable_arcin_v6(ec, 0);

		/* Reset the ROM pointer/EASI selection */
		writeb(0, state->ioc_base);
		break;
	}

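	/* Common teardown for both card types. */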
	ecard_set_drvdata(ec, NULL);

	kfree(state);
	ecard_release_resources(ec);
}

static void icside_shutdown(struct expansion_card *ec)
{
	struct icside_state *state = ecard_get_drvdata(ec);
	unsigned long flags;

	/*
	 * Disable interrupts from this card.  We need to do
	 * this before disabling EASI since we may be accessing
	 * this register via that region.
	 */
	local_irq_save(flags);
	ec->ops->irqdisable(ec, 0);
	local_irq_restore(flags);

	/*
	 * Reset the ROM pointer so that we can read the ROM
	 * after a soft reboot.  This also disables access to
	 * the IDE taskfile via the EASI region.
	 */
	if (state->ioc_base)
		writeb(0, state->ioc_base);
}

static const struct ecard_id icside_ids[] = {
	{ MANU_ICS,  PROD_ICS_IDE  },
	{ MANU_ICS2, PROD_ICS2_IDE },
	{ 0xffff, 0xffff }
};

static struct ecard_driver icside_driver = {
	.probe		= icside_probe,
	.remove		= icside_remove,
	.shutdown	= icside_shutdown,
	.id_table	= icside_ids,
	.drv = {
		.name	= "icside",
	},
};

static int __init icside_init(void)
{
	return ecard_register_driver(&icside_driver);
}

static void __exit icside_exit(void)
{
	ecard_remove_driver(&icside_driver);
}

MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ICS IDE driver");

module_init(icside_init);
module_exit(icside_exit);