1/* 2 * Copyright © 2009 - Maxim Levitsky 3 * driver for Ricoh xD readers 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License version 2 as 7 * published by the Free Software Foundation. 8 */ 9 10#include <linux/kernel.h> 11#include <linux/module.h> 12#include <linux/jiffies.h> 13#include <linux/workqueue.h> 14#include <linux/interrupt.h> 15#include <linux/pci.h> 16#include <linux/pci_ids.h> 17#include <linux/delay.h> 18#include <linux/slab.h> 19#include <asm/byteorder.h> 20#include <linux/sched.h> 21#include "sm_common.h" 22#include "r852.h" 23 24 25static bool r852_enable_dma = 1; 26module_param(r852_enable_dma, bool, S_IRUGO); 27MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)"); 28 29static int debug; 30module_param(debug, int, S_IRUGO | S_IWUSR); 31MODULE_PARM_DESC(debug, "Debug level (0-2)"); 32 33/* read register */ 34static inline uint8_t r852_read_reg(struct r852_device *dev, int address) 35{ 36 uint8_t reg = readb(dev->mmio + address); 37 return reg; 38} 39 40/* write register */ 41static inline void r852_write_reg(struct r852_device *dev, 42 int address, uint8_t value) 43{ 44 writeb(value, dev->mmio + address); 45 mmiowb(); 46} 47 48 49/* read dword sized register */ 50static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address) 51{ 52 uint32_t reg = le32_to_cpu(readl(dev->mmio + address)); 53 return reg; 54} 55 56/* write dword sized register */ 57static inline void r852_write_reg_dword(struct r852_device *dev, 58 int address, uint32_t value) 59{ 60 writel(cpu_to_le32(value), dev->mmio + address); 61 mmiowb(); 62} 63 64/* returns pointer to our private structure */ 65static inline struct r852_device *r852_get_dev(struct mtd_info *mtd) 66{ 67 struct nand_chip *chip = mtd->priv; 68 return chip->priv; 69} 70 71 72/* check if controller supports dma */ 73static void r852_dma_test(struct r852_device *dev) 74{ 75 dev->dma_usable = 
(r852_read_reg(dev, R852_DMA_CAP) & 76 (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2); 77 78 if (!dev->dma_usable) 79 message("Non dma capable device detected, dma disabled"); 80 81 if (!r852_enable_dma) { 82 message("disabling dma on user request"); 83 dev->dma_usable = 0; 84 } 85} 86 87/* 88 * Enable dma. Enables ether first or second stage of the DMA, 89 * Expects dev->dma_dir and dev->dma_state be set 90 */ 91static void r852_dma_enable(struct r852_device *dev) 92{ 93 uint8_t dma_reg, dma_irq_reg; 94 95 /* Set up dma settings */ 96 dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS); 97 dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY); 98 99 if (dev->dma_dir) 100 dma_reg |= R852_DMA_READ; 101 102 if (dev->dma_state == DMA_INTERNAL) { 103 dma_reg |= R852_DMA_INTERNAL; 104 /* Precaution to make sure HW doesn't write */ 105 /* to random kernel memory */ 106 r852_write_reg_dword(dev, R852_DMA_ADDR, 107 cpu_to_le32(dev->phys_bounce_buffer)); 108 } else { 109 dma_reg |= R852_DMA_MEMORY; 110 r852_write_reg_dword(dev, R852_DMA_ADDR, 111 cpu_to_le32(dev->phys_dma_addr)); 112 } 113 114 /* Precaution: make sure write reached the device */ 115 r852_read_reg_dword(dev, R852_DMA_ADDR); 116 117 r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg); 118 119 /* Set dma irq */ 120 dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE); 121 r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 122 dma_irq_reg | 123 R852_DMA_IRQ_INTERNAL | 124 R852_DMA_IRQ_ERROR | 125 R852_DMA_IRQ_MEMORY); 126} 127 128/* 129 * Disable dma, called from the interrupt handler, which specifies 130 * success of the operation via 'error' argument 131 */ 132static void r852_dma_done(struct r852_device *dev, int error) 133{ 134 WARN_ON(dev->dma_stage == 0); 135 136 r852_write_reg_dword(dev, R852_DMA_IRQ_STA, 137 r852_read_reg_dword(dev, R852_DMA_IRQ_STA)); 138 139 r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0); 140 r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0); 141 142 /* 
Precaution to make sure HW doesn't write to random kernel memory */ 143 r852_write_reg_dword(dev, R852_DMA_ADDR, 144 cpu_to_le32(dev->phys_bounce_buffer)); 145 r852_read_reg_dword(dev, R852_DMA_ADDR); 146 147 dev->dma_error = error; 148 dev->dma_stage = 0; 149 150 if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer) 151 pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN, 152 dev->dma_dir ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); 153} 154 155/* 156 * Wait, till dma is done, which includes both phases of it 157 */ 158static int r852_dma_wait(struct r852_device *dev) 159{ 160 long timeout = wait_for_completion_timeout(&dev->dma_done, 161 msecs_to_jiffies(1000)); 162 if (!timeout) { 163 dbg("timeout waiting for DMA interrupt"); 164 return -ETIMEDOUT; 165 } 166 167 return 0; 168} 169 170/* 171 * Read/Write one page using dma. Only pages can be read (512 bytes) 172*/ 173static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read) 174{ 175 int bounce = 0; 176 unsigned long flags; 177 int error; 178 179 dev->dma_error = 0; 180 181 /* Set dma direction */ 182 dev->dma_dir = do_read; 183 dev->dma_stage = 1; 184 reinit_completion(&dev->dma_done); 185 186 dbg_verbose("doing dma %s ", do_read ? "read" : "write"); 187 188 /* Set initial dma state: for reading first fill on board buffer, 189 from device, for writes first fill the buffer from memory*/ 190 dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY; 191 192 /* if incoming buffer is not page aligned, we should do bounce */ 193 if ((unsigned long)buf & (R852_DMA_LEN-1)) 194 bounce = 1; 195 196 if (!bounce) { 197 dev->phys_dma_addr = pci_map_single(dev->pci_dev, (void *)buf, 198 R852_DMA_LEN, 199 (do_read ? 
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE)); 200 201 if (pci_dma_mapping_error(dev->pci_dev, dev->phys_dma_addr)) 202 bounce = 1; 203 } 204 205 if (bounce) { 206 dbg_verbose("dma: using bounce buffer"); 207 dev->phys_dma_addr = dev->phys_bounce_buffer; 208 if (!do_read) 209 memcpy(dev->bounce_buffer, buf, R852_DMA_LEN); 210 } 211 212 /* Enable DMA */ 213 spin_lock_irqsave(&dev->irqlock, flags); 214 r852_dma_enable(dev); 215 spin_unlock_irqrestore(&dev->irqlock, flags); 216 217 /* Wait till complete */ 218 error = r852_dma_wait(dev); 219 220 if (error) { 221 r852_dma_done(dev, error); 222 return; 223 } 224 225 if (do_read && bounce) 226 memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN); 227} 228 229/* 230 * Program data lines of the nand chip to send data to it 231 */ 232static void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 233{ 234 struct r852_device *dev = r852_get_dev(mtd); 235 uint32_t reg; 236 237 /* Don't allow any access to hardware if we suspect card removal */ 238 if (dev->card_unstable) 239 return; 240 241 /* Special case for whole sector read */ 242 if (len == R852_DMA_LEN && dev->dma_usable) { 243 r852_do_dma(dev, (uint8_t *)buf, 0); 244 return; 245 } 246 247 /* write DWORD chinks - faster */ 248 while (len >= 4) { 249 reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24; 250 r852_write_reg_dword(dev, R852_DATALINE, reg); 251 buf += 4; 252 len -= 4; 253 254 } 255 256 /* write rest */ 257 while (len > 0) { 258 r852_write_reg(dev, R852_DATALINE, *buf++); 259 len--; 260 } 261} 262 263/* 264 * Read data lines of the nand chip to retrieve data 265 */ 266static void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 267{ 268 struct r852_device *dev = r852_get_dev(mtd); 269 uint32_t reg; 270 271 if (dev->card_unstable) { 272 /* since we can't signal error here, at least, return 273 predictable buffer */ 274 memset(buf, 0, len); 275 return; 276 } 277 278 /* special case for whole sector read */ 279 if (len == R852_DMA_LEN && 
dev->dma_usable) { 280 r852_do_dma(dev, buf, 1); 281 return; 282 } 283 284 /* read in dword sized chunks */ 285 while (len >= 4) { 286 287 reg = r852_read_reg_dword(dev, R852_DATALINE); 288 *buf++ = reg & 0xFF; 289 *buf++ = (reg >> 8) & 0xFF; 290 *buf++ = (reg >> 16) & 0xFF; 291 *buf++ = (reg >> 24) & 0xFF; 292 len -= 4; 293 } 294 295 /* read the reset by bytes */ 296 while (len--) 297 *buf++ = r852_read_reg(dev, R852_DATALINE); 298} 299 300/* 301 * Read one byte from nand chip 302 */ 303static uint8_t r852_read_byte(struct mtd_info *mtd) 304{ 305 struct r852_device *dev = r852_get_dev(mtd); 306 307 /* Same problem as in r852_read_buf.... */ 308 if (dev->card_unstable) 309 return 0; 310 311 return r852_read_reg(dev, R852_DATALINE); 312} 313 314/* 315 * Control several chip lines & send commands 316 */ 317static void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl) 318{ 319 struct r852_device *dev = r852_get_dev(mtd); 320 321 if (dev->card_unstable) 322 return; 323 324 if (ctrl & NAND_CTRL_CHANGE) { 325 326 dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND | 327 R852_CTL_ON | R852_CTL_CARDENABLE); 328 329 if (ctrl & NAND_ALE) 330 dev->ctlreg |= R852_CTL_DATA; 331 332 if (ctrl & NAND_CLE) 333 dev->ctlreg |= R852_CTL_COMMAND; 334 335 if (ctrl & NAND_NCE) 336 dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON); 337 else 338 dev->ctlreg &= ~R852_CTL_WRITE; 339 340 /* when write is stareted, enable write access */ 341 if (dat == NAND_CMD_ERASE1) 342 dev->ctlreg |= R852_CTL_WRITE; 343 344 r852_write_reg(dev, R852_CTL, dev->ctlreg); 345 } 346 347 /* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need 348 to set write mode */ 349 if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) { 350 dev->ctlreg |= R852_CTL_WRITE; 351 r852_write_reg(dev, R852_CTL, dev->ctlreg); 352 } 353 354 if (dat != NAND_CMD_NONE) 355 r852_write_reg(dev, R852_DATALINE, dat); 356} 357 358/* 359 * Wait till card is ready. 
360 * based on nand_wait, but returns errors on DMA error 361 */ 362static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip) 363{ 364 struct r852_device *dev = chip->priv; 365 366 unsigned long timeout; 367 int status; 368 369 timeout = jiffies + (chip->state == FL_ERASING ? 370 msecs_to_jiffies(400) : msecs_to_jiffies(20)); 371 372 while (time_before(jiffies, timeout)) 373 if (chip->dev_ready(mtd)) 374 break; 375 376 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); 377 status = (int)chip->read_byte(mtd); 378 379 /* Unfortunelly, no way to send detailed error status... */ 380 if (dev->dma_error) { 381 status |= NAND_STATUS_FAIL; 382 dev->dma_error = 0; 383 } 384 return status; 385} 386 387/* 388 * Check if card is ready 389 */ 390 391static int r852_ready(struct mtd_info *mtd) 392{ 393 struct r852_device *dev = r852_get_dev(mtd); 394 return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY); 395} 396 397 398/* 399 * Set ECC engine mode 400*/ 401 402static void r852_ecc_hwctl(struct mtd_info *mtd, int mode) 403{ 404 struct r852_device *dev = r852_get_dev(mtd); 405 406 if (dev->card_unstable) 407 return; 408 409 switch (mode) { 410 case NAND_ECC_READ: 411 case NAND_ECC_WRITE: 412 /* enable ecc generation/check*/ 413 dev->ctlreg |= R852_CTL_ECC_ENABLE; 414 415 /* flush ecc buffer */ 416 r852_write_reg(dev, R852_CTL, 417 dev->ctlreg | R852_CTL_ECC_ACCESS); 418 419 r852_read_reg_dword(dev, R852_DATALINE); 420 r852_write_reg(dev, R852_CTL, dev->ctlreg); 421 return; 422 423 case NAND_ECC_READSYN: 424 /* disable ecc generation */ 425 dev->ctlreg &= ~R852_CTL_ECC_ENABLE; 426 r852_write_reg(dev, R852_CTL, dev->ctlreg); 427 } 428} 429 430/* 431 * Calculate ECC, only used for writes 432 */ 433 434static int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat, 435 uint8_t *ecc_code) 436{ 437 struct r852_device *dev = r852_get_dev(mtd); 438 struct sm_oob *oob = (struct sm_oob *)ecc_code; 439 uint32_t ecc1, ecc2; 440 441 if (dev->card_unstable) 442 return 0; 
443 444 dev->ctlreg &= ~R852_CTL_ECC_ENABLE; 445 r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS); 446 447 ecc1 = r852_read_reg_dword(dev, R852_DATALINE); 448 ecc2 = r852_read_reg_dword(dev, R852_DATALINE); 449 450 oob->ecc1[0] = (ecc1) & 0xFF; 451 oob->ecc1[1] = (ecc1 >> 8) & 0xFF; 452 oob->ecc1[2] = (ecc1 >> 16) & 0xFF; 453 454 oob->ecc2[0] = (ecc2) & 0xFF; 455 oob->ecc2[1] = (ecc2 >> 8) & 0xFF; 456 oob->ecc2[2] = (ecc2 >> 16) & 0xFF; 457 458 r852_write_reg(dev, R852_CTL, dev->ctlreg); 459 return 0; 460} 461 462/* 463 * Correct the data using ECC, hw did almost everything for us 464 */ 465 466static int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat, 467 uint8_t *read_ecc, uint8_t *calc_ecc) 468{ 469 uint16_t ecc_reg; 470 uint8_t ecc_status, err_byte; 471 int i, error = 0; 472 473 struct r852_device *dev = r852_get_dev(mtd); 474 475 if (dev->card_unstable) 476 return 0; 477 478 if (dev->dma_error) { 479 dev->dma_error = 0; 480 return -1; 481 } 482 483 r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS); 484 ecc_reg = r852_read_reg_dword(dev, R852_DATALINE); 485 r852_write_reg(dev, R852_CTL, dev->ctlreg); 486 487 for (i = 0 ; i <= 1 ; i++) { 488 489 ecc_status = (ecc_reg >> 8) & 0xFF; 490 491 /* ecc uncorrectable error */ 492 if (ecc_status & R852_ECC_FAIL) { 493 dbg("ecc: unrecoverable error, in half %d", i); 494 error = -1; 495 goto exit; 496 } 497 498 /* correctable error */ 499 if (ecc_status & R852_ECC_CORRECTABLE) { 500 501 err_byte = ecc_reg & 0xFF; 502 dbg("ecc: recoverable error, " 503 "in half %d, byte %d, bit %d", i, 504 err_byte, ecc_status & R852_ECC_ERR_BIT_MSK); 505 506 dat[err_byte] ^= 507 1 << (ecc_status & R852_ECC_ERR_BIT_MSK); 508 error++; 509 } 510 511 dat += 256; 512 ecc_reg >>= 16; 513 } 514exit: 515 return error; 516} 517 518/* 519 * This is copy of nand_read_oob_std 520 * nand_read_oob_syndrome assumes we can send column address - we can't 521 */ 522static int r852_read_oob(struct mtd_info *mtd, struct 
nand_chip *chip,
			int page)
{
	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}

/*
 * Start the nand engine
 */

static void r852_engine_enable(struct r852_device *dev)
{
	/* order of CTL vs HW writes depends on current HW state -
	   NOTE(review): exact reason for R852_HW_UNKNOWN is not visible
	   here, presumably a quirk of the controller */
	if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
		r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
		r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
	} else {
		r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
		r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
	}
	msleep(300);
	r852_write_reg(dev, R852_CTL, 0);
}


/*
 * Stop the nand engine
 */

static void r852_engine_disable(struct r852_device *dev)
{
	r852_write_reg_dword(dev, R852_HW, 0);
	r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
}

/*
 * Test if card is present, result is cached in dev->card_detected
 */

static void r852_card_update_present(struct r852_device *dev)
{
	unsigned long flags;
	uint8_t reg;

	spin_lock_irqsave(&dev->irqlock, flags);
	reg = r852_read_reg(dev, R852_CARD_STA);
	dev->card_detected = !!(reg & R852_CARD_STA_PRESENT);
	spin_unlock_irqrestore(&dev->irqlock, flags);
}

/*
 * Update card detection IRQ state according to current card state
 * which is read in r852_card_update_present
 */
static void r852_update_card_detect(struct r852_device *dev)
{
	int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
	dev->card_unstable = 0;

	card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT);
	card_detect_reg |= R852_CARD_IRQ_GENABLE;

	/* arm only the edge we expect next: removal if a card is in,
	   insertion if the slot is empty */
	card_detect_reg |= dev->card_detected ?
		R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT;

	r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
}

/* sysfs 'media_type' attribute: reports "smartmedia" or "xd" */
static ssize_t r852_media_type_show(struct device *sys_dev,
			struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
	struct r852_device *dev = r852_get_dev(mtd);
	char *data = dev->sm ? "smartmedia" : "xd";

	strcpy(buf, data);
	return strlen(data);
}

static DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);


/* Detect properties of card in slot (type and write protection) */
static void r852_update_media_status(struct r852_device *dev)
{
	uint8_t reg;
	unsigned long flags;
	int readonly;

	spin_lock_irqsave(&dev->irqlock, flags);
	if (!dev->card_detected) {
		message("card removed");
		spin_unlock_irqrestore(&dev->irqlock, flags);
		return ;
	}

	readonly  = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO;
	reg = r852_read_reg(dev, R852_DMA_CAP);
	/* SmartMedia bit is only meaningful on DMA capable devices */
	dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT);

	message("detected %s %s card in slot",
		dev->sm ? "SmartMedia" : "xD",
		readonly ? "readonly" : "writeable");

	dev->readonly = readonly;
	spin_unlock_irqrestore(&dev->irqlock, flags);
}

/*
 * Register the nand device
 * Called when the card is detected
 */
static int r852_register_nand_device(struct r852_device *dev)
{
	dev->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);

	if (!dev->mtd)
		goto error1;

	WARN_ON(dev->card_registred);

	dev->mtd->owner = THIS_MODULE;
	dev->mtd->priv = dev->chip;
	dev->mtd->dev.parent = &dev->pci_dev->dev;

	if (dev->readonly)
		dev->chip->options |= NAND_ROM;

	r852_engine_enable(dev);

	if (sm_register_device(dev->mtd, dev->sm))
		goto error2;

	/* sysfs attribute is best-effort; failure is only reported */
	if (device_create_file(&dev->mtd->dev, &dev_attr_media_type))
		message("can't create media type sysfs attribute");

	dev->card_registred = 1;
	return 0;
error2:
	kfree(dev->mtd);
error1:
	/* Force card redetect */
	dev->card_detected = 0;
	return -1;
}

/*
 * Unregister the card
 */

static void r852_unregister_nand_device(struct r852_device *dev)
{
	if (!dev->card_registred)
		return;

	device_remove_file(&dev->mtd->dev, &dev_attr_media_type);
	nand_release(dev->mtd);
	r852_engine_disable(dev);
	dev->card_registred = 0;
	kfree(dev->mtd);
	dev->mtd = NULL;
}

/* Card state updater, runs delayed on the card workqueue */
static void r852_card_detect_work(struct work_struct *work)
{
	struct r852_device *dev =
		container_of(work, struct r852_device, card_detect_work.work);

	r852_card_update_present(dev);
	r852_update_card_detect(dev);
	dev->card_unstable = 0;

	/* False alarm */
	if (dev->card_detected == dev->card_registred)
		goto exit;

	/* Read media properties */
	r852_update_media_status(dev);

	/* Register the card */
	if (dev->card_detected)
		r852_register_nand_device(dev);
	else
		r852_unregister_nand_device(dev);
exit:
	r852_update_card_detect(dev);
}

/* Ack + disable IRQ generation */
static void r852_disable_irqs(struct r852_device *dev)
{
	uint8_t reg;
	reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
	r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK);

	reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
	r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
					reg & ~R852_DMA_IRQ_MASK);

	/* ack everything that might be pending */
	r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK);
	r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK);
}

/* Interrupt handler: card insert/remove events and the two-stage
   DMA state machine (device<->internal buffer, internal buffer<->memory) */
static irqreturn_t r852_irq(int irq, void *data)
{
	struct r852_device *dev = (struct r852_device *)data;

	/* note: dma_status keeps only the low byte of the dword status
	   register - presumably all interesting bits live there;
	   TODO(review): confirm against the datasheet */
	uint8_t card_status, dma_status;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	spin_lock_irqsave(&dev->irqlock, flags);

	/* handle card detection interrupts first */
	card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
	r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);

	if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) {

		ret = IRQ_HANDLED;
		dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT);

		/* we shouldn't receive any interrupts if we wait for card
			to settle */
		WARN_ON(dev->card_unstable);

		/* disable irqs while card is unstable */
		/* this will timeout DMA if active, but better that garbage */
		r852_disable_irqs(dev);

		if (dev->card_unstable)
			goto out;

		/* let, card state to settle a bit, and then do the work */
		dev->card_unstable = 1;
		queue_delayed_work(dev->card_workqueue,
			&dev->card_detect_work, msecs_to_jiffies(100));
		goto out;
	}


	/* Handle dma interrupts */
	dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA);
	r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status);

	if (dma_status & R852_DMA_IRQ_MASK) {

		ret = IRQ_HANDLED;

		if (dma_status & R852_DMA_IRQ_ERROR) {
			dbg("received dma error IRQ");
			r852_dma_done(dev, -EIO);
			complete(&dev->dma_done);
			goto out;
		}

		/* received DMA interrupt out of nowhere? */
		WARN_ON_ONCE(dev->dma_stage == 0);

		if (dev->dma_stage == 0)
			goto out;

		/* done device access */
		if (dev->dma_state == DMA_INTERNAL &&
				(dma_status & R852_DMA_IRQ_INTERNAL)) {

			dev->dma_state = DMA_MEMORY;
			dev->dma_stage++;
		}

		/* done memory DMA */
		if (dev->dma_state == DMA_MEMORY &&
				(dma_status & R852_DMA_IRQ_MEMORY)) {
			dev->dma_state = DMA_INTERNAL;
			dev->dma_stage++;
		}

		/* Enable 2nd half of dma dance */
		if (dev->dma_stage == 2)
			r852_dma_enable(dev);

		/* Operation done */
		if (dev->dma_stage == 3) {
			r852_dma_done(dev, 0);
			complete(&dev->dma_done);
		}
		goto out;
	}

	/* Handle unknown interrupts */
	if (dma_status)
		dbg("bad dma IRQ status = %x", dma_status);

	if (card_status & ~R852_CARD_STA_CD)
		dbg("strange card status = %x", card_status);

out:
	spin_unlock_irqrestore(&dev->irqlock, flags);
	return ret;
}

/* PCI probe: set up PCI, allocate nand chip + device state, map BAR 0,
   start the card detection machinery. The nand device itself is
   registered later, when a card is actually inserted. */
static int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	int error;
	struct nand_chip *chip;
	struct r852_device *dev;

	/* pci initialization */
	error = pci_enable_device(pci_dev);

	if (error)
		goto error1;

	pci_set_master(pci_dev);

	error = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
	if (error)
		goto error2;

	error = pci_request_regions(pci_dev, DRV_NAME);

	if (error)
		goto error3;

	error = -ENOMEM;

	/* init nand chip, but register it only on card insert */
	chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);

	if (!chip)
		goto error4;

	/* commands */
	chip->cmd_ctrl = r852_cmdctl;
	chip->waitfunc = r852_wait;
	chip->dev_ready = r852_ready;

	/* I/O */
	chip->read_byte = r852_read_byte;
	chip->read_buf = r852_read_buf;
	chip->write_buf = r852_write_buf;

	/* ecc */
	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
	chip->ecc.size = R852_DMA_LEN;
	chip->ecc.bytes = SM_OOB_SIZE;
	chip->ecc.strength = 2;
	chip->ecc.hwctl = r852_ecc_hwctl;
	chip->ecc.calculate = r852_ecc_calculate;
	chip->ecc.correct = r852_ecc_correct;

	/* TODO: hack */
	chip->ecc.read_oob = r852_read_oob;

	/* init our device structure */
	dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);

	if (!dev)
		goto error5;

	chip->priv = dev;
	dev->chip = chip;
	dev->pci_dev = pci_dev;
	pci_set_drvdata(pci_dev, dev);

	/* persistent coherent buffer, used whenever the caller's buffer
	   can't be DMA-mapped directly */
	dev->bounce_buffer = pci_alloc_consistent(pci_dev, R852_DMA_LEN,
		&dev->phys_bounce_buffer);

	if (!dev->bounce_buffer)
		goto error6;


	error = -ENODEV;
	dev->mmio = pci_ioremap_bar(pci_dev, 0);

	if (!dev->mmio)
		goto error7;

	error = -ENOMEM;
	dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);

	if (!dev->tmp_buffer)
		goto error8;

	init_completion(&dev->dma_done);

	/* freezable so card detection doesn't race with suspend */
	dev->card_workqueue = create_freezable_workqueue(DRV_NAME);

	if (!dev->card_workqueue)
		goto error9;

	INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work);

	/* shutdown everything - precaution */
	r852_engine_disable(dev);
	r852_disable_irqs(dev);

	r852_dma_test(dev);

	dev->irq = pci_dev->irq;
	spin_lock_init(&dev->irqlock);

	dev->card_detected = 0;
	r852_card_update_present(dev);

	/*register irq handler*/
	error = -ENODEV;
	if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED,
			  DRV_NAME, dev))
		goto error10;

	/* kick initial present test */
	queue_delayed_work(dev->card_workqueue,
		&dev->card_detect_work, 0);


	printk(KERN_NOTICE DRV_NAME ": driver loaded successfully\n");
	return 0;

error10:
	destroy_workqueue(dev->card_workqueue);
error9:
	kfree(dev->tmp_buffer);
error8:
	pci_iounmap(pci_dev, dev->mmio);
error7:
	pci_free_consistent(pci_dev, R852_DMA_LEN,
		dev->bounce_buffer, dev->phys_bounce_buffer);
error6:
	kfree(dev);
error5:
	kfree(chip);
error4:
	pci_release_regions(pci_dev);
error3:
error2:
	pci_disable_device(pci_dev);
error1:
	return error;
}

/* PCI remove: teardown mirrors probe, in reverse order */
static void r852_remove(struct pci_dev *pci_dev)
{
	struct r852_device *dev = pci_get_drvdata(pci_dev);

	/* Stop detect workqueue -
		we are going to unregister the device anyway*/
	cancel_delayed_work_sync(&dev->card_detect_work);
	destroy_workqueue(dev->card_workqueue);

	/* Unregister the device, this might make more IO */
	r852_unregister_nand_device(dev);

	/* Stop interrupts */
	r852_disable_irqs(dev);
	synchronize_irq(dev->irq);
	free_irq(dev->irq, dev);

	/* Cleanup */
	kfree(dev->tmp_buffer);
	pci_iounmap(pci_dev, dev->mmio);
	pci_free_consistent(pci_dev, R852_DMA_LEN,
		dev->bounce_buffer, dev->phys_bounce_buffer);

	kfree(dev->chip);
	kfree(dev);

	/* Shutdown the PCI device */
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
}

/* Quiesce the device on system shutdown (no full teardown needed) */
static void r852_shutdown(struct pci_dev *pci_dev)
{
	struct r852_device *dev = pci_get_drvdata(pci_dev);

	cancel_delayed_work_sync(&dev->card_detect_work);
	r852_disable_irqs(dev);
	synchronize_irq(dev->irq);
	pci_disable_device(pci_dev);
}

#ifdef CONFIG_PM_SLEEP
static int r852_suspend(struct device *device)
{
	struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));

	/* refuse to suspend while the card is selected/active */
	if (dev->ctlreg & R852_CTL_CARDENABLE)
		return -EBUSY;

	/* First make sure the detect work is gone */
	cancel_delayed_work_sync(&dev->card_detect_work);

	/* Turn off the interrupts and stop the device */
	r852_disable_irqs(dev);
	r852_engine_disable(dev);

	/* If card was pulled off just during the suspend, which is very
		unlikely, we will remove it on resume, it too late now
		anyway... */
	dev->card_unstable = 0;
	return 0;
}

static int r852_resume(struct device *device)
{
	struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));

	r852_disable_irqs(dev);
	r852_card_update_present(dev);
	r852_engine_disable(dev);


	/* If card status changed, just do the work */
	if (dev->card_detected != dev->card_registred) {
		dbg("card was %s during low power state",
			dev->card_detected ? "added" : "removed");

		queue_delayed_work(dev->card_workqueue,
		&dev->card_detect_work, msecs_to_jiffies(1000));
		return 0;
	}

	/* Otherwise, initialize the card */
	if (dev->card_registred) {
		r852_engine_enable(dev);
		dev->chip->select_chip(dev->mtd, 0);
		dev->chip->cmdfunc(dev->mtd, NAND_CMD_RESET, -1, -1);
		dev->chip->select_chip(dev->mtd, -1);
	}

	/* Program card detection IRQ */
	r852_update_card_detect(dev);
	return 0;
}
#endif

static const struct pci_device_id r852_pci_id_tbl[] = {

	{ PCI_VDEVICE(RICOH, 0x0852), },
	{ },
};

MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);

static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);

static struct pci_driver r852_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= r852_pci_id_tbl,
	.probe		= r852_probe,
	.remove		= r852_remove,
	.shutdown	= r852_shutdown,
	.driver.pm	= &r852_pm_ops,
};

module_pci_driver(r852_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");