drivers/ata/libata-sff.c

DEFINITIONS

This source file includes the following definitions.
  1. ata_sff_check_status
  2. ata_sff_altstatus
  3. ata_sff_irq_status
  4. ata_sff_sync
  5. ata_sff_pause
  6. ata_sff_dma_pause
  7. ata_sff_busy_sleep
  8. ata_sff_check_ready
  9. ata_sff_wait_ready
  10. ata_sff_set_devctl
  11. ata_sff_dev_select
  12. ata_dev_select
  13. ata_sff_irq_on
  14. ata_sff_tf_load
  15. ata_sff_tf_read
  16. ata_sff_exec_command
  17. ata_tf_to_host
  18. ata_sff_data_xfer
  19. ata_sff_data_xfer32
  20. ata_pio_sector
  21. ata_pio_sectors
  22. atapi_send_cdb
  23. __atapi_pio_bytes
  24. atapi_pio_bytes
  25. ata_hsm_ok_in_wq
  26. ata_hsm_qc_complete
  27. ata_sff_hsm_move
  28. ata_sff_queue_work
  29. ata_sff_queue_delayed_work
  30. ata_sff_queue_pio_task
  31. ata_sff_flush_pio_task
  32. ata_sff_pio_task
  33. ata_sff_qc_issue
  34. ata_sff_qc_fill_rtf
  35. ata_sff_idle_irq
  36. __ata_sff_port_intr
  37. ata_sff_port_intr
  38. __ata_sff_interrupt
  39. ata_sff_interrupt
  40. ata_sff_lost_interrupt
  41. ata_sff_freeze
  42. ata_sff_thaw
  43. ata_sff_prereset
  44. ata_devchk
  45. ata_sff_dev_classify
  46. ata_sff_wait_after_reset
  47. ata_bus_softreset
  48. ata_sff_softreset
  49. sata_sff_hardreset
  50. ata_sff_postreset
  51. ata_sff_drain_fifo
  52. ata_sff_error_handler
  53. ata_sff_std_ports
  54. ata_resources_present
  55. ata_pci_sff_init_host
  56. ata_pci_sff_prepare_host
  57. ata_pci_sff_activate_host
  58. ata_sff_find_valid_pi
  59. ata_pci_init_one
  60. ata_pci_sff_init_one
  61. ata_bmdma_fill_sg
  62. ata_bmdma_fill_sg_dumb
  63. ata_bmdma_qc_prep
  64. ata_bmdma_dumb_qc_prep
  65. ata_bmdma_qc_issue
  66. ata_bmdma_port_intr
  67. ata_bmdma_interrupt
  68. ata_bmdma_error_handler
  69. ata_bmdma_post_internal_cmd
  70. ata_bmdma_irq_clear
  71. ata_bmdma_setup
  72. ata_bmdma_start
  73. ata_bmdma_stop
  74. ata_bmdma_status
  75. ata_bmdma_port_start
  76. ata_bmdma_port_start32
  77. ata_pci_bmdma_clear_simplex
  78. ata_bmdma_nodma
  79. ata_pci_bmdma_init
  80. ata_pci_bmdma_prepare_host
  81. ata_pci_bmdma_init_one
  82. ata_sff_port_init
  83. ata_sff_init
  84. ata_sff_exit

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3  *  libata-sff.c - helper library for PCI IDE BMDMA
   4  *
   5  *  Maintained by:  Tejun Heo <tj@kernel.org>
   6  *                  Please ALWAYS copy linux-ide@vger.kernel.org
   7  *                  on emails.
   8  *
   9  *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
  10  *  Copyright 2003-2006 Jeff Garzik
  11  *
  12  *  libata documentation is available via 'make {ps|pdf}docs',
  13  *  as Documentation/driver-api/libata.rst
  14  *
  15  *  Hardware documentation available from http://www.t13.org/ and
  16  *  http://www.sata-io.org/
  17  */
  18 
  19 #include <linux/kernel.h>
  20 #include <linux/gfp.h>
  21 #include <linux/pci.h>
  22 #include <linux/module.h>
  23 #include <linux/libata.h>
  24 #include <linux/highmem.h>
  25 
  26 #include "libata.h"
  27 
  28 static struct workqueue_struct *ata_sff_wq;
  29 
  30 const struct ata_port_operations ata_sff_port_ops = {
  31         .inherits               = &ata_base_port_ops,
  32 
  33         .qc_prep                = ata_noop_qc_prep,
  34         .qc_issue               = ata_sff_qc_issue,
  35         .qc_fill_rtf            = ata_sff_qc_fill_rtf,
  36 
  37         .freeze                 = ata_sff_freeze,
  38         .thaw                   = ata_sff_thaw,
  39         .prereset               = ata_sff_prereset,
  40         .softreset              = ata_sff_softreset,
  41         .hardreset              = sata_sff_hardreset,
  42         .postreset              = ata_sff_postreset,
  43         .error_handler          = ata_sff_error_handler,
  44 
  45         .sff_dev_select         = ata_sff_dev_select,
  46         .sff_check_status       = ata_sff_check_status,
  47         .sff_tf_load            = ata_sff_tf_load,
  48         .sff_tf_read            = ata_sff_tf_read,
  49         .sff_exec_command       = ata_sff_exec_command,
  50         .sff_data_xfer          = ata_sff_data_xfer,
  51         .sff_drain_fifo         = ata_sff_drain_fifo,
  52 
  53         .lost_interrupt         = ata_sff_lost_interrupt,
  54 };
  55 EXPORT_SYMBOL_GPL(ata_sff_port_ops);
  56 
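/*
 * Example (illustrative sketch, not code from this file): a low-level
 * driver typically builds on ata_sff_port_ops by inheriting it and
 * overriding only the controller-specific hooks.  The structure name
 * and my_set_piomode() below are hypothetical placeholders.
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &ata_sff_port_ops,
 *		.sff_data_xfer	= ata_sff_data_xfer32,
 *		.set_piomode	= my_set_piomode,
 *	};
 */
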
  57 /**
  58  *      ata_sff_check_status - Read device status reg & clear interrupt
  59  *      @ap: port where the device is
  60  *
  61  *      Reads ATA taskfile status register for currently-selected device
  62  *      and returns its value. This also clears pending interrupts
  63  *      from this device.
  64  *
  65  *      LOCKING:
  66  *      Inherited from caller.
  67  */
  68 u8 ata_sff_check_status(struct ata_port *ap)
  69 {
  70         return ioread8(ap->ioaddr.status_addr);
  71 }
  72 EXPORT_SYMBOL_GPL(ata_sff_check_status);
  73 
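/*
 * Example (illustrative sketch, not code from this file): because every
 * polling and interrupt path uses ap->ops->sff_check_status(), a driver
 * whose Status register needs special treatment only has to override
 * this one hook.  my_check_status() and the double-read quirk shown are
 * hypothetical.
 *
 *	static u8 my_check_status(struct ata_port *ap)
 *	{
 *		// hypothetical quirk: the chip wants the register read twice
 *		ioread8(ap->ioaddr.status_addr);
 *		return ioread8(ap->ioaddr.status_addr);
 *	}
 */
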
  74 /**
  75  *      ata_sff_altstatus - Read device alternate status reg
  76  *      @ap: port where the device is
  77  *
  78  *      Reads ATA taskfile alternate status register for
  79  *      currently-selected device and returns its value.
  80  *
  81  *      Note: may NOT be used as the check_altstatus() entry in
  82  *      ata_port_operations.
  83  *
  84  *      LOCKING:
  85  *      Inherited from caller.
  86  */
  87 static u8 ata_sff_altstatus(struct ata_port *ap)
  88 {
  89         if (ap->ops->sff_check_altstatus)
  90                 return ap->ops->sff_check_altstatus(ap);
  91 
  92         return ioread8(ap->ioaddr.altstatus_addr);
  93 }
  94 
  95 /**
  96  *      ata_sff_irq_status - Check if the device is busy
  97  *      @ap: port where the device is
  98  *
  99  *      Determine if the port is currently busy. Uses altstatus
 100  *      if available in order to avoid clearing shared IRQ status
 101  *      when finding an IRQ source. Fortunately for us, devices
 102  *      without a ctl register don't share interrupt lines.
 103  *
 104  *      LOCKING:
 105  *      Inherited from caller.
 106  */
 107 static u8 ata_sff_irq_status(struct ata_port *ap)
 108 {
 109         u8 status;
 110 
 111         if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
 112                 status = ata_sff_altstatus(ap);
 113                 /* Not us: We are busy */
 114                 if (status & ATA_BUSY)
 115                         return status;
 116         }
 117         /* Clear INTRQ latch */
 118         status = ap->ops->sff_check_status(ap);
 119         return status;
 120 }
 121 
 122 /**
 123  *      ata_sff_sync - Flush writes
 124  *      @ap: Port to wait for.
 125  *
 126  *      CAUTION:
 127  *      If we have an mmio device with no ctl and no altstatus
 128  *      method this will fail. No such devices are known to exist.
 129  *
 130  *      LOCKING:
 131  *      Inherited from caller.
 132  */
 133 
 134 static void ata_sff_sync(struct ata_port *ap)
 135 {
 136         if (ap->ops->sff_check_altstatus)
 137                 ap->ops->sff_check_altstatus(ap);
 138         else if (ap->ioaddr.altstatus_addr)
 139                 ioread8(ap->ioaddr.altstatus_addr);
 140 }
 141 
 142 /**
 143  *      ata_sff_pause           -       Flush writes and wait 400nS
 144  *      @ap: Port to pause for.
 145  *
 146  *      CAUTION:
 147  *      If we have an mmio device with no ctl and no altstatus
 148  *      method this will fail. No such devices are known to exist.
 149  *
 150  *      LOCKING:
 151  *      Inherited from caller.
 152  */
 153 
 154 void ata_sff_pause(struct ata_port *ap)
 155 {
 156         ata_sff_sync(ap);
 157         ndelay(400);
 158 }
 159 EXPORT_SYMBOL_GPL(ata_sff_pause);
 160 
 161 /**
 162  *      ata_sff_dma_pause       -       Pause before commencing DMA
 163  *      @ap: Port to pause for.
 164  *
 165  *      Perform I/O fencing and ensure sufficient cycle delays occur
 166  *      for the HDMA1:0 transition
 167  */
 168 
 169 void ata_sff_dma_pause(struct ata_port *ap)
 170 {
 171         if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
 172                 /* An altstatus read will cause the needed delay without
 173                    messing up the IRQ status */
 174                 ata_sff_altstatus(ap);
 175                 return;
 176         }
 177         /* There are no DMA controllers without ctl. BUG here to ensure
 178            we never violate the HDMA1:0 transition timing and risk
 179            corruption. */
 180         BUG();
 181 }
 182 EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
 183 
 184 /**
 185  *      ata_sff_busy_sleep - sleep until BSY clears, or timeout
 186  *      @ap: port containing status register to be polled
 187  *      @tmout_pat: impatience timeout in msecs
 188  *      @tmout: overall timeout in msecs
 189  *
 190  *      Sleep until ATA Status register bit BSY clears,
 191  *      or a timeout occurs.
 192  *
 193  *      LOCKING:
 194  *      Kernel thread context (may sleep).
 195  *
 196  *      RETURNS:
 197  *      0 on success, -errno otherwise.
 198  */
 199 int ata_sff_busy_sleep(struct ata_port *ap,
 200                        unsigned long tmout_pat, unsigned long tmout)
 201 {
 202         unsigned long timer_start, timeout;
 203         u8 status;
 204 
 205         status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
 206         timer_start = jiffies;
 207         timeout = ata_deadline(timer_start, tmout_pat);
 208         while (status != 0xff && (status & ATA_BUSY) &&
 209                time_before(jiffies, timeout)) {
 210                 ata_msleep(ap, 50);
 211                 status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
 212         }
 213 
 214         if (status != 0xff && (status & ATA_BUSY))
 215                 ata_port_warn(ap,
 216                               "port is slow to respond, please be patient (Status 0x%x)\n",
 217                               status);
 218 
 219         timeout = ata_deadline(timer_start, tmout);
 220         while (status != 0xff && (status & ATA_BUSY) &&
 221                time_before(jiffies, timeout)) {
 222                 ata_msleep(ap, 50);
 223                 status = ap->ops->sff_check_status(ap);
 224         }
 225 
 226         if (status == 0xff)
 227                 return -ENODEV;
 228 
 229         if (status & ATA_BUSY) {
 230                 ata_port_err(ap,
 231                              "port failed to respond (%lu secs, Status 0x%x)\n",
 232                              DIV_ROUND_UP(tmout, 1000), status);
 233                 return -EBUSY;
 234         }
 235 
 236         return 0;
 237 }
 238 EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
 239 
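/*
 * Example (illustrative sketch, not code from this file): a typical
 * caller passes a short "impatience" timeout after which the warning is
 * printed, plus a longer overall timeout, e.g. the boot timeouts from
 * <linux/libata.h>.  The surrounding error handling is hypothetical.
 *
 *	rc = ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
 *	if (rc)
 *		return rc;
 */
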
 240 static int ata_sff_check_ready(struct ata_link *link)
 241 {
 242         u8 status = link->ap->ops->sff_check_status(link->ap);
 243 
 244         return ata_check_ready(status);
 245 }
 246 
 247 /**
 248  *      ata_sff_wait_ready - sleep until BSY clears, or timeout
 249  *      @link: SFF link to wait ready status for
 250  *      @deadline: deadline jiffies for the operation
 251  *
 252  *      Sleep until ATA Status register bit BSY clears, or timeout
 253  *      occurs.
 254  *
 255  *      LOCKING:
 256  *      Kernel thread context (may sleep).
 257  *
 258  *      RETURNS:
 259  *      0 on success, -errno otherwise.
 260  */
 261 int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
 262 {
 263         return ata_wait_ready(link, deadline, ata_sff_check_ready);
 264 }
 265 EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
 266 
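/*
 * Example (illustrative sketch, not code from this file): a driver's
 * custom softreset method commonly finishes by waiting for the device
 * to become ready before classification.  my_softreset() and the
 * controller-specific reset sequence are hypothetical.
 *
 *	static int my_softreset(struct ata_link *link, unsigned int *classes,
 *				unsigned long deadline)
 *	{
 *		// ... controller-specific reset sequence ...
 *		return ata_sff_wait_ready(link, deadline);
 *	}
 */
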
 267 /**
 268  *      ata_sff_set_devctl - Write device control reg
 269  *      @ap: port where the device is
 270  *      @ctl: value to write
 271  *
 272  *      Writes ATA taskfile device control register.
 273  *
 274  *      Note: may NOT be used as the sff_set_devctl() entry in
 275  *      ata_port_operations.
 276  *
 277  *      LOCKING:
 278  *      Inherited from caller.
 279  */
 280 static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
 281 {
 282         if (ap->ops->sff_set_devctl)
 283                 ap->ops->sff_set_devctl(ap, ctl);
 284         else
 285                 iowrite8(ctl, ap->ioaddr.ctl_addr);
 286 }
 287 
 288 /**
 289  *      ata_sff_dev_select - Select device 0/1 on ATA bus
 290  *      @ap: ATA channel to manipulate
 291  *      @device: ATA device (numbered from zero) to select
 292  *
 293  *      Use the method defined in the ATA specification to
 294  *      make either device 0, or device 1, active on the
 295  *      ATA channel.  Works with both PIO and MMIO.
 296  *
 297  *      May be used as the dev_select() entry in ata_port_operations.
 298  *
 299  *      LOCKING:
 300  *      caller.
 301  */
 302 void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
 303 {
 304         u8 tmp;
 305 
 306         if (device == 0)
 307                 tmp = ATA_DEVICE_OBS;
 308         else
 309                 tmp = ATA_DEVICE_OBS | ATA_DEV1;
 310 
 311         iowrite8(tmp, ap->ioaddr.device_addr);
 312         ata_sff_pause(ap);      /* needed; also flushes, for mmio */
 313 }
 314 EXPORT_SYMBOL_GPL(ata_sff_dev_select);
 315 
 316 /**
 317  *      ata_dev_select - Select device 0/1 on ATA bus
 318  *      @ap: ATA channel to manipulate
 319  *      @device: ATA device (numbered from zero) to select
 320  *      @wait: non-zero to wait for Status register BSY bit to clear
 321  *      @can_sleep: non-zero if context allows sleeping
 322  *
 323  *      Use the method defined in the ATA specification to
 324  *      make either device 0, or device 1, active on the
 325  *      ATA channel.
 326  *
 327  *      This is a high-level version of ata_sff_dev_select(), which
 328  *      additionally provides the services of inserting the proper
 329  *      pauses and status polling, where needed.
 330  *
 331  *      LOCKING:
 332  *      caller.
 333  */
 334 static void ata_dev_select(struct ata_port *ap, unsigned int device,
 335                            unsigned int wait, unsigned int can_sleep)
 336 {
 337         if (ata_msg_probe(ap))
 338                 ata_port_info(ap, "ata_dev_select: ENTER, device %u, wait %u\n",
 339                               device, wait);
 340 
 341         if (wait)
 342                 ata_wait_idle(ap);
 343 
 344         ap->ops->sff_dev_select(ap, device);
 345 
 346         if (wait) {
 347                 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
 348                         ata_msleep(ap, 150);
 349                 ata_wait_idle(ap);
 350         }
 351 }
 352 
 353 /**
 354  *      ata_sff_irq_on - Enable interrupts on a port.
 355  *      @ap: Port on which interrupts are enabled.
 356  *
 357  *      Enable interrupts on a legacy IDE device using MMIO or PIO,
 358  *      wait for idle, clear any pending interrupts.
 359  *
 360  *      Note: may NOT be used as the sff_irq_on() entry in
 361  *      ata_port_operations.
 362  *
 363  *      LOCKING:
 364  *      Inherited from caller.
 365  */
 366 void ata_sff_irq_on(struct ata_port *ap)
 367 {
 368         struct ata_ioports *ioaddr = &ap->ioaddr;
 369 
 370         if (ap->ops->sff_irq_on) {
 371                 ap->ops->sff_irq_on(ap);
 372                 return;
 373         }
 374 
 375         ap->ctl &= ~ATA_NIEN;
 376         ap->last_ctl = ap->ctl;
 377 
 378         if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
 379                 ata_sff_set_devctl(ap, ap->ctl);
 380         ata_wait_idle(ap);
 381 
 382         if (ap->ops->sff_irq_clear)
 383                 ap->ops->sff_irq_clear(ap);
 384 }
 385 EXPORT_SYMBOL_GPL(ata_sff_irq_on);
 386 
 387 /**
 388  *      ata_sff_tf_load - send taskfile registers to host controller
 389  *      @ap: Port to which output is sent
 390  *      @tf: ATA taskfile register set
 391  *
 392  *      Outputs ATA taskfile to standard ATA host controller.
 393  *
 394  *      LOCKING:
 395  *      Inherited from caller.
 396  */
 397 void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
 398 {
 399         struct ata_ioports *ioaddr = &ap->ioaddr;
 400         unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
 401 
 402         if (tf->ctl != ap->last_ctl) {
 403                 if (ioaddr->ctl_addr)
 404                         iowrite8(tf->ctl, ioaddr->ctl_addr);
 405                 ap->last_ctl = tf->ctl;
 406                 ata_wait_idle(ap);
 407         }
 408 
 409         if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
 410                 WARN_ON_ONCE(!ioaddr->ctl_addr);
 411                 iowrite8(tf->hob_feature, ioaddr->feature_addr);
 412                 iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
 413                 iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
 414                 iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
 415                 iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
 416                 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
 417                         tf->hob_feature,
 418                         tf->hob_nsect,
 419                         tf->hob_lbal,
 420                         tf->hob_lbam,
 421                         tf->hob_lbah);
 422         }
 423 
 424         if (is_addr) {
 425                 iowrite8(tf->feature, ioaddr->feature_addr);
 426                 iowrite8(tf->nsect, ioaddr->nsect_addr);
 427                 iowrite8(tf->lbal, ioaddr->lbal_addr);
 428                 iowrite8(tf->lbam, ioaddr->lbam_addr);
 429                 iowrite8(tf->lbah, ioaddr->lbah_addr);
 430                 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
 431                         tf->feature,
 432                         tf->nsect,
 433                         tf->lbal,
 434                         tf->lbam,
 435                         tf->lbah);
 436         }
 437 
 438         if (tf->flags & ATA_TFLAG_DEVICE) {
 439                 iowrite8(tf->device, ioaddr->device_addr);
 440                 VPRINTK("device 0x%X\n", tf->device);
 441         }
 442 
 443         ata_wait_idle(ap);
 444 }
 445 EXPORT_SYMBOL_GPL(ata_sff_tf_load);
 446 
 447 /**
 448  *      ata_sff_tf_read - input device's ATA taskfile shadow registers
 449  *      @ap: Port from which input is read
 450  *      @tf: ATA taskfile register set for storing input
 451  *
 452  *      Reads ATA taskfile registers for currently-selected device
 453  *      into @tf. Assumes the device has a fully SFF compliant task file
 454  *      layout and behaviour. If you device does not (eg has a different
 455  *      status method) then you will need to provide a replacement tf_read
 456  *
 457  *      LOCKING:
 458  *      Inherited from caller.
 459  */
 460 void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 461 {
 462         struct ata_ioports *ioaddr = &ap->ioaddr;
 463 
 464         tf->command = ata_sff_check_status(ap);
 465         tf->feature = ioread8(ioaddr->error_addr);
 466         tf->nsect = ioread8(ioaddr->nsect_addr);
 467         tf->lbal = ioread8(ioaddr->lbal_addr);
 468         tf->lbam = ioread8(ioaddr->lbam_addr);
 469         tf->lbah = ioread8(ioaddr->lbah_addr);
 470         tf->device = ioread8(ioaddr->device_addr);
 471 
 472         if (tf->flags & ATA_TFLAG_LBA48) {
 473                 if (likely(ioaddr->ctl_addr)) {
 474                         iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
 475                         tf->hob_feature = ioread8(ioaddr->error_addr);
 476                         tf->hob_nsect = ioread8(ioaddr->nsect_addr);
 477                         tf->hob_lbal = ioread8(ioaddr->lbal_addr);
 478                         tf->hob_lbam = ioread8(ioaddr->lbam_addr);
 479                         tf->hob_lbah = ioread8(ioaddr->lbah_addr);
 480                         iowrite8(tf->ctl, ioaddr->ctl_addr);
 481                         ap->last_ctl = tf->ctl;
 482                 } else
 483                         WARN_ON_ONCE(1);
 484         }
 485 }
 486 EXPORT_SYMBOL_GPL(ata_sff_tf_read);
 487 
 488 /**
 489  *      ata_sff_exec_command - issue ATA command to host controller
 490  *      @ap: port to which command is being issued
 491  *      @tf: ATA taskfile register set
 492  *
 493  *      Issues ATA command, with proper synchronization with interrupt
 494  *      handler / other threads.
 495  *
 496  *      LOCKING:
 497  *      spin_lock_irqsave(host lock)
 498  */
 499 void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
 500 {
 501         DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
 502 
 503         iowrite8(tf->command, ap->ioaddr.command_addr);
 504         ata_sff_pause(ap);
 505 }
 506 EXPORT_SYMBOL_GPL(ata_sff_exec_command);
 507 
 508 /**
 509  *      ata_tf_to_host - issue ATA taskfile to host controller
 510  *      @ap: port to which command is being issued
 511  *      @tf: ATA taskfile register set
 512  *
 513  *      Issues ATA taskfile register set to ATA host controller,
 514  *      with proper synchronization with interrupt handler and
 515  *      other threads.
 516  *
 517  *      LOCKING:
 518  *      spin_lock_irqsave(host lock)
 519  */
 520 static inline void ata_tf_to_host(struct ata_port *ap,
 521                                   const struct ata_taskfile *tf)
 522 {
 523         ap->ops->sff_tf_load(ap, tf);
 524         ap->ops->sff_exec_command(ap, tf);
 525 }
 526 
 527 /**
 528  *      ata_sff_data_xfer - Transfer data by PIO
 529  *      @qc: queued command
 530  *      @buf: data buffer
 531  *      @buflen: buffer length
 532  *      @rw: read/write
 533  *
 534  *      Transfer data from/to the device data register by PIO.
 535  *
 536  *      LOCKING:
 537  *      Inherited from caller.
 538  *
 539  *      RETURNS:
 540  *      Bytes consumed.
 541  */
 542 unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc, unsigned char *buf,
 543                                unsigned int buflen, int rw)
 544 {
 545         struct ata_port *ap = qc->dev->link->ap;
 546         void __iomem *data_addr = ap->ioaddr.data_addr;
 547         unsigned int words = buflen >> 1;
 548 
 549         /* Transfer multiple of 2 bytes */
 550         if (rw == READ)
 551                 ioread16_rep(data_addr, buf, words);
 552         else
 553                 iowrite16_rep(data_addr, buf, words);
 554 
 555         /* Transfer trailing byte, if any. */
 556         if (unlikely(buflen & 0x01)) {
 557                 unsigned char pad[2] = { };
 558 
 559                 /* Point buf to the tail of buffer */
 560                 buf += buflen - 1;
 561 
 562                 /*
 563                  * Use io*16_rep() accessors here as well to avoid pointlessly
 564                  * swapping bytes to and from on the big endian machines...
 565                  */
 566                 if (rw == READ) {
 567                         ioread16_rep(data_addr, pad, 1);
 568                         *buf = pad[0];
 569                 } else {
 570                         pad[0] = *buf;
 571                         iowrite16_rep(data_addr, pad, 1);
 572                 }
 573                 words++;
 574         }
 575 
 576         return words << 1;
 577 }
 578 EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
 579 
 580 /**
 581  *      ata_sff_data_xfer32 - Transfer data by PIO
 582  *      @qc: queued command
 583  *      @buf: data buffer
 584  *      @buflen: buffer length
 585  *      @rw: read/write
 586  *
 587  *      Transfer data from/to the device data register by PIO using 32bit
 588  *      I/O operations.
 589  *
 590  *      LOCKING:
 591  *      Inherited from caller.
 592  *
 593  *      RETURNS:
 594  *      Bytes consumed.
 595  */
 596 
 597 unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf,
 598                                unsigned int buflen, int rw)
 599 {
 600         struct ata_device *dev = qc->dev;
 601         struct ata_port *ap = dev->link->ap;
 602         void __iomem *data_addr = ap->ioaddr.data_addr;
 603         unsigned int words = buflen >> 2;
 604         int slop = buflen & 3;
 605 
 606         if (!(ap->pflags & ATA_PFLAG_PIO32))
 607                 return ata_sff_data_xfer(qc, buf, buflen, rw);
 608 
 609         /* Transfer multiple of 4 bytes */
 610         if (rw == READ)
 611                 ioread32_rep(data_addr, buf, words);
 612         else
 613                 iowrite32_rep(data_addr, buf, words);
 614 
 615         /* Transfer trailing bytes, if any */
 616         if (unlikely(slop)) {
 617                 unsigned char pad[4] = { };
 618 
 619                 /* Point buf to the tail of buffer */
 620                 buf += buflen - slop;
 621 
 622                 /*
 623                  * Use io*_rep() accessors here as well to avoid pointlessly
 624                  * swapping bytes to and from on the big endian machines...
 625                  */
 626                 if (rw == READ) {
 627                         if (slop < 3)
 628                                 ioread16_rep(data_addr, pad, 1);
 629                         else
 630                                 ioread32_rep(data_addr, pad, 1);
 631                         memcpy(buf, pad, slop);
 632                 } else {
 633                         memcpy(pad, buf, slop);
 634                         if (slop < 3)
 635                                 iowrite16_rep(data_addr, pad, 1);
 636                         else
 637                                 iowrite32_rep(data_addr, pad, 1);
 638                 }
 639         }
 640         return (buflen + 1) & ~1;
 641 }
 642 EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
 643 
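/*
 * Example (illustrative sketch, not code from this file): 32-bit PIO is
 * only used when ATA_PFLAG_PIO32 is set on the port, so a BMDMA driver
 * that wants it wires in both this transfer routine and
 * ata_bmdma_port_start32() (further down in this file), which sets the
 * flag.  The ops structure name is hypothetical.
 *
 *	static struct ata_port_operations my_pio32_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.sff_data_xfer	= ata_sff_data_xfer32,
 *		.port_start	= ata_bmdma_port_start32,
 *	};
 */
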
 644 /**
 645  *      ata_pio_sector - Transfer a sector of data.
 646  *      @qc: Command on going
 647  *
 648  *      Transfer qc->sect_size bytes of data from/to the ATA device.
 649  *
 650  *      LOCKING:
 651  *      Inherited from caller.
 652  */
 653 static void ata_pio_sector(struct ata_queued_cmd *qc)
 654 {
 655         int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
 656         struct ata_port *ap = qc->ap;
 657         struct page *page;
 658         unsigned int offset;
 659         unsigned char *buf;
 660 
 661         if (!qc->cursg) {
 662                 qc->curbytes = qc->nbytes;
 663                 return;
 664         }
 665         if (qc->curbytes == qc->nbytes - qc->sect_size)
 666                 ap->hsm_task_state = HSM_ST_LAST;
 667 
 668         page = sg_page(qc->cursg);
 669         offset = qc->cursg->offset + qc->cursg_ofs;
 670 
 671         /* get the current page and offset */
 672         page = nth_page(page, (offset >> PAGE_SHIFT));
 673         offset %= PAGE_SIZE;
 674 
 675         DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
 676 
 677         /* do the actual data transfer */
 678         buf = kmap_atomic(page);
 679         ap->ops->sff_data_xfer(qc, buf + offset, qc->sect_size, do_write);
 680         kunmap_atomic(buf);
 681 
 682         if (!do_write && !PageSlab(page))
 683                 flush_dcache_page(page);
 684 
 685         qc->curbytes += qc->sect_size;
 686         qc->cursg_ofs += qc->sect_size;
 687 
 688         if (qc->cursg_ofs == qc->cursg->length) {
 689                 qc->cursg = sg_next(qc->cursg);
 690                 if (!qc->cursg)
 691                         ap->hsm_task_state = HSM_ST_LAST;
 692                 qc->cursg_ofs = 0;
 693         }
 694 }
 695 
 696 /**
 697  *      ata_pio_sectors - Transfer one or many sectors.
 698  *      @qc: Command on going
 699  *
 700  *      Transfer one or many sectors of data from/to the
 701  *      ATA device for the DRQ request.
 702  *
 703  *      LOCKING:
 704  *      Inherited from caller.
 705  */
 706 static void ata_pio_sectors(struct ata_queued_cmd *qc)
 707 {
 708         if (is_multi_taskfile(&qc->tf)) {
 709                 /* READ/WRITE MULTIPLE */
 710                 unsigned int nsect;
 711 
 712                 WARN_ON_ONCE(qc->dev->multi_count == 0);
 713 
 714                 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
 715                             qc->dev->multi_count);
 716                 while (nsect--)
 717                         ata_pio_sector(qc);
 718         } else
 719                 ata_pio_sector(qc);
 720 
 721         ata_sff_sync(qc->ap); /* flush */
 722 }
 723 
 724 /**
 725  *      atapi_send_cdb - Write CDB bytes to hardware
 726  *      @ap: Port to which ATAPI device is attached.
 727  *      @qc: Taskfile currently active
 728  *
 729  *      When the device has indicated its readiness to accept
 730  *      a CDB, this function is called.  Send the CDB.
 731  *
 732  *      LOCKING:
 733  *      caller.
 734  */
 735 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
 736 {
 737         /* send SCSI cdb */
 738         DPRINTK("send cdb\n");
 739         WARN_ON_ONCE(qc->dev->cdb_len < 12);
 740 
 741         ap->ops->sff_data_xfer(qc, qc->cdb, qc->dev->cdb_len, 1);
 742         ata_sff_sync(ap);
 743         /* FIXME: If the CDB is for DMA do we need to do the transition delay
 744            or is bmdma_start guaranteed to do it ? */
 745         switch (qc->tf.protocol) {
 746         case ATAPI_PROT_PIO:
 747                 ap->hsm_task_state = HSM_ST;
 748                 break;
 749         case ATAPI_PROT_NODATA:
 750                 ap->hsm_task_state = HSM_ST_LAST;
 751                 break;
 752 #ifdef CONFIG_ATA_BMDMA
 753         case ATAPI_PROT_DMA:
 754                 ap->hsm_task_state = HSM_ST_LAST;
 755                 /* initiate bmdma */
 756                 ap->ops->bmdma_start(qc);
 757                 break;
 758 #endif /* CONFIG_ATA_BMDMA */
 759         default:
 760                 BUG();
 761         }
 762 }
 763 
 764 /**
 765  *      __atapi_pio_bytes - Transfer data from/to the ATAPI device.
 766  *      @qc: Command on going
 767  *      @bytes: number of bytes
 768  *
 769  *      Transfer data from/to the ATAPI device.
 770  *
 771  *      LOCKING:
 772  *      Inherited from caller.
 773  *
 774  */
 775 static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
 776 {
 777         int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
 778         struct ata_port *ap = qc->ap;
 779         struct ata_device *dev = qc->dev;
 780         struct ata_eh_info *ehi = &dev->link->eh_info;
 781         struct scatterlist *sg;
 782         struct page *page;
 783         unsigned char *buf;
 784         unsigned int offset, count, consumed;
 785 
 786 next_sg:
 787         sg = qc->cursg;
 788         if (unlikely(!sg)) {
 789                 ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
 790                                   "buf=%u cur=%u bytes=%u",
 791                                   qc->nbytes, qc->curbytes, bytes);
 792                 return -1;
 793         }
 794 
 795         page = sg_page(sg);
 796         offset = sg->offset + qc->cursg_ofs;
 797 
 798         /* get the current page and offset */
 799         page = nth_page(page, (offset >> PAGE_SHIFT));
 800         offset %= PAGE_SIZE;
 801 
 802         /* don't overrun current sg */
 803         count = min(sg->length - qc->cursg_ofs, bytes);
 804 
 805         /* don't cross page boundaries */
 806         count = min(count, (unsigned int)PAGE_SIZE - offset);
 807 
 808         DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
 809 
 810         /* do the actual data transfer */
 811         buf = kmap_atomic(page);
 812         consumed = ap->ops->sff_data_xfer(qc, buf + offset, count, rw);
 813         kunmap_atomic(buf);
 814 
 815         bytes -= min(bytes, consumed);
 816         qc->curbytes += count;
 817         qc->cursg_ofs += count;
 818 
 819         if (qc->cursg_ofs == sg->length) {
 820                 qc->cursg = sg_next(qc->cursg);
 821                 qc->cursg_ofs = 0;
 822         }
 823 
 824         /*
 825          * There used to be a  WARN_ON_ONCE(qc->cursg && count != consumed);
 826          * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
 827          * check correctly as it doesn't know if it is the last request being
 828          * made. Somebody should implement a proper sanity check.
 829          */
 830         if (bytes)
 831                 goto next_sg;
 832         return 0;
 833 }
 834 
 835 /**
 836  *      atapi_pio_bytes - Transfer data from/to the ATAPI device.
 837  *      @qc: Command on going
 838  *
 839  *      Transfer data from/to the ATAPI device.
 840  *
 841  *      LOCKING:
 842  *      Inherited from caller.
 843  */
 844 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
 845 {
 846         struct ata_port *ap = qc->ap;
 847         struct ata_device *dev = qc->dev;
 848         struct ata_eh_info *ehi = &dev->link->eh_info;
 849         unsigned int ireason, bc_lo, bc_hi, bytes;
 850         int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
 851 
 852         /* Abuse qc->result_tf for temp storage of intermediate TF
 853          * here to save some kernel stack usage.
 854          * For normal completion, qc->result_tf is not relevant. For
 855          * error, qc->result_tf is later overwritten by ata_qc_complete().
 856          * So, the correctness of qc->result_tf is not affected.
 857          */
 858         ap->ops->sff_tf_read(ap, &qc->result_tf);
 859         ireason = qc->result_tf.nsect;
 860         bc_lo = qc->result_tf.lbam;
 861         bc_hi = qc->result_tf.lbah;
 862         bytes = (bc_hi << 8) | bc_lo;
 863 
 864         /* shall be cleared to zero, indicating xfer of data */
 865         if (unlikely(ireason & ATAPI_COD))
 866                 goto atapi_check;
 867 
 868         /* make sure transfer direction matches expected */
 869         i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0;
 870         if (unlikely(do_write != i_write))
 871                 goto atapi_check;
 872 
 873         if (unlikely(!bytes))
 874                 goto atapi_check;
 875 
 876         VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
 877 
 878         if (unlikely(__atapi_pio_bytes(qc, bytes)))
 879                 goto err_out;
 880         ata_sff_sync(ap); /* flush */
 881 
 882         return;
 883 
 884  atapi_check:
 885         ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
 886                           ireason, bytes);
 887  err_out:
 888         qc->err_mask |= AC_ERR_HSM;
 889         ap->hsm_task_state = HSM_ST_ERR;
 890 }
 891 
 892 /**
 893  *      ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 894  *      @ap: the target ata_port
 895  *      @qc: qc on going
 896  *
 897  *      RETURNS:
 898  *      1 if ok in workqueue, 0 otherwise.
 899  */
 900 static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
 901                                                 struct ata_queued_cmd *qc)
 902 {
 903         if (qc->tf.flags & ATA_TFLAG_POLLING)
 904                 return 1;
 905 
 906         if (ap->hsm_task_state == HSM_ST_FIRST) {
 907                 if (qc->tf.protocol == ATA_PROT_PIO &&
 908                    (qc->tf.flags & ATA_TFLAG_WRITE))
 909                     return 1;
 910 
 911                 if (ata_is_atapi(qc->tf.protocol) &&
 912                    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
 913                         return 1;
 914         }
 915 
 916         return 0;
 917 }
 918 
 919 /**
 920  *      ata_hsm_qc_complete - finish a qc running on standard HSM
 921  *      @qc: Command to complete
 922  *      @in_wq: 1 if called from workqueue, 0 otherwise
 923  *
 924  *      Finish @qc which is running on standard HSM.
 925  *
 926  *      LOCKING:
 927  *      If @in_wq is zero, spin_lock_irqsave(host lock).
 928  *      Otherwise, none on entry and grabs host lock.
 929  */
 930 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 931 {
 932         struct ata_port *ap = qc->ap;
 933 
 934         if (ap->ops->error_handler) {
 935                 if (in_wq) {
 936                         /* EH might have kicked in while host lock is
 937                          * released.
 938                          */
 939                         qc = ata_qc_from_tag(ap, qc->tag);
 940                         if (qc) {
 941                                 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
 942                                         ata_sff_irq_on(ap);
 943                                         ata_qc_complete(qc);
 944                                 } else
 945                                         ata_port_freeze(ap);
 946                         }
 947                 } else {
 948                         if (likely(!(qc->err_mask & AC_ERR_HSM)))
 949                                 ata_qc_complete(qc);
 950                         else
 951                                 ata_port_freeze(ap);
 952                 }
 953         } else {
 954                 if (in_wq) {
 955                         ata_sff_irq_on(ap);
 956                         ata_qc_complete(qc);
 957                 } else
 958                         ata_qc_complete(qc);
 959         }
 960 }
 961 
 962 /**
 963  *      ata_sff_hsm_move - move the HSM to the next state.
 964  *      @ap: the target ata_port
 965  *      @qc: qc on going
 966  *      @status: current device status
 967  *      @in_wq: 1 if called from workqueue, 0 otherwise
 968  *
 969  *      RETURNS:
 970  *      1 when poll next status needed, 0 otherwise.
 971  */
 972 int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
 973                      u8 status, int in_wq)
 974 {
 975         struct ata_link *link = qc->dev->link;
 976         struct ata_eh_info *ehi = &link->eh_info;
 977         int poll_next;
 978 
 979         lockdep_assert_held(ap->lock);
 980 
 981         WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
 982 
 983         /* Make sure ata_sff_qc_issue() does not throw things
 984          * like DMA polling into the workqueue. Notice that
 985          * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
 986          */
 987         WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
 988 
 989 fsm_start:
 990         DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
 991                 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
 992 
 993         switch (ap->hsm_task_state) {
 994         case HSM_ST_FIRST:
 995                 /* Send first data block or PACKET CDB */
 996 
 997                 /* If polling, we will stay in the work queue after
 998                  * sending the data. Otherwise, interrupt handler
 999                  * takes over after sending the data.
1000                  */
1001                 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
1002 
1003                 /* check device status */
1004                 if (unlikely((status & ATA_DRQ) == 0)) {
1005                         /* handle BSY=0, DRQ=0 as error */
1006                         if (likely(status & (ATA_ERR | ATA_DF)))
1007                                 /* device stops HSM for abort/error */
1008                                 qc->err_mask |= AC_ERR_DEV;
1009                         else {
1010                                 /* HSM violation. Let EH handle this */
1011                                 ata_ehi_push_desc(ehi,
1012                                         "ST_FIRST: !(DRQ|ERR|DF)");
1013                                 qc->err_mask |= AC_ERR_HSM;
1014                         }
1015 
1016                         ap->hsm_task_state = HSM_ST_ERR;
1017                         goto fsm_start;
1018                 }
1019 
1020                 /* Device should not ask for data transfer (DRQ=1)
1021                  * when it finds something wrong.
1022                  * We ignore DRQ here and stop the HSM by
1023                  * changing hsm_task_state to HSM_ST_ERR and
1024                  * let the EH abort the command or reset the device.
1025                  */
1026                 if (unlikely(status & (ATA_ERR | ATA_DF))) {
1027                         /* Some ATAPI tape drives forget to clear the ERR bit
1028                          * when doing the next command (mostly request sense).
1029                          * We ignore ERR here to workaround and proceed sending
1030                          * the CDB.
1031                          */
1032                         if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
1033                                 ata_ehi_push_desc(ehi, "ST_FIRST: "
1034                                         "DRQ=1 with device error, "
1035                                         "dev_stat 0x%X", status);
1036                                 qc->err_mask |= AC_ERR_HSM;
1037                                 ap->hsm_task_state = HSM_ST_ERR;
1038                                 goto fsm_start;
1039                         }
1040                 }
1041 
1042                 if (qc->tf.protocol == ATA_PROT_PIO) {
1043                         /* PIO data out protocol.
1044                          * send first data block.
1045                          */
1046 
1047                         /* ata_pio_sectors() might change the state
1048                          * to HSM_ST_LAST. so, the state is changed here
1049                          * before ata_pio_sectors().
1050                          */
1051                         ap->hsm_task_state = HSM_ST;
1052                         ata_pio_sectors(qc);
1053                 } else
1054                         /* send CDB */
1055                         atapi_send_cdb(ap, qc);
1056 
1057                 /* if polling, ata_sff_pio_task() handles the rest.
1058                  * otherwise, interrupt handler takes over from here.
1059                  */
1060                 break;
1061 
1062         case HSM_ST:
1063                 /* complete command or read/write the data register */
1064                 if (qc->tf.protocol == ATAPI_PROT_PIO) {
1065                         /* ATAPI PIO protocol */
1066                         if ((status & ATA_DRQ) == 0) {
1067                                 /* No more data to transfer or device error.
1068                                  * Device error will be tagged in HSM_ST_LAST.
1069                                  */
1070                                 ap->hsm_task_state = HSM_ST_LAST;
1071                                 goto fsm_start;
1072                         }
1073 
1074                         /* Device should not ask for data transfer (DRQ=1)
1075                          * when it finds something wrong.
1076                          * We ignore DRQ here and stop the HSM by
1077                          * changing hsm_task_state to HSM_ST_ERR and
1078                          * let the EH abort the command or reset the device.
1079                          */
1080                         if (unlikely(status & (ATA_ERR | ATA_DF))) {
1081                                 ata_ehi_push_desc(ehi, "ST-ATAPI: "
1082                                         "DRQ=1 with device error, "
1083                                         "dev_stat 0x%X", status);
1084                                 qc->err_mask |= AC_ERR_HSM;
1085                                 ap->hsm_task_state = HSM_ST_ERR;
1086                                 goto fsm_start;
1087                         }
1088 
1089                         atapi_pio_bytes(qc);
1090 
1091                         if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
1092                                 /* bad ireason reported by device */
1093                                 goto fsm_start;
1094 
1095                 } else {
1096                         /* ATA PIO protocol */
1097                         if (unlikely((status & ATA_DRQ) == 0)) {
1098                                 /* handle BSY=0, DRQ=0 as error */
1099                                 if (likely(status & (ATA_ERR | ATA_DF))) {
1100                                         /* device stops HSM for abort/error */
1101                                         qc->err_mask |= AC_ERR_DEV;
1102 
1103                                         /* If diagnostic failed and this is
1104                                          * IDENTIFY, it's likely a phantom
1105                                          * device.  Mark hint.
1106                                          */
1107                                         if (qc->dev->horkage &
1108                                             ATA_HORKAGE_DIAGNOSTIC)
1109                                                 qc->err_mask |=
1110                                                         AC_ERR_NODEV_HINT;
1111                                 } else {
1112                                         /* HSM violation. Let EH handle this.
1113                                          * Phantom devices also trigger this
1114                                          * condition.  Mark hint.
1115                                          */
1116                                         ata_ehi_push_desc(ehi, "ST-ATA: "
1117                                                 "DRQ=0 without device error, "
1118                                                 "dev_stat 0x%X", status);
1119                                         qc->err_mask |= AC_ERR_HSM |
1120                                                         AC_ERR_NODEV_HINT;
1121                                 }
1122 
1123                                 ap->hsm_task_state = HSM_ST_ERR;
1124                                 goto fsm_start;
1125                         }
1126 
1127                         /* For PIO reads, some devices may ask for
1128                          * data transfer (DRQ=1) along with ERR=1.
1129                          * We respect DRQ here and transfer one
1130                          * block of junk data before changing the
1131                          * hsm_task_state to HSM_ST_ERR.
1132                          *
1133                          * For PIO writes, ERR=1 DRQ=1 doesn't make
1134                          * sense since the data block has been
1135                          * transferred to the device.
1136                          */
1137                         if (unlikely(status & (ATA_ERR | ATA_DF))) {
1138                                 /* data might be corrupted */
1139                                 qc->err_mask |= AC_ERR_DEV;
1140 
1141                                 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
1142                                         ata_pio_sectors(qc);
1143                                         status = ata_wait_idle(ap);
1144                                 }
1145 
1146                                 if (status & (ATA_BUSY | ATA_DRQ)) {
1147                                         ata_ehi_push_desc(ehi, "ST-ATA: "
1148                                                 "BUSY|DRQ persists on ERR|DF, "
1149                                                 "dev_stat 0x%X", status);
1150                                         qc->err_mask |= AC_ERR_HSM;
1151                                 }
1152 
1153                                 /* There are oddball controllers with
1154                                  * status register stuck at 0x7f and
1155                                  * lbal/m/h at zero which makes it
1156                                  * pass all other presence detection
1157                                  * mechanisms we have.  Set NODEV_HINT
1158                                  * for it.  Kernel bz#7241.
1159                                  */
1160                                 if (status == 0x7f)
1161                                         qc->err_mask |= AC_ERR_NODEV_HINT;
1162 
1163                                 /* ata_pio_sectors() might change the
1164                                  * state to HSM_ST_LAST. so, the state
1165                                  * is changed after ata_pio_sectors().
1166                                  */
1167                                 ap->hsm_task_state = HSM_ST_ERR;
1168                                 goto fsm_start;
1169                         }
1170 
1171                         ata_pio_sectors(qc);
1172 
1173                         if (ap->hsm_task_state == HSM_ST_LAST &&
1174                             (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
1175                                 /* all data read */
1176                                 status = ata_wait_idle(ap);
1177                                 goto fsm_start;
1178                         }
1179                 }
1180 
1181                 poll_next = 1;
1182                 break;
1183 
1184         case HSM_ST_LAST:
1185                 if (unlikely(!ata_ok(status))) {
1186                         qc->err_mask |= __ac_err_mask(status);
1187                         ap->hsm_task_state = HSM_ST_ERR;
1188                         goto fsm_start;
1189                 }
1190 
1191                 /* no more data to transfer */
1192                 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
1193                         ap->print_id, qc->dev->devno, status);
1194 
1195                 WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
1196 
1197                 ap->hsm_task_state = HSM_ST_IDLE;
1198 
1199                 /* complete taskfile transaction */
1200                 ata_hsm_qc_complete(qc, in_wq);
1201 
1202                 poll_next = 0;
1203                 break;
1204 
1205         case HSM_ST_ERR:
1206                 ap->hsm_task_state = HSM_ST_IDLE;
1207 
1208                 /* complete taskfile transaction */
1209                 ata_hsm_qc_complete(qc, in_wq);
1210 
1211                 poll_next = 0;
1212                 break;
1213         default:
1214                 poll_next = 0;
1215                 WARN(true, "ata%d: SFF host state machine in invalid state %d",
1216                      ap->print_id, ap->hsm_task_state);
1217         }
1218 
1219         return poll_next;
1220 }
1221 EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
1222 
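/*
 * Example (illustrative sketch, not code from this file): a driver with
 * its own interrupt decoding can drive the state machine directly.
 * With ap->lock held it reads the Status register (which also clears
 * INTRQ) and hands the value to ata_sff_hsm_move() with in_wq == 0;
 * __ata_sff_port_intr() below is the stock equivalent.  my_host_intr()
 * is hypothetical.
 *
 *	static unsigned int my_host_intr(struct ata_port *ap,
 *					 struct ata_queued_cmd *qc)
 *	{
 *		u8 status = ap->ops->sff_check_status(ap);
 *
 *		if (status & ATA_BUSY)
 *			return 0;	// not our interrupt
 *
 *		ata_sff_hsm_move(ap, qc, status, 0);
 *		return 1;	// handled
 *	}
 */
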
1223 void ata_sff_queue_work(struct work_struct *work)
1224 {
1225         queue_work(ata_sff_wq, work);
1226 }
1227 EXPORT_SYMBOL_GPL(ata_sff_queue_work);
1228 
1229 void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay)
1230 {
1231         queue_delayed_work(ata_sff_wq, dwork, delay);
1232 }
1233 EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work);
1234 
1235 void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
1236 {
1237         struct ata_port *ap = link->ap;
1238 
1239         WARN_ON((ap->sff_pio_task_link != NULL) &&
1240                 (ap->sff_pio_task_link != link));
1241         ap->sff_pio_task_link = link;
1242 
1243         /* may fail if ata_sff_flush_pio_task() in progress */
1244         ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay));
1245 }
1246 EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
1247 
1248 void ata_sff_flush_pio_task(struct ata_port *ap)
1249 {
1250         DPRINTK("ENTER\n");
1251 
1252         cancel_delayed_work_sync(&ap->sff_pio_task);
1253 
1254         /*
1255          * We want to reset the HSM state to IDLE.  If we do so without
1256          * grabbing the port lock, critical sections protected by it which
1257          * expect the HSM state to stay stable may get surprised.  For
1258          * example, we may set IDLE in between the time
1259          * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
1260          * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
1261          */
1262         spin_lock_irq(ap->lock);
1263         ap->hsm_task_state = HSM_ST_IDLE;
1264         spin_unlock_irq(ap->lock);
1265 
1266         ap->sff_pio_task_link = NULL;
1267 
1268         if (ata_msg_ctl(ap))
1269                 ata_port_dbg(ap, "%s: EXIT\n", __func__);
1270 }
1271 
1272 static void ata_sff_pio_task(struct work_struct *work)
1273 {
1274         struct ata_port *ap =
1275                 container_of(work, struct ata_port, sff_pio_task.work);
1276         struct ata_link *link = ap->sff_pio_task_link;
1277         struct ata_queued_cmd *qc;
1278         u8 status;
1279         int poll_next;
1280 
1281         spin_lock_irq(ap->lock);
1282 
1283         BUG_ON(ap->sff_pio_task_link == NULL);
1284         /* qc can be NULL if timeout occurred */
1285         qc = ata_qc_from_tag(ap, link->active_tag);
1286         if (!qc) {
1287                 ap->sff_pio_task_link = NULL;
1288                 goto out_unlock;
1289         }
1290 
1291 fsm_start:
1292         WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
1293 
1294         /*
1295          * This is purely heuristic.  This is a fast path.
1296          * Sometimes when we enter, BSY will be cleared in
1297          * a chk-status or two.  If not, the drive is probably seeking
1298          * or something.  Snooze for a couple msecs, then
1299          * chk-status again.  If still busy, queue delayed work.
1300          */
1301         status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
1302         if (status & ATA_BUSY) {
1303                 spin_unlock_irq(ap->lock);
1304                 ata_msleep(ap, 2);
1305                 spin_lock_irq(ap->lock);
1306 
1307                 status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1308                 if (status & ATA_BUSY) {
1309                         ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
1310                         goto out_unlock;
1311                 }
1312         }
1313 
1314         /*
1315          * hsm_move() may trigger another command to be processed.
1316          * clean the link beforehand.
1317          */
1318         ap->sff_pio_task_link = NULL;
1319         /* move the HSM */
1320         poll_next = ata_sff_hsm_move(ap, qc, status, 1);
1321 
1322         /* another command or interrupt handler
1323          * may be running at this point.
1324          */
1325         if (poll_next)
1326                 goto fsm_start;
1327 out_unlock:
1328         spin_unlock_irq(ap->lock);
1329 }
1330 
1331 /**
1332  *      ata_sff_qc_issue - issue taskfile to a SFF controller
1333  *      @qc: command to issue to device
1334  *
1335  *      This function issues a PIO or NODATA command to a SFF
1336  *      controller.
1337  *
1338  *      LOCKING:
1339  *      spin_lock_irqsave(host lock)
1340  *
1341  *      RETURNS:
1342  *      Zero on success, AC_ERR_* mask on failure
1343  */
1344 unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1345 {
1346         struct ata_port *ap = qc->ap;
1347         struct ata_link *link = qc->dev->link;
1348 
1349         /* Use polling pio if the LLD doesn't handle
1350          * interrupt driven pio and atapi CDB interrupt.
1351          */
1352         if (ap->flags & ATA_FLAG_PIO_POLLING)
1353                 qc->tf.flags |= ATA_TFLAG_POLLING;
1354 
1355         /* select the device */
1356         ata_dev_select(ap, qc->dev->devno, 1, 0);
1357 
1358         /* start the command */
1359         switch (qc->tf.protocol) {
1360         case ATA_PROT_NODATA:
1361                 if (qc->tf.flags & ATA_TFLAG_POLLING)
1362                         ata_qc_set_polling(qc);
1363 
1364                 ata_tf_to_host(ap, &qc->tf);
1365                 ap->hsm_task_state = HSM_ST_LAST;
1366 
1367                 if (qc->tf.flags & ATA_TFLAG_POLLING)
1368                         ata_sff_queue_pio_task(link, 0);
1369 
1370                 break;
1371 
1372         case ATA_PROT_PIO:
1373                 if (qc->tf.flags & ATA_TFLAG_POLLING)
1374                         ata_qc_set_polling(qc);
1375 
1376                 ata_tf_to_host(ap, &qc->tf);
1377 
1378                 if (qc->tf.flags & ATA_TFLAG_WRITE) {
1379                         /* PIO data out protocol */
1380                         ap->hsm_task_state = HSM_ST_FIRST;
1381                         ata_sff_queue_pio_task(link, 0);
1382 
1383                         /* always send first data block using the
1384                          * ata_sff_pio_task() codepath.
1385                          */
1386                 } else {
1387                         /* PIO data in protocol */
1388                         ap->hsm_task_state = HSM_ST;
1389 
1390                         if (qc->tf.flags & ATA_TFLAG_POLLING)
1391                                 ata_sff_queue_pio_task(link, 0);
1392 
1393                         /* if polling, ata_sff_pio_task() handles the
1394                          * rest.  otherwise, interrupt handler takes
1395                          * over from here.
1396                          */
1397                 }
1398 
1399                 break;
1400 
1401         case ATAPI_PROT_PIO:
1402         case ATAPI_PROT_NODATA:
1403                 if (qc->tf.flags & ATA_TFLAG_POLLING)
1404                         ata_qc_set_polling(qc);
1405 
1406                 ata_tf_to_host(ap, &qc->tf);
1407 
1408                 ap->hsm_task_state = HSM_ST_FIRST;
1409 
1410                 /* send cdb by polling if no cdb interrupt */
1411                 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
1412                     (qc->tf.flags & ATA_TFLAG_POLLING))
1413                         ata_sff_queue_pio_task(link, 0);
1414                 break;
1415 
1416         default:
1417                 return AC_ERR_SYSTEM;
1418         }
1419 
1420         return 0;
1421 }
1422 EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
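/*
 * Example (illustrative sketch): a PIO-only LLD normally does not call
 * ata_sff_qc_issue() directly; it inherits it by basing its port operations
 * on ata_sff_port_ops.  The foo_* names below are hypothetical.
 *
 *	static struct ata_port_operations foo_pio_port_ops = {
 *		.inherits	= &ata_sff_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *		.set_piomode	= foo_set_piomode,	// hypothetical timing hook
 *	};
 */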
1423 
1424 /**
1425  *      ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
1426  *      @qc: qc to fill result TF for
1427  *
1428  *      @qc is finished and result TF needs to be filled.  Fill it
1429  *      using ->sff_tf_read.
1430  *
1431  *      LOCKING:
1432  *      spin_lock_irqsave(host lock)
1433  *
1434  *      RETURNS:
1435  *      true indicating that result TF is successfully filled.
1436  */
1437 bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
1438 {
1439         qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
1440         return true;
1441 }
1442 EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
1443 
1444 static unsigned int ata_sff_idle_irq(struct ata_port *ap)
1445 {
1446         ap->stats.idle_irq++;
1447 
1448 #ifdef ATA_IRQ_TRAP
1449         if ((ap->stats.idle_irq % 1000) == 0) {
1450                 ap->ops->sff_check_status(ap);
1451                 if (ap->ops->sff_irq_clear)
1452                         ap->ops->sff_irq_clear(ap);
1453                 ata_port_warn(ap, "irq trap\n");
1454                 return 1;
1455         }
1456 #endif
1457         return 0;       /* irq not handled */
1458 }
1459 
1460 static unsigned int __ata_sff_port_intr(struct ata_port *ap,
1461                                         struct ata_queued_cmd *qc,
1462                                         bool hsmv_on_idle)
1463 {
1464         u8 status;
1465 
1466         VPRINTK("ata%u: protocol %d task_state %d\n",
1467                 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1468 
1469         /* Check whether we are expecting interrupt in this state */
1470         switch (ap->hsm_task_state) {
1471         case HSM_ST_FIRST:
1472                 /* Some pre-ATAPI-4 devices assert INTRQ
1473                  * in this state when ready to receive the CDB.
1474                  */
1475 
1476                 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
1477                  * The flag is set only for ATAPI devices, so there is no
1478                  * need to check ata_is_atapi(qc->tf.protocol) again.
1479                  */
1480                 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1481                         return ata_sff_idle_irq(ap);
1482                 break;
1483         case HSM_ST_IDLE:
1484                 return ata_sff_idle_irq(ap);
1485         default:
1486                 break;
1487         }
1488 
1489         /* check main status, clearing INTRQ if needed */
1490         status = ata_sff_irq_status(ap);
1491         if (status & ATA_BUSY) {
1492                 if (hsmv_on_idle) {
1493                         /* BMDMA engine is already stopped, we're screwed */
1494                         qc->err_mask |= AC_ERR_HSM;
1495                         ap->hsm_task_state = HSM_ST_ERR;
1496                 } else
1497                         return ata_sff_idle_irq(ap);
1498         }
1499 
1500         /* clear irq events */
1501         if (ap->ops->sff_irq_clear)
1502                 ap->ops->sff_irq_clear(ap);
1503 
1504         ata_sff_hsm_move(ap, qc, status, 0);
1505 
1506         return 1;       /* irq handled */
1507 }
1508 
1509 /**
1510  *      ata_sff_port_intr - Handle SFF port interrupt
1511  *      @ap: Port on which interrupt arrived (possibly...)
1512  *      @qc: Taskfile currently active in engine
1513  *
1514  *      Handle port interrupt for given queued command.
1515  *
1516  *      LOCKING:
1517  *      spin_lock_irqsave(host lock)
1518  *
1519  *      RETURNS:
1520  *      One if interrupt was handled, zero if not (shared irq).
1521  */
1522 unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1523 {
1524         return __ata_sff_port_intr(ap, qc, false);
1525 }
1526 EXPORT_SYMBOL_GPL(ata_sff_port_intr);
1527 
1528 static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
1529         unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
1530 {
1531         struct ata_host *host = dev_instance;
1532         bool retried = false;
1533         unsigned int i;
1534         unsigned int handled, idle, polling;
1535         unsigned long flags;
1536 
1537         /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
1538         spin_lock_irqsave(&host->lock, flags);
1539 
1540 retry:
1541         handled = idle = polling = 0;
1542         for (i = 0; i < host->n_ports; i++) {
1543                 struct ata_port *ap = host->ports[i];
1544                 struct ata_queued_cmd *qc;
1545 
1546                 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1547                 if (qc) {
1548                         if (!(qc->tf.flags & ATA_TFLAG_POLLING))
1549                                 handled |= port_intr(ap, qc);
1550                         else
1551                                 polling |= 1 << i;
1552                 } else
1553                         idle |= 1 << i;
1554         }
1555 
1556         /*
1557          * If no port was expecting an IRQ but the controller is asserting
1558          * the IRQ line, a "nobody cared" spurious IRQ shutdown will ensue.
1559          * Check IRQ pending status if available and clear spurious IRQs.
1560          */
1561         if (!handled && !retried) {
1562                 bool retry = false;
1563 
1564                 for (i = 0; i < host->n_ports; i++) {
1565                         struct ata_port *ap = host->ports[i];
1566 
1567                         if (polling & (1 << i))
1568                                 continue;
1569 
1570                         if (!ap->ops->sff_irq_check ||
1571                             !ap->ops->sff_irq_check(ap))
1572                                 continue;
1573 
1574                         if (idle & (1 << i)) {
1575                                 ap->ops->sff_check_status(ap);
1576                                 if (ap->ops->sff_irq_clear)
1577                                         ap->ops->sff_irq_clear(ap);
1578                         } else {
1579                                 /* clear INTRQ and check if BUSY cleared */
1580                                 if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
1581                                         retry |= true;
1582                                 /*
1583                                  * With command in flight, we can't do
1584                                  * sff_irq_clear() w/o racing with completion.
1585                                  */
1586                         }
1587                 }
1588 
1589                 if (retry) {
1590                         retried = true;
1591                         goto retry;
1592                 }
1593         }
1594 
1595         spin_unlock_irqrestore(&host->lock, flags);
1596 
1597         return IRQ_RETVAL(handled);
1598 }
1599 
1600 /**
1601  *      ata_sff_interrupt - Default SFF ATA host interrupt handler
1602  *      @irq: irq line (unused)
1603  *      @dev_instance: pointer to our ata_host information structure
1604  *
1605  *      Default interrupt handler for PCI IDE devices.  Calls
1606  *      ata_sff_port_intr() for each port that is not disabled.
1607  *
1608  *      LOCKING:
1609  *      Obtains host lock during operation.
1610  *
1611  *      RETURNS:
1612  *      IRQ_NONE or IRQ_HANDLED.
1613  */
1614 irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
1615 {
1616         return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
1617 }
1618 EXPORT_SYMBOL_GPL(ata_sff_interrupt);
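/*
 * Example (illustrative sketch): a driver that manages its own resources can
 * pass ata_sff_interrupt straight to ata_host_activate().  Here irq is
 * assumed to have been obtained by the driver and foo_sht is a hypothetical
 * scsi_host_template.
 *
 *	rc = ata_host_activate(host, irq, ata_sff_interrupt,
 *			       IRQF_SHARED, &foo_sht);
 */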
1619 
1620 /**
1621  *      ata_sff_lost_interrupt  -       Check for an apparent lost interrupt
1622  *      @ap: port that appears to have timed out
1623  *
1624  *      Called from the libata error handlers when the core code suspects
1625  *      an interrupt has been lost.  If it has, complete anything we can
1626  *      and then return.  The interface must support altstatus for this
1627  *      faster recovery to occur.
1628  *
1629  *      Locking:
1630  *      Caller holds host lock
1631  */
1632 
1633 void ata_sff_lost_interrupt(struct ata_port *ap)
1634 {
1635         u8 status;
1636         struct ata_queued_cmd *qc;
1637 
1638         /* Only one outstanding command per SFF channel */
1639         qc = ata_qc_from_tag(ap, ap->link.active_tag);
1640         /* We cannot lose an interrupt on a non-existent or polled command */
1641         if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
1642                 return;
1643         /* See if the controller thinks it is still busy - if so the command
1644            isn't a lost IRQ but is still in progress */
1645         status = ata_sff_altstatus(ap);
1646         if (status & ATA_BUSY)
1647                 return;
1648 
1649         /* There was a command running, we are no longer busy and we have
1650            no interrupt. */
1651         ata_port_warn(ap, "lost interrupt (Status 0x%x)\n",
1652                                                                 status);
1653         /* Run the host interrupt logic as if the interrupt had not been
1654            lost */
1655         ata_sff_port_intr(ap, qc);
1656 }
1657 EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
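/*
 * Note: ata_sff_lost_interrupt() is normally reached through the
 * ->lost_interrupt port operation, so SFF drivers inheriting the stock SFF
 * port ops get this recovery path without further work.  A driver needing a
 * chip-specific step first could wrap it (hypothetical sketch):
 *
 *	static void foo_lost_interrupt(struct ata_port *ap)
 *	{
 *		foo_clear_chip_irq(ap);		// hypothetical helper
 *		ata_sff_lost_interrupt(ap);
 *	}
 */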
1658 
1659 /**
1660  *      ata_sff_freeze - Freeze SFF controller port
1661  *      @ap: port to freeze
1662  *
1663  *      Freeze SFF controller port.
1664  *
1665  *      LOCKING:
1666  *      Inherited from caller.
1667  */
1668 void ata_sff_freeze(struct ata_port *ap)
1669 {
1670         ap->ctl |= ATA_NIEN;
1671         ap->last_ctl = ap->ctl;
1672 
1673         if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)
1674                 ata_sff_set_devctl(ap, ap->ctl);
1675 
1676         /* Under certain circumstances, some controllers raise IRQ on
1677          * ATA_NIEN manipulation.  Also, many controllers fail to mask
1678          * previously pending IRQ on ATA_NIEN assertion.  Clear it.
1679          */
1680         ap->ops->sff_check_status(ap);
1681 
1682         if (ap->ops->sff_irq_clear)
1683                 ap->ops->sff_irq_clear(ap);
1684 }
1685 EXPORT_SYMBOL_GPL(ata_sff_freeze);
1686 
1687 /**
1688  *      ata_sff_thaw - Thaw SFF controller port
1689  *      @ap: port to thaw
1690  *
1691  *      Thaw SFF controller port.
1692  *
1693  *      LOCKING:
1694  *      Inherited from caller.
1695  */
1696 void ata_sff_thaw(struct ata_port *ap)
1697 {
1698         /* clear & re-enable interrupts */
1699         ap->ops->sff_check_status(ap);
1700         if (ap->ops->sff_irq_clear)
1701                 ap->ops->sff_irq_clear(ap);
1702         ata_sff_irq_on(ap);
1703 }
1704 EXPORT_SYMBOL_GPL(ata_sff_thaw);
1705 
1706 /**
1707  *      ata_sff_prereset - prepare SFF link for reset
1708  *      @link: SFF link to be reset
1709  *      @deadline: deadline jiffies for the operation
1710  *
1711  *      SFF link @link is about to be reset.  Initialize it.  This first
1712  *      calls ata_std_prereset() and then waits for !BSY if the port is
1713  *      being softreset.
1714  *
1715  *      LOCKING:
1716  *      Kernel thread context (may sleep)
1717  *
1718  *      RETURNS:
1719  *      0 on success, -errno otherwise.
1720  */
1721 int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
1722 {
1723         struct ata_eh_context *ehc = &link->eh_context;
1724         int rc;
1725 
1726         rc = ata_std_prereset(link, deadline);
1727         if (rc)
1728                 return rc;
1729 
1730         /* if we're about to do hardreset, nothing more to do */
1731         if (ehc->i.action & ATA_EH_HARDRESET)
1732                 return 0;
1733 
1734         /* wait for !BSY if we don't know that no device is attached */
1735         if (!ata_link_offline(link)) {
1736                 rc = ata_sff_wait_ready(link, deadline);
1737                 if (rc && rc != -ENODEV) {
1738                         ata_link_warn(link,
1739                                       "device not ready (errno=%d), forcing hardreset\n",
1740                                       rc);
1741                         ehc->i.action |= ATA_EH_HARDRESET;
1742                 }
1743         }
1744 
1745         return 0;
1746 }
1747 EXPORT_SYMBOL_GPL(ata_sff_prereset);
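/*
 * Example (illustrative sketch): drivers that need controller-specific
 * checks before a reset commonly wrap ata_sff_prereset().  The foo_* names
 * are hypothetical; returning -ENOENT tells EH the port is disabled.
 *
 *	static int foo_prereset(struct ata_link *link, unsigned long deadline)
 *	{
 *		if (!foo_port_enabled(link->ap))
 *			return -ENOENT;
 *		return ata_sff_prereset(link, deadline);
 *	}
 */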
1748 
1749 /**
1750  *      ata_devchk - PATA device presence detection
1751  *      @ap: ATA channel to examine
1752  *      @device: Device to examine (starting at zero)
1753  *
1754  *      This technique was originally described in
1755  *      Hale Landis's ATADRVR (www.ata-atapi.com), and
1756  *      later found its way into the ATA/ATAPI spec.
1757  *
1758  *      Write a pattern to the ATA shadow registers,
1759  *      and if a device is present, it will respond by
1760  *      correctly storing and echoing back the
1761  *      ATA shadow register contents.
1762  *
1763  *      LOCKING:
1764  *      caller.
1765  */
1766 static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1767 {
1768         struct ata_ioports *ioaddr = &ap->ioaddr;
1769         u8 nsect, lbal;
1770 
1771         ap->ops->sff_dev_select(ap, device);
1772 
1773         iowrite8(0x55, ioaddr->nsect_addr);
1774         iowrite8(0xaa, ioaddr->lbal_addr);
1775 
1776         iowrite8(0xaa, ioaddr->nsect_addr);
1777         iowrite8(0x55, ioaddr->lbal_addr);
1778 
1779         iowrite8(0x55, ioaddr->nsect_addr);
1780         iowrite8(0xaa, ioaddr->lbal_addr);
1781 
1782         nsect = ioread8(ioaddr->nsect_addr);
1783         lbal = ioread8(ioaddr->lbal_addr);
1784 
1785         if ((nsect == 0x55) && (lbal == 0xaa))
1786                 return 1;       /* we found a device */
1787 
1788         return 0;               /* nothing found */
1789 }
1790 
1791 /**
1792  *      ata_sff_dev_classify - Parse returned ATA device signature
1793  *      @dev: ATA device to classify (starting at zero)
1794  *      @present: device seems present
1795  *      @r_err: Value of error register on completion
1796  *
1797  *      After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
1798  *      an ATA/ATAPI-defined set of values is placed in the ATA
1799  *      shadow registers, indicating the results of device detection
1800  *      and diagnostics.
1801  *
1802  *      Select the ATA device, and read the values from the ATA shadow
1803  *      registers.  Then parse according to the Error register value,
1804  *      and the spec-defined values examined by ata_dev_classify().
1805  *
1806  *      LOCKING:
1807  *      caller.
1808  *
1809  *      RETURNS:
1810  *      Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1811  */
1812 unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
1813                                   u8 *r_err)
1814 {
1815         struct ata_port *ap = dev->link->ap;
1816         struct ata_taskfile tf;
1817         unsigned int class;
1818         u8 err;
1819 
1820         ap->ops->sff_dev_select(ap, dev->devno);
1821 
1822         memset(&tf, 0, sizeof(tf));
1823 
1824         ap->ops->sff_tf_read(ap, &tf);
1825         err = tf.feature;
1826         if (r_err)
1827                 *r_err = err;
1828 
1829         /* see if device passed diags: continue and warn later */
1830         if (err == 0)
1831                 /* diagnostic fail : do nothing _YET_ */
1832                 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
1833         else if (err == 1)
1834                 /* do nothing */ ;
1835         else if ((dev->devno == 0) && (err == 0x81))
1836                 /* do nothing */ ;
1837         else
1838                 return ATA_DEV_NONE;
1839 
1840         /* determine if device is ATA or ATAPI */
1841         class = ata_dev_classify(&tf);
1842 
1843         if (class == ATA_DEV_UNKNOWN) {
1844                 /* If the device failed diagnostic, it's likely to
1845                  * have reported incorrect device signature too.
1846                  * Assume ATA device if the device seems present but
1847                  * device signature is invalid with diagnostic
1848                  * failure.
1849                  */
1850                 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
1851                         class = ATA_DEV_ATA;
1852                 else
1853                         class = ATA_DEV_NONE;
1854         } else if ((class == ATA_DEV_ATA) &&
1855                    (ap->ops->sff_check_status(ap) == 0))
1856                 class = ATA_DEV_NONE;
1857 
1858         return class;
1859 }
1860 EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
1861 
1862 /**
1863  *      ata_sff_wait_after_reset - wait for devices to become ready after reset
1864  *      @link: SFF link which is just reset
1865  *      @devmask: mask of present devices
1866  *      @deadline: deadline jiffies for the operation
1867  *
1868  *      Wait for devices attached to SFF @link to become ready after
1869  *      reset.  This includes a preceding 150ms wait to avoid accessing
1870  *      the TF status register too early.
1871  *
1872  *      LOCKING:
1873  *      Kernel thread context (may sleep).
1874  *
1875  *      RETURNS:
1876  *      0 on success, -ENODEV if some or all of devices in @devmask
1877  *      don't seem to exist.  -errno on other errors.
1878  */
1879 int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
1880                              unsigned long deadline)
1881 {
1882         struct ata_port *ap = link->ap;
1883         struct ata_ioports *ioaddr = &ap->ioaddr;
1884         unsigned int dev0 = devmask & (1 << 0);
1885         unsigned int dev1 = devmask & (1 << 1);
1886         int rc, ret = 0;
1887 
1888         ata_msleep(ap, ATA_WAIT_AFTER_RESET);
1889 
1890         /* always check readiness of the master device */
1891         rc = ata_sff_wait_ready(link, deadline);
1892         /* -ENODEV means the odd clown forgot the D7 pulldown resistor
1893          * and TF status is 0xff, bail out on it too.
1894          */
1895         if (rc)
1896                 return rc;
1897 
1898         /* if device 1 was found in ata_devchk, wait for register
1899          * access briefly, then wait for BSY to clear.
1900          */
1901         if (dev1) {
1902                 int i;
1903 
1904                 ap->ops->sff_dev_select(ap, 1);
1905 
1906                 /* Wait for register access.  Some ATAPI devices fail
1907                  * to set nsect/lbal after reset, so don't waste too
1908                  * much time on it.  We're gonna wait for !BSY anyway.
1909                  */
1910                 for (i = 0; i < 2; i++) {
1911                         u8 nsect, lbal;
1912 
1913                         nsect = ioread8(ioaddr->nsect_addr);
1914                         lbal = ioread8(ioaddr->lbal_addr);
1915                         if ((nsect == 1) && (lbal == 1))
1916                                 break;
1917                         ata_msleep(ap, 50);     /* give drive a breather */
1918                 }
1919 
1920                 rc = ata_sff_wait_ready(link, deadline);
1921                 if (rc) {
1922                         if (rc != -ENODEV)
1923                                 return rc;
1924                         ret = rc;
1925                 }
1926         }
1927 
1928         /* is all this really necessary? */
1929         ap->ops->sff_dev_select(ap, 0);
1930         if (dev1)
1931                 ap->ops->sff_dev_select(ap, 1);
1932         if (dev0)
1933                 ap->ops->sff_dev_select(ap, 0);
1934 
1935         return ret;
1936 }
1937 EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
1938 
1939 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
1940                              unsigned long deadline)
1941 {
1942         struct ata_ioports *ioaddr = &ap->ioaddr;
1943 
1944         DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1945 
1946         if (ap->ioaddr.ctl_addr) {
1947                 /* software reset.  causes dev0 to be selected */
1948                 iowrite8(ap->ctl, ioaddr->ctl_addr);
1949                 udelay(20);     /* FIXME: flush */
1950                 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
1951                 udelay(20);     /* FIXME: flush */
1952                 iowrite8(ap->ctl, ioaddr->ctl_addr);
1953                 ap->last_ctl = ap->ctl;
1954         }
1955 
1956         /* wait for the port to become ready */
1957         return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
1958 }
1959 
1960 /**
1961  *      ata_sff_softreset - reset host port via ATA SRST
1962  *      @link: ATA link to reset
1963  *      @classes: resulting classes of attached devices
1964  *      @deadline: deadline jiffies for the operation
1965  *
1966  *      Reset host port using ATA SRST.
1967  *
1968  *      LOCKING:
1969  *      Kernel thread context (may sleep)
1970  *
1971  *      RETURNS:
1972  *      0 on success, -errno otherwise.
1973  */
1974 int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
1975                       unsigned long deadline)
1976 {
1977         struct ata_port *ap = link->ap;
1978         unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1979         unsigned int devmask = 0;
1980         int rc;
1981         u8 err;
1982 
1983         DPRINTK("ENTER\n");
1984 
1985         /* determine if device 0/1 are present */
1986         if (ata_devchk(ap, 0))
1987                 devmask |= (1 << 0);
1988         if (slave_possible && ata_devchk(ap, 1))
1989                 devmask |= (1 << 1);
1990 
1991         /* select device 0 again */
1992         ap->ops->sff_dev_select(ap, 0);
1993 
1994         /* issue bus reset */
1995         DPRINTK("about to softreset, devmask=%x\n", devmask);
1996         rc = ata_bus_softreset(ap, devmask, deadline);
1997         /* if link is occupied, -ENODEV too is an error */
1998         if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
1999                 ata_link_err(link, "SRST failed (errno=%d)\n", rc);
2000                 return rc;
2001         }
2002 
2003         /* determine by signature whether we have ATA or ATAPI devices */
2004         classes[0] = ata_sff_dev_classify(&link->device[0],
2005                                           devmask & (1 << 0), &err);
2006         if (slave_possible && err != 0x81)
2007                 classes[1] = ata_sff_dev_classify(&link->device[1],
2008                                                   devmask & (1 << 1), &err);
2009 
2010         DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2011         return 0;
2012 }
2013 EXPORT_SYMBOL_GPL(ata_sff_softreset);
2014 
2015 /**
2016  *      sata_sff_hardreset - reset host port via SATA phy reset
2017  *      @link: link to reset
2018  *      @class: resulting class of attached device
2019  *      @deadline: deadline jiffies for the operation
2020  *
2021  *      SATA phy-reset host port using DET bits of SControl register,
2022  *      wait for !BSY and classify the attached device.
2023  *
2024  *      LOCKING:
2025  *      Kernel thread context (may sleep)
2026  *
2027  *      RETURNS:
2028  *      0 on success, -errno otherwise.
2029  */
2030 int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
2031                        unsigned long deadline)
2032 {
2033         struct ata_eh_context *ehc = &link->eh_context;
2034         const unsigned long *timing = sata_ehc_deb_timing(ehc);
2035         bool online;
2036         int rc;
2037 
2038         rc = sata_link_hardreset(link, timing, deadline, &online,
2039                                  ata_sff_check_ready);
2040         if (online)
2041                 *class = ata_sff_dev_classify(link->device, 1, NULL);
2042 
2043         DPRINTK("EXIT, class=%u\n", *class);
2044         return rc;
2045 }
2046 EXPORT_SYMBOL_GPL(sata_sff_hardreset);
2047 
2048 /**
2049  *      ata_sff_postreset - SFF postreset callback
2050  *      @link: the target SFF ata_link
2051  *      @classes: classes of attached devices
2052  *
2053  *      This function is invoked after a successful reset.  It first
2054  *      calls ata_std_postreset() and performs SFF specific postreset
2055  *      processing.
2056  *
2057  *      LOCKING:
2058  *      Kernel thread context (may sleep)
2059  */
2060 void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
2061 {
2062         struct ata_port *ap = link->ap;
2063 
2064         ata_std_postreset(link, classes);
2065 
2066         /* is double-select really necessary? */
2067         if (classes[0] != ATA_DEV_NONE)
2068                 ap->ops->sff_dev_select(ap, 1);
2069         if (classes[1] != ATA_DEV_NONE)
2070                 ap->ops->sff_dev_select(ap, 0);
2071 
2072         /* bail out if no device is present */
2073         if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2074                 DPRINTK("EXIT, no device\n");
2075                 return;
2076         }
2077 
2078         /* set up device control */
2079         if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
2080                 ata_sff_set_devctl(ap, ap->ctl);
2081                 ap->last_ctl = ap->ctl;
2082         }
2083 }
2084 EXPORT_SYMBOL_GPL(ata_sff_postreset);
2085 
2086 /**
2087  *      ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers
2088  *      @qc: command
2089  *
2090  *      Drain the FIFO and device of any stuck data following a command
2091  *      that failed to complete.  In some cases this is necessary before
2092  *      a reset will recover the device.
2093  *
2094  */
2095 
2096 void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
2097 {
2098         int count;
2099         struct ata_port *ap;
2100 
2101         /* We only need to flush incoming data when a command was running */
2102         if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
2103                 return;
2104 
2105         ap = qc->ap;
2106         /* Drain up to 64K of data before we give up this recovery method */
2107         for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
2108                                                 && count < 65536; count += 2)
2109                 ioread16(ap->ioaddr.data_addr);
2110 
2111         /* Can become DEBUG later */
2112         if (count)
2113                 ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
2114 
2115 }
2116 EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
2117 
2118 /**
2119  *      ata_sff_error_handler - Stock error handler for SFF controller
2120  *      @ap: port to handle error for
2121  *
2122  *      Stock error handler for SFF controller.  It can handle both
2123  *      PATA and SATA controllers.  Many controllers should be able to
2124  *      use this EH as-is or with some added handling before and
2125  *      after.
2126  *
2127  *      LOCKING:
2128  *      Kernel thread context (may sleep)
2129  */
2130 void ata_sff_error_handler(struct ata_port *ap)
2131 {
2132         ata_reset_fn_t softreset = ap->ops->softreset;
2133         ata_reset_fn_t hardreset = ap->ops->hardreset;
2134         struct ata_queued_cmd *qc;
2135         unsigned long flags;
2136 
2137         qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2138         if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2139                 qc = NULL;
2140 
2141         spin_lock_irqsave(ap->lock, flags);
2142 
2143         /*
2144          * We *MUST* do FIFO draining before we issue a reset as
2145          * several devices helpfully clear their internal state and
2146          * will lock solid if we touch the data port post-reset.  Pass
2147          * qc in case anyone wants to do different PIO/DMA recovery or
2148          * has per-command fixups.
2149          */
2150         if (ap->ops->sff_drain_fifo)
2151                 ap->ops->sff_drain_fifo(qc);
2152 
2153         spin_unlock_irqrestore(ap->lock, flags);
2154 
2155         /* ignore built-in hardresets if SCR access is not available */
2156         if ((hardreset == sata_std_hardreset ||
2157              hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
2158                 hardreset = NULL;
2159 
2160         ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
2161                   ap->ops->postreset);
2162 }
2163 EXPORT_SYMBOL_GPL(ata_sff_error_handler);
2164 
2165 /**
2166  *      ata_sff_std_ports - initialize ioaddr with standard port offsets.
2167  *      @ioaddr: IO address structure to be initialized
2168  *
2169  *      Utility function which initializes data_addr, error_addr,
2170  *      feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
2171  *      device_addr, status_addr, and command_addr to standard offsets
2172  *      relative to cmd_addr.
2173  *
2174  *      Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
2175  */
2176 void ata_sff_std_ports(struct ata_ioports *ioaddr)
2177 {
2178         ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
2179         ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
2180         ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
2181         ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
2182         ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
2183         ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
2184         ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
2185         ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
2186         ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
2187         ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
2188 }
2189 EXPORT_SYMBOL_GPL(ata_sff_std_ports);
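/*
 * Example (illustrative sketch): a non-PCI driver typically fills cmd_addr
 * and ctl_addr from its own iomapped resources and lets ata_sff_std_ports()
 * derive the rest.  base and ctl are assumed to be void __iomem * cookies
 * the driver has already mapped.
 *
 *	struct ata_ioports *ioaddr = &ap->ioaddr;
 *
 *	ioaddr->cmd_addr = base;
 *	ioaddr->altstatus_addr = ioaddr->ctl_addr = ctl;
 *	ata_sff_std_ports(ioaddr);
 */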
2190 
2191 #ifdef CONFIG_PCI
2192 
2193 static int ata_resources_present(struct pci_dev *pdev, int port)
2194 {
2195         int i;
2196 
2197         /* Check the PCI resources for this channel are enabled */
2198         port = port * 2;
2199         for (i = 0; i < 2; i++) {
2200                 if (pci_resource_start(pdev, port + i) == 0 ||
2201                     pci_resource_len(pdev, port + i) == 0)
2202                         return 0;
2203         }
2204         return 1;
2205 }
2206 
2207 /**
2208  *      ata_pci_sff_init_host - acquire native PCI ATA resources and init host
2209  *      @host: target ATA host
2210  *
2211  *      Acquire native PCI ATA resources for @host and initialize the
2212  *      first two ports of @host accordingly.  Ports marked dummy are
2213  *      skipped and allocation failure makes the port dummy.
2214  *
2215  *      Note that native PCI resources are valid even for legacy hosts
2216  *      as we fix up pdev resources array early in boot, so this
2217  *      function can be used for both native and legacy SFF hosts.
2218  *
2219  *      LOCKING:
2220  *      Inherited from calling layer (may sleep).
2221  *
2222  *      RETURNS:
2223  *      0 if at least one port is initialized, -ENODEV if no port is
2224  *      available.
2225  */
2226 int ata_pci_sff_init_host(struct ata_host *host)
2227 {
2228         struct device *gdev = host->dev;
2229         struct pci_dev *pdev = to_pci_dev(gdev);
2230         unsigned int mask = 0;
2231         int i, rc;
2232 
2233         /* request, iomap BARs and init port addresses accordingly */
2234         for (i = 0; i < 2; i++) {
2235                 struct ata_port *ap = host->ports[i];
2236                 int base = i * 2;
2237                 void __iomem * const *iomap;
2238 
2239                 if (ata_port_is_dummy(ap))
2240                         continue;
2241 
2242                 /* Discard disabled ports.  Some controllers show
2243                  * their unused channels this way.  Disabled ports are
2244                  * made dummy.
2245                  */
2246                 if (!ata_resources_present(pdev, i)) {
2247                         ap->ops = &ata_dummy_port_ops;
2248                         continue;
2249                 }
2250 
2251                 rc = pcim_iomap_regions(pdev, 0x3 << base,
2252                                         dev_driver_string(gdev));
2253                 if (rc) {
2254                         dev_warn(gdev,
2255                                  "failed to request/iomap BARs for port %d (errno=%d)\n",
2256                                  i, rc);
2257                         if (rc == -EBUSY)
2258                                 pcim_pin_device(pdev);
2259                         ap->ops = &ata_dummy_port_ops;
2260                         continue;
2261                 }
2262                 host->iomap = iomap = pcim_iomap_table(pdev);
2263 
2264                 ap->ioaddr.cmd_addr = iomap[base];
2265                 ap->ioaddr.altstatus_addr =
2266                 ap->ioaddr.ctl_addr = (void __iomem *)
2267                         ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
2268                 ata_sff_std_ports(&ap->ioaddr);
2269 
2270                 ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
2271                         (unsigned long long)pci_resource_start(pdev, base),
2272                         (unsigned long long)pci_resource_start(pdev, base + 1));
2273 
2274                 mask |= 1 << i;
2275         }
2276 
2277         if (!mask) {
2278                 dev_err(gdev, "no available native port\n");
2279                 return -ENODEV;
2280         }
2281 
2282         return 0;
2283 }
2284 EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
2285 
2286 /**
2287  *      ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
2288  *      @pdev: target PCI device
2289  *      @ppi: array of port_info, must be enough for two ports
2290  *      @r_host: out argument for the initialized ATA host
2291  *
2292  *      Helper to allocate PIO-only SFF ATA host for @pdev, acquire
2293  *      all PCI resources and initialize it accordingly in one go.
2294  *
2295  *      LOCKING:
2296  *      Inherited from calling layer (may sleep).
2297  *
2298  *      RETURNS:
2299  *      0 on success, -errno otherwise.
2300  */
2301 int ata_pci_sff_prepare_host(struct pci_dev *pdev,
2302                              const struct ata_port_info * const *ppi,
2303                              struct ata_host **r_host)
2304 {
2305         struct ata_host *host;
2306         int rc;
2307 
2308         if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
2309                 return -ENOMEM;
2310 
2311         host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
2312         if (!host) {
2313                 dev_err(&pdev->dev, "failed to allocate ATA host\n");
2314                 rc = -ENOMEM;
2315                 goto err_out;
2316         }
2317 
2318         rc = ata_pci_sff_init_host(host);
2319         if (rc)
2320                 goto err_out;
2321 
2322         devres_remove_group(&pdev->dev, NULL);
2323         *r_host = host;
2324         return 0;
2325 
2326 err_out:
2327         devres_release_group(&pdev->dev, NULL);
2328         return rc;
2329 }
2330 EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
2331 
2332 /**
2333  *      ata_pci_sff_activate_host - start SFF host, request IRQ and register it
2334  *      @host: target SFF ATA host
2335  *      @irq_handler: irq_handler used when requesting IRQ(s)
2336  *      @sht: scsi_host_template to use when registering the host
2337  *
2338  *      This is the counterpart of ata_host_activate() for SFF ATA
2339  *      hosts.  This separate helper is necessary because SFF hosts
2340  *      use two separate interrupts in legacy mode.
2341  *
2342  *      LOCKING:
2343  *      Inherited from calling layer (may sleep).
2344  *
2345  *      RETURNS:
2346  *      0 on success, -errno otherwise.
2347  */
2348 int ata_pci_sff_activate_host(struct ata_host *host,
2349                               irq_handler_t irq_handler,
2350                               struct scsi_host_template *sht)
2351 {
2352         struct device *dev = host->dev;
2353         struct pci_dev *pdev = to_pci_dev(dev);
2354         const char *drv_name = dev_driver_string(host->dev);
2355         int legacy_mode = 0, rc;
2356 
2357         rc = ata_host_start(host);
2358         if (rc)
2359                 return rc;
2360 
2361         if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
2362                 u8 tmp8, mask = 0;
2363 
2364                 /*
2365                  * The ATA spec says we should use legacy mode when one
2366                  * port is in legacy mode, but disabled ports on some
2367                  * PCI hosts appear as fixed legacy ports, e.g. SB600/700
2368                  * on which the secondary port is not wired, so
2369                  * ignore ports that are marked as 'dummy' during
2370                  * this check.
2371                  */
2372                 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
2373                 if (!ata_port_is_dummy(host->ports[0]))
2374                         mask |= (1 << 0);
2375                 if (!ata_port_is_dummy(host->ports[1]))
2376                         mask |= (1 << 2);
2377                 if ((tmp8 & mask) != mask)
2378                         legacy_mode = 1;
2379         }
2380 
2381         if (!devres_open_group(dev, NULL, GFP_KERNEL))
2382                 return -ENOMEM;
2383 
2384         if (!legacy_mode && pdev->irq) {
2385                 int i;
2386 
2387                 rc = devm_request_irq(dev, pdev->irq, irq_handler,
2388                                       IRQF_SHARED, drv_name, host);
2389                 if (rc)
2390                         goto out;
2391 
2392                 for (i = 0; i < 2; i++) {
2393                         if (ata_port_is_dummy(host->ports[i]))
2394                                 continue;
2395                         ata_port_desc(host->ports[i], "irq %d", pdev->irq);
2396                 }
2397         } else if (legacy_mode) {
2398                 if (!ata_port_is_dummy(host->ports[0])) {
2399                         rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
2400                                               irq_handler, IRQF_SHARED,
2401                                               drv_name, host);
2402                         if (rc)
2403                                 goto out;
2404 
2405                         ata_port_desc(host->ports[0], "irq %d",
2406                                       ATA_PRIMARY_IRQ(pdev));
2407                 }
2408 
2409                 if (!ata_port_is_dummy(host->ports[1])) {
2410                         rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
2411                                               irq_handler, IRQF_SHARED,
2412                                               drv_name, host);
2413                         if (rc)
2414                                 goto out;
2415 
2416                         ata_port_desc(host->ports[1], "irq %d",
2417                                       ATA_SECONDARY_IRQ(pdev));
2418                 }
2419         }
2420 
2421         rc = ata_host_register(host, sht);
2422 out:
2423         if (rc == 0)
2424                 devres_remove_group(dev, NULL);
2425         else
2426                 devres_release_group(dev, NULL);
2427 
2428         return rc;
2429 }
2430 EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
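/*
 * Example (illustrative sketch): drivers that must adjust the host between
 * allocation and registration use the prepare/activate pair instead of
 * ata_pci_sff_init_one().  foo_fixup_host() and foo_sht are hypothetical.
 *
 *	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
 *	if (rc)
 *		return rc;
 *	foo_fixup_host(host);		// chip-specific tweaks
 *	return ata_pci_sff_activate_host(host, ata_sff_interrupt, &foo_sht);
 */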
2431 
2432 static const struct ata_port_info *ata_sff_find_valid_pi(
2433                                         const struct ata_port_info * const *ppi)
2434 {
2435         int i;
2436 
2437         /* look up the first valid port_info */
2438         for (i = 0; i < 2 && ppi[i]; i++)
2439                 if (ppi[i]->port_ops != &ata_dummy_port_ops)
2440                         return ppi[i];
2441 
2442         return NULL;
2443 }
2444 
2445 static int ata_pci_init_one(struct pci_dev *pdev,
2446                 const struct ata_port_info * const *ppi,
2447                 struct scsi_host_template *sht, void *host_priv,
2448                 int hflags, bool bmdma)
2449 {
2450         struct device *dev = &pdev->dev;
2451         const struct ata_port_info *pi;
2452         struct ata_host *host = NULL;
2453         int rc;
2454 
2455         DPRINTK("ENTER\n");
2456 
2457         pi = ata_sff_find_valid_pi(ppi);
2458         if (!pi) {
2459                 dev_err(&pdev->dev, "no valid port_info specified\n");
2460                 return -EINVAL;
2461         }
2462 
2463         if (!devres_open_group(dev, NULL, GFP_KERNEL))
2464                 return -ENOMEM;
2465 
2466         rc = pcim_enable_device(pdev);
2467         if (rc)
2468                 goto out;
2469 
2470 #ifdef CONFIG_ATA_BMDMA
2471         if (bmdma)
2472                 /* prepare and activate BMDMA host */
2473                 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2474         else
2475 #endif
2476                 /* prepare and activate SFF host */
2477                 rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2478         if (rc)
2479                 goto out;
2480         host->private_data = host_priv;
2481         host->flags |= hflags;
2482 
2483 #ifdef CONFIG_ATA_BMDMA
2484         if (bmdma) {
2485                 pci_set_master(pdev);
2486                 rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
2487         } else
2488 #endif
2489                 rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
2490 out:
2491         if (rc == 0)
2492                 devres_remove_group(&pdev->dev, NULL);
2493         else
2494                 devres_release_group(&pdev->dev, NULL);
2495 
2496         return rc;
2497 }
2498 
2499 /**
2500  *      ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
2501  *      @pdev: Controller to be initialized
2502  *      @ppi: array of port_info, must be enough for two ports
2503  *      @sht: scsi_host_template to use when registering the host
2504  *      @host_priv: host private_data
2505  *      @hflag: host flags
2506  *
2507  *      This is a helper function which can be called from a driver's
2508  *      xxx_init_one() probe function if the hardware uses traditional
2509  *      IDE taskfile registers and is PIO only.
2510  *
2511  *      ASSUMPTION:
2512  *      Nobody makes a single channel controller that appears solely as
2513  *      the secondary legacy port on PCI.
2514  *
2515  *      LOCKING:
2516  *      Inherited from PCI layer (may sleep).
2517  *
2518  *      RETURNS:
2519  *      Zero on success, negative errno-based value on error.
2520  */
2521 int ata_pci_sff_init_one(struct pci_dev *pdev,
2522                  const struct ata_port_info * const *ppi,
2523                  struct scsi_host_template *sht, void *host_priv, int hflag)
2524 {
2525         return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, 0);
2526 }
2527 EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
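/*
 * Example (illustrative sketch): a minimal PIO-only PCI driver probe built
 * on ata_pci_sff_init_one().  The port_info contents, foo_sht and the
 * foo_* names are hypothetical.
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		static const struct ata_port_info info = {
 *			.flags		= ATA_FLAG_SLAVE_POSS,
 *			.pio_mask	= ATA_PIO4,
 *			.port_ops	= &ata_sff_port_ops,
 *		};
 *		const struct ata_port_info *ppi[] = { &info, NULL };
 *
 *		return ata_pci_sff_init_one(pdev, ppi, &foo_sht, NULL, 0);
 *	}
 */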
2528 
2529 #endif /* CONFIG_PCI */
2530 
2531 /*
2532  *      BMDMA support
2533  */
2534 
2535 #ifdef CONFIG_ATA_BMDMA
2536 
2537 const struct ata_port_operations ata_bmdma_port_ops = {
2538         .inherits               = &ata_sff_port_ops,
2539 
2540         .error_handler          = ata_bmdma_error_handler,
2541         .post_internal_cmd      = ata_bmdma_post_internal_cmd,
2542 
2543         .qc_prep                = ata_bmdma_qc_prep,
2544         .qc_issue               = ata_bmdma_qc_issue,
2545 
2546         .sff_irq_clear          = ata_bmdma_irq_clear,
2547         .bmdma_setup            = ata_bmdma_setup,
2548         .bmdma_start            = ata_bmdma_start,
2549         .bmdma_stop             = ata_bmdma_stop,
2550         .bmdma_status           = ata_bmdma_status,
2551 
2552         .port_start             = ata_bmdma_port_start,
2553 };
2554 EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
2555 
2556 const struct ata_port_operations ata_bmdma32_port_ops = {
2557         .inherits               = &ata_bmdma_port_ops,
2558 
2559         .sff_data_xfer          = ata_sff_data_xfer32,
2560         .port_start             = ata_bmdma_port_start32,
2561 };
2562 EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
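/*
 * Example (illustrative sketch): a typical BMDMA driver inherits
 * ata_bmdma_port_ops and only overrides the timing hooks.  The foo_*
 * helpers are hypothetical.
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *		.set_piomode	= foo_set_piomode,
 *		.set_dmamode	= foo_set_dmamode,
 *	};
 */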
2563 
2564 /**
2565  *      ata_bmdma_fill_sg - Fill PCI IDE PRD table
2566  *      @qc: Metadata associated with taskfile to be transferred
2567  *
2568  *      Fill PCI IDE PRD (scatter-gather) table with segments
2569  *      associated with the current disk command.
2570  *
2571  *      LOCKING:
2572  *      spin_lock_irqsave(host lock)
2573  *
2574  */
2575 static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
2576 {
2577         struct ata_port *ap = qc->ap;
2578         struct ata_bmdma_prd *prd = ap->bmdma_prd;
2579         struct scatterlist *sg;
2580         unsigned int si, pi;
2581 
2582         pi = 0;
2583         for_each_sg(qc->sg, sg, qc->n_elem, si) {
2584                 u32 addr, offset;
2585                 u32 sg_len, len;
2586 
2587                 /* determine if physical DMA addr spans 64K boundary.
2588                  * Note h/w doesn't support 64-bit, so we unconditionally
2589                  * truncate dma_addr_t to u32.
2590                  */
2591                 addr = (u32) sg_dma_address(sg);
2592                 sg_len = sg_dma_len(sg);
2593 
2594                 while (sg_len) {
2595                         offset = addr & 0xffff;
2596                         len = sg_len;
2597                         if ((offset + sg_len) > 0x10000)
2598                                 len = 0x10000 - offset;
2599 
2600                         prd[pi].addr = cpu_to_le32(addr);
2601                         prd[pi].flags_len = cpu_to_le32(len & 0xffff);
2602                         VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
2603 
2604                         pi++;
2605                         sg_len -= len;
2606                         addr += len;
2607                 }
2608         }
2609 
2610         prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2611 }
2612 
2613 /**
2614  *      ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
2615  *      @qc: Metadata associated with taskfile to be transferred
2616  *
2617  *      Fill PCI IDE PRD (scatter-gather) table with segments
2618  *      associated with the current disk command.  Perform the fill
2619  *      so that we avoid writing any zero-length (i.e. 64K) records
2620  *      for controllers that don't follow the spec.
2621  *
2622  *      LOCKING:
2623  *      spin_lock_irqsave(host lock)
2624  *
2625  */
2626 static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
2627 {
2628         struct ata_port *ap = qc->ap;
2629         struct ata_bmdma_prd *prd = ap->bmdma_prd;
2630         struct scatterlist *sg;
2631         unsigned int si, pi;
2632 
2633         pi = 0;
2634         for_each_sg(qc->sg, sg, qc->n_elem, si) {
2635                 u32 addr, offset;
2636                 u32 sg_len, len, blen;
2637 
2638                 /* determine if physical DMA addr spans 64K boundary.
2639                  * Note h/w doesn't support 64-bit, so we unconditionally
2640                  * truncate dma_addr_t to u32.
2641                  */
2642                 addr = (u32) sg_dma_address(sg);
2643                 sg_len = sg_dma_len(sg);
2644 
2645                 while (sg_len) {
2646                         offset = addr & 0xffff;
2647                         len = sg_len;
2648                         if ((offset + sg_len) > 0x10000)
2649                                 len = 0x10000 - offset;
2650 
2651                         blen = len & 0xffff;
2652                         prd[pi].addr = cpu_to_le32(addr);
2653                         if (blen == 0) {
2654                                 /* Some PATA chipsets like the CS5530 can't
2655                                    cope with 0x0000 meaning 64K as the spec
2656                                    says */
2657                                 prd[pi].flags_len = cpu_to_le32(0x8000);
2658                                 blen = 0x8000;
2659                                 prd[++pi].addr = cpu_to_le32(addr + 0x8000);
2660                         }
2661                         prd[pi].flags_len = cpu_to_le32(blen);
2662                         VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
2663 
2664                         pi++;
2665                         sg_len -= len;
2666                         addr += len;
2667                 }
2668         }
2669 
2670         prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2671 }
2672 
2673 /**
2674  *      ata_bmdma_qc_prep - Prepare taskfile for submission
2675  *      @qc: Metadata associated with taskfile to be prepared
2676  *
2677  *      Prepare ATA taskfile for submission.
2678  *
2679  *      LOCKING:
2680  *      spin_lock_irqsave(host lock)
2681  */
2682 void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
2683 {
2684         if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2685                 return;
2686 
2687         ata_bmdma_fill_sg(qc);
2688 }
2689 EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
2690 
2691 /**
2692  *      ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
2693  *      @qc: Metadata associated with taskfile to be prepared
2694  *
2695  *      Prepare ATA taskfile for submission.
2696  *
2697  *      LOCKING:
2698  *      spin_lock_irqsave(host lock)
2699  */
2700 void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
2701 {
2702         if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2703                 return;
2704 
2705         ata_bmdma_fill_sg_dumb(qc);
2706 }
2707 EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
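/*
 * Note: drivers for chipsets that cannot handle a zero PRD length (which
 * the spec defines as 64K) select the dumb variant instead of
 * ata_bmdma_qc_prep (hypothetical sketch):
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.qc_prep	= ata_bmdma_dumb_qc_prep,
 *	};
 */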
2708 
2709 /**
2710  *      ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
2711  *      @qc: command to issue to device
2712  *
2713  *      This function issues a PIO, NODATA or DMA command to a
2714  *      SFF/BMDMA controller.  PIO and NODATA are handled by
2715  *      ata_sff_qc_issue().
2716  *
2717  *      LOCKING:
2718  *      spin_lock_irqsave(host lock)
2719  *
2720  *      RETURNS:
2721  *      Zero on success, AC_ERR_* mask on failure
2722  */
2723 unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2724 {
2725         struct ata_port *ap = qc->ap;
2726         struct ata_link *link = qc->dev->link;
2727 
2728         /* defer PIO handling to sff_qc_issue */
2729         if (!ata_is_dma(qc->tf.protocol))
2730                 return ata_sff_qc_issue(qc);
2731 
2732         /* select the device */
2733         ata_dev_select(ap, qc->dev->devno, 1, 0);
2734 
2735         /* start the command */
2736         switch (qc->tf.protocol) {
2737         case ATA_PROT_DMA:
2738                 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2739 
2740                 ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
2741                 ap->ops->bmdma_setup(qc);           /* set up bmdma */
2742                 ap->ops->bmdma_start(qc);           /* initiate bmdma */
2743                 ap->hsm_task_state = HSM_ST_LAST;
2744                 break;
2745 
2746         case ATAPI_PROT_DMA:
2747                 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2748 
2749                 ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
2750                 ap->ops->bmdma_setup(qc);           /* set up bmdma */
2751                 ap->hsm_task_state = HSM_ST_FIRST;
2752 
2753                 /* send cdb by polling if no cdb interrupt */
2754                 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
2755                         ata_sff_queue_pio_task(link, 0);
2756                 break;
2757 
2758         default:
2759                 WARN_ON(1);
2760                 return AC_ERR_SYSTEM;
2761         }
2762 
2763         return 0;
2764 }
2765 EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);
2766 
2767 /**
2768  *      ata_bmdma_port_intr - Handle BMDMA port interrupt
2769  *      @ap: Port on which interrupt arrived (possibly...)
2770  *      @qc: Taskfile currently active in engine
2771  *
2772  *      Handle port interrupt for given queued command.
2773  *
2774  *      LOCKING:
2775  *      spin_lock_irqsave(host lock)
2776  *
2777  *      RETURNS:
2778  *      One if interrupt was handled, zero if not (shared irq).
2779  */
2780 unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
2781 {
2782         struct ata_eh_info *ehi = &ap->link.eh_info;
2783         u8 host_stat = 0;
2784         bool bmdma_stopped = false;
2785         unsigned int handled;
2786 
2787         if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
2788                 /* check status of DMA engine */
2789                 host_stat = ap->ops->bmdma_status(ap);
2790                 VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat);
2791 
2792                 /* if it's not our irq... */
2793                 if (!(host_stat & ATA_DMA_INTR))
2794                         return ata_sff_idle_irq(ap);
2795 
2796                 /* before we do anything else, clear DMA-Start bit */
2797                 ap->ops->bmdma_stop(qc);
2798                 bmdma_stopped = true;
2799 
2800                 if (unlikely(host_stat & ATA_DMA_ERR)) {
2801                         /* error when transferring data to/from memory */
2802                         qc->err_mask |= AC_ERR_HOST_BUS;
2803                         ap->hsm_task_state = HSM_ST_ERR;
2804                 }
2805         }
2806 
2807         handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);
2808 
2809         if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
2810                 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2811 
2812         return handled;
2813 }
2814 EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);
2815 
2816 /**
2817  *      ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
2818  *      @irq: irq line (unused)
2819  *      @dev_instance: pointer to our ata_host information structure
2820  *
2821  *      Default interrupt handler for PCI IDE devices.  Calls
2822  *      ata_bmdma_port_intr() for each port that is not disabled.
2823  *
2824  *      LOCKING:
2825  *      Obtains host lock during operation.
2826  *
2827  *      RETURNS:
2828  *      IRQ_NONE or IRQ_HANDLED.
2829  */
2830 irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
2831 {
2832         return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
2833 }
2834 EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);
2835 
2836 /**
2837  *      ata_bmdma_error_handler - Stock error handler for BMDMA controller
2838  *      @ap: port to handle error for
2839  *
2840  *      Stock error handler for BMDMA controller.  It can handle both
2841  *      PATA and SATA controllers.  Most BMDMA controllers should be
2842  *      able to use this EH as-is or with some added handling before
2843  *      and after.
2844  *
2845  *      LOCKING:
2846  *      Kernel thread context (may sleep)
2847  */
2848 void ata_bmdma_error_handler(struct ata_port *ap)
2849 {
2850         struct ata_queued_cmd *qc;
2851         unsigned long flags;
2852         bool thaw = false;
2853 
2854         qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2855         if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2856                 qc = NULL;
2857 
2858         /* reset PIO HSM and stop DMA engine */
2859         spin_lock_irqsave(ap->lock, flags);
2860 
2861         if (qc && ata_is_dma(qc->tf.protocol)) {
2862                 u8 host_stat;
2863 
2864                 host_stat = ap->ops->bmdma_status(ap);
2865 
2866                 /* BMDMA controllers indicate host bus error by
2867                  * setting DMA_ERR bit and timing out.  As it wasn't
2868                  * really a timeout event, adjust error mask and
2869                  * cancel frozen state.
2870                  */
2871                 if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
2872                         qc->err_mask = AC_ERR_HOST_BUS;
2873                         thaw = true;
2874                 }
2875 
2876                 ap->ops->bmdma_stop(qc);
2877 
2878                 /* if we're gonna thaw, make sure IRQ is clear */
2879                 if (thaw) {
2880                         ap->ops->sff_check_status(ap);
2881                         if (ap->ops->sff_irq_clear)
2882                                 ap->ops->sff_irq_clear(ap);
2883                 }
2884         }
2885 
2886         spin_unlock_irqrestore(ap->lock, flags);
2887 
2888         if (thaw)
2889                 ata_eh_thaw_port(ap);
2890 
2891         ata_sff_error_handler(ap);
2892 }
2893 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
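/*
 * Illustrative sketch (not part of the original file): as noted above, a
 * controller needing extra work around the stock EH can wrap it, e.g.:
 *
 *	static void my_error_handler(struct ata_port *ap)
 *	{
 *		my_controller_quiesce(ap);		(hypothetical helper)
 *		ata_bmdma_error_handler(ap);
 *		my_controller_resume(ap);		(hypothetical helper)
 *	}
 *
 * and point .error_handler in its ata_port_operations at the wrapper.
 */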
2894 
2895 /**
2896  *      ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
2897  *      @qc: internal command to clean up
2898  *
2899  *      LOCKING:
2900  *      Kernel thread context (may sleep)
2901  */
2902 void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
2903 {
2904         struct ata_port *ap = qc->ap;
2905         unsigned long flags;
2906 
2907         if (ata_is_dma(qc->tf.protocol)) {
2908                 spin_lock_irqsave(ap->lock, flags);
2909                 ap->ops->bmdma_stop(qc);
2910                 spin_unlock_irqrestore(ap->lock, flags);
2911         }
2912 }
2913 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
2914 
2915 /**
2916  *      ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
2917  *      @ap: Port associated with this ATA transaction.
2918  *
2919  *      Clear interrupt and error flags in DMA status register.
2920  *
2921  *      May be used as the irq_clear() entry in ata_port_operations.
2922  *
2923  *      LOCKING:
2924  *      spin_lock_irqsave(host lock)
2925  */
2926 void ata_bmdma_irq_clear(struct ata_port *ap)
2927 {
2928         void __iomem *mmio = ap->ioaddr.bmdma_addr;
2929 
2930         if (!mmio)
2931                 return;
2932 
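        /* The interrupt and error bits of the BMDMA status register are
         * write-one-to-clear, so writing back the value just read clears
         * whatever is currently pending.
         */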
2933         iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
2934 }
2935 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
2936 
2937 /**
2938  *      ata_bmdma_setup - Set up PCI IDE BMDMA transaction
2939  *      @qc: Info associated with this ATA transaction.
2940  *
2941  *      LOCKING:
2942  *      spin_lock_irqsave(host lock)
2943  */
2944 void ata_bmdma_setup(struct ata_queued_cmd *qc)
2945 {
2946         struct ata_port *ap = qc->ap;
2947         unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
2948         u8 dmactl;
2949 
2950         /* load PRD table addr. */
2951         mb();   /* make sure PRD table writes are visible to controller */
2952         iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2953 
2954         /* specify data direction, triple-check start bit is clear */
2955         dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2956         dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
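        /* ATA_DMA_WR makes the engine write to memory, i.e. it is set for
         * a device-to-host (read) command, hence the inverted test below.
         */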
2957         if (!rw)
2958                 dmactl |= ATA_DMA_WR;
2959         iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2960 
2961         /* issue r/w command */
2962         ap->ops->sff_exec_command(ap, &qc->tf);
2963 }
2964 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
2965 
2966 /**
2967  *      ata_bmdma_start - Start a PCI IDE BMDMA transaction
2968  *      @qc: Info associated with this ATA transaction.
2969  *
2970  *      LOCKING:
2971  *      spin_lock_irqsave(host lock)
2972  */
2973 void ata_bmdma_start(struct ata_queued_cmd *qc)
2974 {
2975         struct ata_port *ap = qc->ap;
2976         u8 dmactl;
2977 
2978         /* start host DMA transaction */
2979         dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2980         iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2981 
2982         /* Strictly, one may wish to issue an ioread8() here, to
2983          * flush the mmio write.  However, control also passes
2984          * to the hardware at this point, and it will interrupt
2985          * us when we are to resume control.  So, in effect,
2986          * we don't care when the mmio write flushes.
2987          * Further, a read of the DMA status register _immediately_
2988          * following the write may not be what certain flaky hardware
2989          * is expecting, so it is best not to add a readb()
2990          * without first testing all the MMIO ATA cards/mobos.
2991          * Or maybe I'm just being paranoid.
2992          *
2993          * FIXME: The posting of this write means I/O starts are
2994          * unnecessarily delayed for MMIO.
2995          */
2996 }
2997 EXPORT_SYMBOL_GPL(ata_bmdma_start);
2998 
2999 /**
3000  *      ata_bmdma_stop - Stop PCI IDE BMDMA transfer
3001  *      @qc: Command we are ending DMA for
3002  *
3003  *      Clears the ATA_DMA_START flag in the dma control register
3004  *
3005  *      May be used as the bmdma_stop() entry in ata_port_operations.
3006  *
3007  *      LOCKING:
3008  *      spin_lock_irqsave(host lock)
3009  */
3010 void ata_bmdma_stop(struct ata_queued_cmd *qc)
3011 {
3012         struct ata_port *ap = qc->ap;
3013         void __iomem *mmio = ap->ioaddr.bmdma_addr;
3014 
3015         /* clear start/stop bit */
3016         iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
3017                  mmio + ATA_DMA_CMD);
3018 
3019         /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
3020         ata_sff_dma_pause(ap);
3021 }
3022 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
3023 
3024 /**
3025  *      ata_bmdma_status - Read PCI IDE BMDMA status
3026  *      @ap: Port associated with this ATA transaction.
3027  *
3028  *      Read and return BMDMA status register.
3029  *
3030  *      May be used as the bmdma_status() entry in ata_port_operations.
3031  *
3032  *      LOCKING:
3033  *      spin_lock_irqsave(host lock)
3034  */
3035 u8 ata_bmdma_status(struct ata_port *ap)
3036 {
3037         return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3038 }
3039 EXPORT_SYMBOL_GPL(ata_bmdma_status);
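/*
 * Sketch of how callers in this file interpret the returned value, using
 * the standard bit definitions from <linux/ata.h> (illustrative only):
 *
 *	u8 host_stat = ap->ops->bmdma_status(ap);
 *
 *	host_stat & ATA_DMA_INTR	- controller has raised an interrupt
 *	host_stat & ATA_DMA_ERR		- host bus error during the transfer
 *	host_stat & ATA_DMA_ACTIVE	- transfer still in progress
 */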
3040 
3041 
3042 /**
3043  *      ata_bmdma_port_start - Set port up for bmdma.
3044  *      @ap: Port to initialize
3045  *
3046  *      Called just after data structures for each port are
3047  *      initialized.  Allocates space for PRD table.
3048  *
3049  *      May be used as the port_start() entry in ata_port_operations.
3050  *
3051  *      LOCKING:
3052  *      Inherited from caller.
3053  */
3054 int ata_bmdma_port_start(struct ata_port *ap)
3055 {
3056         if (ap->mwdma_mask || ap->udma_mask) {
3057                 ap->bmdma_prd =
3058                         dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
3059                                             &ap->bmdma_prd_dma, GFP_KERNEL);
3060                 if (!ap->bmdma_prd)
3061                         return -ENOMEM;
3062         }
3063 
3064         return 0;
3065 }
3066 EXPORT_SYMBOL_GPL(ata_bmdma_port_start);
3067 
3068 /**
3069  *      ata_bmdma_port_start32 - Set port up for dma.
3070  *      @ap: Port to initialize
3071  *
3072  *      Called just after data structures for each port are
3073  *      initialized.  Enables 32bit PIO and allocates space for PRD
3074  *      table.
3075  *
3076  *      May be used as the port_start() entry in ata_port_operations for
3077  *      devices that are capable of 32bit PIO.
3078  *
3079  *      LOCKING:
3080  *      Inherited from caller.
3081  */
3082 int ata_bmdma_port_start32(struct ata_port *ap)
3083 {
3084         ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
3085         return ata_bmdma_port_start(ap);
3086 }
3087 EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
3088 
3089 #ifdef CONFIG_PCI
3090 
3091 /**
3092  *      ata_pci_bmdma_clear_simplex -   attempt to kick device out of simplex
3093  *      @pdev: PCI device
3094  *
3095  *      Some PCI ATA devices report simplex mode but in fact can be told to
3096  *      enter non-simplex mode. This implements the necessary logic to
3097  *      perform the task on such devices. Calling it on other devices will
3098  *      have -undefined- behaviour.
3099  */
3100 int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
3101 {
3102         unsigned long bmdma = pci_resource_start(pdev, 4);
3103         u8 simplex;
3104 
3105         if (bmdma == 0)
3106                 return -ENOENT;
3107 
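        /* Offset 2 of the BMDMA I/O range is the status register: bit 7 is
         * the simplex flag, bits 6:5 are the drive DMA-capable bits, which
         * the write below preserves while clearing everything else.
         */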
3108         simplex = inb(bmdma + 0x02);
3109         outb(simplex & 0x60, bmdma + 0x02);
3110         simplex = inb(bmdma + 0x02);
3111         if (simplex & 0x80)
3112                 return -EOPNOTSUPP;
3113         return 0;
3114 }
3115 EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
3116 
3117 static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
3118 {
3119         int i;
3120 
3121         dev_err(host->dev, "BMDMA: %s, falling back to PIO\n", reason);
3122 
3123         for (i = 0; i < 2; i++) {
3124                 host->ports[i]->mwdma_mask = 0;
3125                 host->ports[i]->udma_mask = 0;
3126         }
3127 }
3128 
3129 /**
3130  *      ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
3131  *      @host: target ATA host
3132  *
3133  *      Acquire PCI BMDMA resources and initialize @host accordingly.
3134  *
3135  *      LOCKING:
3136  *      Inherited from calling layer (may sleep).
3137  */
3138 void ata_pci_bmdma_init(struct ata_host *host)
3139 {
3140         struct device *gdev = host->dev;
3141         struct pci_dev *pdev = to_pci_dev(gdev);
3142         int i, rc;
3143 
3144         /* No BAR4 allocation: No DMA */
3145         if (pci_resource_start(pdev, 4) == 0) {
3146                 ata_bmdma_nodma(host, "BAR4 is zero");
3147                 return;
3148         }
3149 
3150         /*
3151          * Some controllers require BMDMA region to be initialized
3152          * even if DMA is not in use to clear IRQ status via
3153          * ->sff_irq_clear method.  Try to initialize bmdma_addr
3154          * regardless of dma masks.
3155          */
3156         rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
3157         if (rc)
3158                 ata_bmdma_nodma(host, "failed to set dma mask");
3159 
3160         /* request and iomap DMA region */
3161         rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
3162         if (rc) {
3163                 ata_bmdma_nodma(host, "failed to request/iomap BAR4");
3164                 return;
3165         }
3166         host->iomap = pcim_iomap_table(pdev);
3167 
3168         for (i = 0; i < 2; i++) {
3169                 struct ata_port *ap = host->ports[i];
3170                 void __iomem *bmdma = host->iomap[4] + 8 * i;
3171 
3172                 if (ata_port_is_dummy(ap))
3173                         continue;
3174 
3175                 ap->ioaddr.bmdma_addr = bmdma;
3176                 if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
3177                     (ioread8(bmdma + 2) & 0x80))
3178                         host->flags |= ATA_HOST_SIMPLEX;
3179 
3180                 ata_port_desc(ap, "bmdma 0x%llx",
3181                     (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
3182         }
3183 }
3184 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
3185 
3186 /**
3187  *      ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
3188  *      @pdev: target PCI device
3189  *      @ppi: array of port_info, must be enough for two ports
3190  *      @r_host: out argument for the initialized ATA host
3191  *
3192  *      Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
3193  *      resources and initialize it accordingly in one go.
3194  *
3195  *      LOCKING:
3196  *      Inherited from calling layer (may sleep).
3197  *
3198  *      RETURNS:
3199  *      0 on success, -errno otherwise.
3200  */
3201 int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
3202                                const struct ata_port_info * const * ppi,
3203                                struct ata_host **r_host)
3204 {
3205         int rc;
3206 
3207         rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
3208         if (rc)
3209                 return rc;
3210 
3211         ata_pci_bmdma_init(*r_host);
3212         return 0;
3213 }
3214 EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
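/*
 * Illustrative sketch (not part of the original file): a driver that needs
 * its own fixups between host preparation and activation might do:
 *
 *	struct ata_host *host;
 *	int rc;
 *
 *	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
 *	if (rc)
 *		return rc;
 *	host->private_data = my_priv;		(hypothetical driver data)
 *	pci_set_master(pdev);
 *	return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &my_sht);
 *
 * "my_priv" and "my_sht" are placeholder names.
 */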
3215 
3216 /**
3217  *      ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
3218  *      @pdev: Controller to be initialized
3219  *      @ppi: array of port_info, must be enough for two ports
3220  *      @sht: scsi_host_template to use when registering the host
3221  *      @host_priv: host private_data
3222  *      @hflags: host flags
3223  *
3224  *      This function is similar to ata_pci_sff_init_one() but also
3225  *      takes care of BMDMA initialization.
3226  *
3227  *      LOCKING:
3228  *      Inherited from PCI layer (may sleep).
3229  *
3230  *      RETURNS:
3231  *      Zero on success, negative errno-based value on error.
3232  */
3233 int ata_pci_bmdma_init_one(struct pci_dev *pdev,
3234                            const struct ata_port_info * const * ppi,
3235                            struct scsi_host_template *sht, void *host_priv,
3236                            int hflags)
3237 {
3238         return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, 1);
3239 }
3240 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
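/*
 * Illustrative sketch (not part of the original file): a minimal PCI BMDMA
 * driver probe built on this helper.  "my_port_info", "my_sht" and
 * "my_init_one" are placeholder names:
 *
 *	static const struct ata_port_info my_port_info = {
 *		.flags		= ATA_FLAG_SLAVE_POSS,
 *		.pio_mask	= ATA_PIO4,
 *		.mwdma_mask	= ATA_MWDMA2,
 *		.udma_mask	= ATA_UDMA5,
 *		.port_ops	= &ata_bmdma_port_ops,
 *	};
 *
 *	static int my_init_one(struct pci_dev *pdev,
 *			       const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *
 *		return ata_pci_bmdma_init_one(pdev, ppi, &my_sht, NULL, 0);
 *	}
 */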
3241 
3242 #endif /* CONFIG_PCI */
3243 #endif /* CONFIG_ATA_BMDMA */
3244 
3245 /**
3246  *      ata_sff_port_init - Initialize SFF/BMDMA ATA port
3247  *      @ap: Port to initialize
3248  *
3249  *      Called on port allocation to initialize SFF/BMDMA specific
3250  *      fields.
3251  *
3252  *      LOCKING:
3253  *      None.
3254  */
3255 void ata_sff_port_init(struct ata_port *ap)
3256 {
3257         INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
3258         ap->ctl = ATA_DEVCTL_OBS;
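        /* sentinel: not a valid ctl value, so the first control register
         * write is never skipped by the last_ctl comparison
         */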
3259         ap->last_ctl = 0xFF;
3260 }
3261 
3262 int __init ata_sff_init(void)
3263 {
3264         ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
3265         if (!ata_sff_wq)
3266                 return -ENOMEM;
3267 
3268         return 0;
3269 }
3270 
3271 void ata_sff_exit(void)
3272 {
3273         destroy_workqueue(ata_sff_wq);
3274 }
