drivers/ata/ahci_xgene.c


DEFINITIONS

This source file includes the following definitions.
  1. xgene_ahci_init_memram
  2. xgene_ahci_poll_reg_val
  3. xgene_ahci_restart_engine
  4. xgene_ahci_qc_issue
  5. xgene_ahci_is_memram_inited
  6. xgene_ahci_read_id
  7. xgene_ahci_set_phy_cfg
  8. xgene_ahci_do_hardreset
  9. xgene_ahci_hardreset
  10. xgene_ahci_host_stop
  11. xgene_ahci_pmp_softreset
  12. xgene_ahci_softreset
  13. xgene_ahci_handle_broken_edge_irq
  14. xgene_ahci_irq_intr
  15. xgene_ahci_hw_init
  16. xgene_ahci_mux_select
  17. xgene_ahci_probe

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3  * AppliedMicro X-Gene SoC SATA Host Controller Driver
   4  *
   5  * Copyright (c) 2014, Applied Micro Circuits Corporation
   6  * Author: Loc Ho <lho@apm.com>
   7  *         Tuan Phan <tphan@apm.com>
   8  *         Suman Tripathi <stripathi@apm.com>
   9  *
  10  * NOTE: PM support is not currently available.
  11  */
  12 #include <linux/acpi.h>
  13 #include <linux/module.h>
  14 #include <linux/platform_device.h>
  15 #include <linux/ahci_platform.h>
  16 #include <linux/of_address.h>
  17 #include <linux/of_device.h>
  18 #include <linux/of_irq.h>
  19 #include <linux/phy/phy.h>
  20 #include "ahci.h"
  21 
  22 #define DRV_NAME "xgene-ahci"
  23 
   24 /* Max # of disks per controller */
  25 #define MAX_AHCI_CHN_PERCTR             2
  26 
  27 /* MUX CSR */
  28 #define SATA_ENET_CONFIG_REG            0x00000000
  29 #define  CFG_SATA_ENET_SELECT_MASK      0x00000001
  30 
  31 /* SATA core host controller CSR */
  32 #define SLVRDERRATTRIBUTES              0x00000000
  33 #define SLVWRERRATTRIBUTES              0x00000004
  34 #define MSTRDERRATTRIBUTES              0x00000008
  35 #define MSTWRERRATTRIBUTES              0x0000000c
  36 #define BUSCTLREG                       0x00000014
  37 #define IOFMSTRWAUX                     0x00000018
  38 #define INTSTATUSMASK                   0x0000002c
  39 #define ERRINTSTATUS                    0x00000030
  40 #define ERRINTSTATUSMASK                0x00000034
  41 
  42 /* SATA host AHCI CSR */
  43 #define PORTCFG                         0x000000a4
  44 #define  PORTADDR_SET(dst, src) \
  45                 (((dst) & ~0x0000003f) | (((u32)(src)) & 0x0000003f))
  46 #define PORTPHY1CFG             0x000000a8
  47 #define PORTPHY1CFG_FRCPHYRDY_SET(dst, src) \
  48                 (((dst) & ~0x00100000) | (((u32)(src) << 0x14) & 0x00100000))
  49 #define PORTPHY2CFG                     0x000000ac
  50 #define PORTPHY3CFG                     0x000000b0
  51 #define PORTPHY4CFG                     0x000000b4
  52 #define PORTPHY5CFG                     0x000000b8
  53 #define SCTL0                           0x0000012C
  54 #define PORTPHY5CFG_RTCHG_SET(dst, src) \
  55                 (((dst) & ~0xfff00000) | (((u32)(src) << 0x14) & 0xfff00000))
  56 #define PORTAXICFG_EN_CONTEXT_SET(dst, src) \
  57                 (((dst) & ~0x01000000) | (((u32)(src) << 0x18) & 0x01000000))
  58 #define PORTAXICFG                      0x000000bc
  59 #define PORTAXICFG_OUTTRANS_SET(dst, src) \
  60                 (((dst) & ~0x00f00000) | (((u32)(src) << 0x14) & 0x00f00000))
  61 #define PORTRANSCFG                     0x000000c8
  62 #define PORTRANSCFG_RXWM_SET(dst, src)          \
  63                 (((dst) & ~0x0000007f) | (((u32)(src)) & 0x0000007f))
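/*
 * The *_SET(dst, src) macros above are read-modify-write field helpers:
 * each clears the field's bits in the current register value (dst) and
 * ORs in the new field value (src) shifted into position.
 */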
  64 
  65 /* SATA host controller AXI CSR */
  66 #define INT_SLV_TMOMASK                 0x00000010
  67 
  68 /* SATA diagnostic CSR */
  69 #define CFG_MEM_RAM_SHUTDOWN            0x00000070
  70 #define BLOCK_MEM_RDY                   0x00000074
  71 
  72 /* Max retry for link down */
  73 #define MAX_LINK_DOWN_RETRY 3
  74 
  75 enum xgene_ahci_version {
  76         XGENE_AHCI_V1 = 1,
  77         XGENE_AHCI_V2,
  78 };
  79 
  80 struct xgene_ahci_context {
  81         struct ahci_host_priv *hpriv;
  82         struct device *dev;
   83         u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued */
  84         u32 class[MAX_AHCI_CHN_PERCTR]; /* tracking the class of device */
  85         void __iomem *csr_core;         /* Core CSR address of IP */
  86         void __iomem *csr_diag;         /* Diag CSR address of IP */
  87         void __iomem *csr_axi;          /* AXI CSR address of IP */
  88         void __iomem *csr_mux;          /* MUX CSR address of IP */
  89 };
  90 
  91 static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
  92 {
  93         dev_dbg(ctx->dev, "Release memory from shutdown\n");
  94         writel(0x0, ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN);
  95         readl(ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN); /* Force a barrier */
  96         msleep(1);      /* reset may take up to 1ms */
  97         if (readl(ctx->csr_diag + BLOCK_MEM_RDY) != 0xFFFFFFFF) {
  98                 dev_err(ctx->dev, "failed to release memory from shutdown\n");
  99                 return -ENODEV;
 100         }
 101         return 0;
 102 }
 103 
 104 /**
  105  * xgene_ahci_poll_reg_val - Poll a register for a specific value.
 106  * @ap : ATA port of interest.
 107  * @reg : Register of interest.
 108  * @val : Value to be attained.
 109  * @interval : waiting interval for polling.
 110  * @timeout : timeout for achieving the value.
 111  */
 112 static int xgene_ahci_poll_reg_val(struct ata_port *ap,
 113                                    void __iomem *reg, unsigned
 114                                    int val, unsigned long interval,
 115                                    unsigned long timeout)
 116 {
 117         unsigned long deadline;
 118         unsigned int tmp;
 119 
 120         tmp = ioread32(reg);
 121         deadline = ata_deadline(jiffies, timeout);
 122 
 123         while (tmp != val && time_before(jiffies, deadline)) {
 124                 ata_msleep(ap, interval);
 125                 tmp = ioread32(reg);
 126         }
 127 
 128         return tmp;
 129 }
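/*
 * Note: xgene_ahci_poll_reg_val() returns the last value read from the
 * register, so a caller polling for 0 (as xgene_ahci_restart_engine() does
 * for PxCI) can treat any nonzero return as a timeout.
 */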
 130 
 131 /**
 132  * xgene_ahci_restart_engine - Restart the dma engine.
 133  * @ap : ATA port of interest
 134  *
  135  * Waits for the completion of outstanding commands and restarts
  136  * the DMA engine inside the controller.
 137  */
 138 static int xgene_ahci_restart_engine(struct ata_port *ap)
 139 {
 140         struct ahci_host_priv *hpriv = ap->host->private_data;
 141         struct ahci_port_priv *pp = ap->private_data;
 142         void __iomem *port_mmio = ahci_port_base(ap);
 143         u32 fbs;
 144 
  145         /*
  146          * In the case of a PMP, multiple IDENTIFY DEVICE commands can
  147          * be outstanding in PxCI, so PxCI must be polled for the
  148          * completion of outstanding IDENTIFY DEVICE commands before
  149          * we restart the DMA engine.
  150          */
  151         if (xgene_ahci_poll_reg_val(ap, port_mmio +
  152                                     PORT_CMD_ISSUE, 0x0, 1, 100))
  153                 return -EBUSY;
 154 
 155         hpriv->stop_engine(ap);
 156         ahci_start_fis_rx(ap);
 157 
 158         /*
 159          * Enable the PxFBS.FBS_EN bit as it
 160          * gets cleared due to stopping the engine.
 161          */
 162         if (pp->fbs_supported) {
 163                 fbs = readl(port_mmio + PORT_FBS);
 164                 writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
 165                 fbs = readl(port_mmio + PORT_FBS);
 166         }
 167 
 168         hpriv->start_engine(ap);
 169 
 170         return 0;
 171 }
 172 
  173 /**
  174  * xgene_ahci_qc_issue - Issue commands to the device
  175  * @qc: Command to issue
  176  *
  177  * Due to a hardware erratum affecting the IDENTIFY DEVICE command, the
  178  * controller cannot clear the BSY bit after receiving the PIO setup FIS,
  179  * which sends the DMA state machine into the CMFatalErrorUpdate state and
  180  * locks it up. Restarting the DMA engine brings the controller out of the
  181  * lockup state.
  182  *
  183  * Due to another H/W erratum, the controller is unable to save the PMP
  184  * field fetched from the command header before sending the H2D FIS.
  185  * When the device returns the PMP port field in the D2H FIS, the mismatch
  186  * results in command completion failure. The workaround is to write the
  187  * pmp value to the PxFBS.DEV field before issuing any command to the PMP.
  188  */
 189 static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
 190 {
 191         struct ata_port *ap = qc->ap;
 192         struct ahci_host_priv *hpriv = ap->host->private_data;
 193         struct xgene_ahci_context *ctx = hpriv->plat_data;
 194         int rc = 0;
 195         u32 port_fbs;
  196         void __iomem *port_mmio = ahci_port_base(ap);
 197 
 198         /*
  199          * Write the pmp value to PxFBS.DEV in
  200          * the case of a Port Multiplier.
 201          */
 202         if (ctx->class[ap->port_no] == ATA_DEV_PMP) {
 203                 port_fbs = readl(port_mmio + PORT_FBS);
 204                 port_fbs &= ~PORT_FBS_DEV_MASK;
 205                 port_fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
 206                 writel(port_fbs, port_mmio + PORT_FBS);
 207         }
 208 
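        /*
         * Restart the DMA engine if the previous command issued on this
         * port was IDENTIFY DEVICE, PACKET or SMART - the commands this
         * driver treats as triggering the lockup erratum described above.
         */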
 209         if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) ||
 210             (ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET) ||
 211             (ctx->last_cmd[ap->port_no] == ATA_CMD_SMART)))
 212                 xgene_ahci_restart_engine(ap);
 213 
 214         rc = ahci_qc_issue(qc);
 215 
 216         /* Save the last command issued */
 217         ctx->last_cmd[ap->port_no] = qc->tf.command;
 218 
 219         return rc;
 220 }
 221 
 222 static bool xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx)
 223 {
 224         void __iomem *diagcsr = ctx->csr_diag;
 225 
 226         return (readl(diagcsr + CFG_MEM_RAM_SHUTDOWN) == 0 &&
 227                 readl(diagcsr + BLOCK_MEM_RDY) == 0xFFFFFFFF);
 228 }
 229 
 230 /**
 231  * xgene_ahci_read_id - Read ID data from the specified device
 232  * @dev: device
 233  * @tf: proposed taskfile
 234  * @id: data buffer
 235  *
  236  * This custom read ID function is required because the HW
  237  * does not support DEVSLP.
 238  */
 239 static unsigned int xgene_ahci_read_id(struct ata_device *dev,
 240                                        struct ata_taskfile *tf, u16 *id)
 241 {
 242         u32 err_mask;
 243 
 244         err_mask = ata_do_dev_read_id(dev, tf, id);
 245         if (err_mask)
 246                 return err_mask;
 247 
  248         /*
  249          * Mask reserved area. Word 78 (Serial ATA features supported):
  250          * bit15-8: reserved
  251          * bit7: NCQ autosense
  252          * bit6: Software settings preservation supported
  253          * bit5: reserved
  254          * bit4: In-order SATA delivery supported
  255          * bit3: DIPM requests supported
  256          * bit2: DMA Setup FIS Auto-Activate optimization supported
  257          * bit1: DMA Setup FIS non-zero buffer offsets supported
  258          * bit0: reserved
  259          *
  260          * Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP
  261          */
 262         id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
 263 
 264         return 0;
 265 }
 266 
 267 static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
 268 {
 269         void __iomem *mmio = ctx->hpriv->mmio;
 270         u32 val;
 271 
 272         dev_dbg(ctx->dev, "port configure mmio 0x%p channel %d\n",
 273                 mmio, channel);
 274         val = readl(mmio + PORTCFG);
 275         val = PORTADDR_SET(val, channel == 0 ? 2 : 3);
 276         writel(val, mmio + PORTCFG);
 277         readl(mmio + PORTCFG);  /* Force a barrier */
 278         /* Disable fix rate */
 279         writel(0x0001fffe, mmio + PORTPHY1CFG);
 280         readl(mmio + PORTPHY1CFG); /* Force a barrier */
 281         writel(0x28183219, mmio + PORTPHY2CFG);
 282         readl(mmio + PORTPHY2CFG); /* Force a barrier */
 283         writel(0x13081008, mmio + PORTPHY3CFG);
 284         readl(mmio + PORTPHY3CFG); /* Force a barrier */
 285         writel(0x00480815, mmio + PORTPHY4CFG);
 286         readl(mmio + PORTPHY4CFG); /* Force a barrier */
 287         /* Set window negotiation */
 288         val = readl(mmio + PORTPHY5CFG);
 289         val = PORTPHY5CFG_RTCHG_SET(val, 0x300);
 290         writel(val, mmio + PORTPHY5CFG);
 291         readl(mmio + PORTPHY5CFG); /* Force a barrier */
 292         val = readl(mmio + PORTAXICFG);
 293         val = PORTAXICFG_EN_CONTEXT_SET(val, 0x1); /* Enable context mgmt */
 294         val = PORTAXICFG_OUTTRANS_SET(val, 0xe); /* Set outstanding */
 295         writel(val, mmio + PORTAXICFG);
 296         readl(mmio + PORTAXICFG); /* Force a barrier */
 297         /* Set the watermark threshold of the receive FIFO */
 298         val = readl(mmio + PORTRANSCFG);
 299         val = PORTRANSCFG_RXWM_SET(val, 0x30);
 300         writel(val, mmio + PORTRANSCFG);
 301 }
 302 
 303 /**
 304  * xgene_ahci_do_hardreset - Issue the actual COMRESET
 305  * @link: link to reset
 306  * @deadline: deadline jiffies for the operation
 307  * @online: Return value to indicate if device online
 308  *
  309  * Due to a limitation of the hardware PHY, a different set of settings is
  310  * required for each supported disk speed - Gen3 (6.0Gbps), Gen2 (3.0Gbps),
  311  * and Gen1 (1.5Gbps). Otherwise, during long IO stress tests, the PHY will
  312  * report disparity errors and the like. In addition, during COMRESET, errors
  313  * can be reported in the register PORT_SCR_ERR. For SERR_DISPARITY and
  314  * SERR_10B_8B_ERR, the PHY receiver line must be reset. Also, during long
  315  * reboot cycle regressions, the PHY sometimes reports link down even though
  316  * the device is present, because of speed negotiation failure, so the
  317  * COMRESET needs to be retried to get the link up. The following algorithm
  318  * is used to properly configure the hardware PHY during COMRESET:
  319  *
  320  * Alg Part 1:
  321  * 1. Start the PHY at Gen3 speed (default setting)
  322  * 2. Issue the COMRESET
  323  * 3. If no link, go to Alg Part 3
  324  * 4. If link up, determine if the negotiated speed matches the PHY
  325  *    configured speed
  326  * 5. If they match, go to Alg Part 2
  327  * 6. If they do not match and this is the first attempt, configure the PHY
  328  *    for the negotiated disk speed and repeat step 2
  329  * 7. Go to Alg Part 2
  330  *
  331  * Alg Part 2:
  332  * 1. On link up, if any SERR_DISPARITY or SERR_10B_8B_ERR errors are
  333  *    reported in the register PORT_SCR_ERR, then reset the PHY receiver line
  334  * 2. Go to Alg Part 4
  335  *
  336  * Alg Part 3:
  337  * 1. Check PORT_SCR_STAT to see whether device presence was detected but
  338  *    PHY communication establishment failed; if so, and fewer than the
  339  *    maximum of 3 link-down attempts have been made, go to Alg Part 1.
  340  * 2. Go to Alg Part 4.
  341  *
  342  * Alg Part 4:
  343  * 1. Clear any pending errors from register PORT_SCR_ERR.
  344  *
  345  * NOTE: For the initial version, we will NOT support Gen1/Gen2. In addition,
  346  *       until the underlying PHY supports a method to reset the receiver
  347  *       line, a warning message will be printed on detection of
  348  *       SERR_DISPARITY or SERR_10B_8B_ERR errors.
 349  */
 350 static int xgene_ahci_do_hardreset(struct ata_link *link,
 351                                    unsigned long deadline, bool *online)
 352 {
 353         const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 354         struct ata_port *ap = link->ap;
 355         struct ahci_host_priv *hpriv = ap->host->private_data;
 356         struct xgene_ahci_context *ctx = hpriv->plat_data;
 357         struct ahci_port_priv *pp = ap->private_data;
 358         u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
 359         void __iomem *port_mmio = ahci_port_base(ap);
 360         struct ata_taskfile tf;
 361         int link_down_retry = 0;
 362         int rc;
 363         u32 val, sstatus;
 364 
 365         do {
 366                 /* clear D2H reception area to properly wait for D2H FIS */
 367                 ata_tf_init(link->device, &tf);
 368                 tf.command = ATA_BUSY;
 369                 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
 370                 rc = sata_link_hardreset(link, timing, deadline, online,
 371                                  ahci_check_ready);
 372                 if (*online) {
 373                         val = readl(port_mmio + PORT_SCR_ERR);
 374                         if (val & (SERR_DISPARITY | SERR_10B_8B_ERR))
 375                                 dev_warn(ctx->dev, "link has error\n");
 376                         break;
 377                 }
 378 
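                /*
                 * SStatus.DET == 0x1 means device presence was detected
                 * but PHY communication was not established; in that case
                 * retry the COMRESET, up to MAX_LINK_DOWN_RETRY times.
                 */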
 379                 sata_scr_read(link, SCR_STATUS, &sstatus);
 380         } while (link_down_retry++ < MAX_LINK_DOWN_RETRY &&
 381                  (sstatus & 0xff) == 0x1);
 382 
 383         /* clear all errors if any pending */
 384         val = readl(port_mmio + PORT_SCR_ERR);
 385         writel(val, port_mmio + PORT_SCR_ERR);
 386 
 387         return rc;
 388 }
 389 
 390 static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class,
 391                                 unsigned long deadline)
 392 {
 393         struct ata_port *ap = link->ap;
 394         struct ahci_host_priv *hpriv = ap->host->private_data;
 395         void __iomem *port_mmio = ahci_port_base(ap);
 396         bool online;
 397         int rc;
 398         u32 portcmd_saved;
 399         u32 portclb_saved;
 400         u32 portclbhi_saved;
 401         u32 portrxfis_saved;
 402         u32 portrxfishi_saved;
 403 
  404         /* As hardreset resets these CSRs, save them to restore later */
 405         portcmd_saved = readl(port_mmio + PORT_CMD);
 406         portclb_saved = readl(port_mmio + PORT_LST_ADDR);
 407         portclbhi_saved = readl(port_mmio + PORT_LST_ADDR_HI);
 408         portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR);
 409         portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI);
 410 
 411         hpriv->stop_engine(ap);
 412 
 413         rc = xgene_ahci_do_hardreset(link, deadline, &online);
 414 
 415         /* As controller hardreset clears them, restore them */
 416         writel(portcmd_saved, port_mmio + PORT_CMD);
 417         writel(portclb_saved, port_mmio + PORT_LST_ADDR);
 418         writel(portclbhi_saved, port_mmio + PORT_LST_ADDR_HI);
 419         writel(portrxfis_saved, port_mmio + PORT_FIS_ADDR);
 420         writel(portrxfishi_saved, port_mmio + PORT_FIS_ADDR_HI);
 421 
 422         hpriv->start_engine(ap);
 423 
 424         if (online)
 425                 *class = ahci_dev_classify(ap);
 426 
 427         return rc;
 428 }
 429 
 430 static void xgene_ahci_host_stop(struct ata_host *host)
 431 {
 432         struct ahci_host_priv *hpriv = host->private_data;
 433 
 434         ahci_platform_disable_resources(hpriv);
 435 }
 436 
 437 /**
  438  * xgene_ahci_pmp_softreset - Issue the softreset to the drives connected
  439  *                            to the Port Multiplier.
  440  * @link: link to reset
  441  * @class: Return value to indicate class of device
  442  * @deadline: deadline jiffies for the operation
  443  *
  444  * Due to an H/W erratum, the controller is unable to save the PMP
  445  * field fetched from the command header before sending the H2D FIS.
  446  * When the device returns the PMP port field in the D2H FIS, the
  447  * mismatch results in command completion failure. The workaround
  448  * is to write the pmp value to the PxFBS.DEV field before issuing any
  449  * command to the PMP.
 450  */
 451 static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
 452                           unsigned long deadline)
 453 {
 454         int pmp = sata_srst_pmp(link);
 455         struct ata_port *ap = link->ap;
  456         int rc;
  457         void __iomem *port_mmio = ahci_port_base(ap);
 458         u32 port_fbs;
 459 
 460         /*
 461          * Set PxFBS.DEV field with pmp
 462          * value.
 463          */
 464         port_fbs = readl(port_mmio + PORT_FBS);
 465         port_fbs &= ~PORT_FBS_DEV_MASK;
 466         port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
 467         writel(port_fbs, port_mmio + PORT_FBS);
 468 
 469         rc = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
 470 
 471         return rc;
 472 }
 473 
 474 /**
 475  * xgene_ahci_softreset - Issue the softreset to the drive.
 476  * @link: link to reset
 477  * @class: Return value to indicate class of device
 478  * @deadline: deadline jiffies for the operation
 479  *
  480  * Due to an H/W erratum, the controller is unable to save the PMP
  481  * field fetched from the command header before sending the H2D FIS.
  482  * When the device returns the PMP port field in the D2H FIS, the
  483  * mismatch results in command completion failure. The workaround
  484  * is to write the pmp value to the PxFBS.DEV field before issuing any
  485  * command to the PMP. Here is the algorithm used to detect a PMP:
  486  *
  487  * 1. Save the PxFBS value
  488  * 2. Program PxFBS.DEV with the pmp value sent by the framework. The
  489  *    framework initially sends 0xF for both PMP and non-PMP
  490  * 3. Issue softreset
  491  * 4. If the signature class is PMP, go to step 6
  492  * 5. Restore the original PxFBS and go to step 3
  493  * 6. Return
 494  */
 495 static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
 496                           unsigned long deadline)
 497 {
 498         int pmp = sata_srst_pmp(link);
 499         struct ata_port *ap = link->ap;
 500         struct ahci_host_priv *hpriv = ap->host->private_data;
 501         struct xgene_ahci_context *ctx = hpriv->plat_data;
  502         void __iomem *port_mmio = ahci_port_base(ap);
  503         u32 port_fbs;
  504         u32 port_fbs_save;
  505         u32 retry = 1;
  506         int rc;
 507 
 508         port_fbs_save = readl(port_mmio + PORT_FBS);
 509 
 510         /*
 511          * Set PxFBS.DEV field with pmp
 512          * value.
 513          */
 514         port_fbs = readl(port_mmio + PORT_FBS);
 515         port_fbs &= ~PORT_FBS_DEV_MASK;
 516         port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
 517         writel(port_fbs, port_mmio + PORT_FBS);
 518 
 519 softreset_retry:
 520         rc = ahci_do_softreset(link, class, pmp,
 521                                deadline, ahci_check_ready);
 522 
 523         ctx->class[ap->port_no] = *class;
 524         if (*class != ATA_DEV_PMP) {
 525                 /*
 526                  * Retry for normal drives without
 527                  * setting PxFBS.DEV field with pmp value.
 528                  */
 529                 if (retry--) {
 530                         writel(port_fbs_save, port_mmio + PORT_FBS);
 531                         goto softreset_retry;
 532                 }
 533         }
 534 
 535         return rc;
 536 }
 537 
 538 /**
  539  * xgene_ahci_handle_broken_edge_irq - Handle the broken edge-trigger irq.
  540  * @host: Host that received the irq
  541  * @irq_masked: HOST_IRQ_STAT value
  542  *
  543  * For hardware with a broken edge trigger latch, the
  544  * HOST_IRQ_STAT register misses the edge interrupt when
  545  * the clearing of HOST_IRQ_STAT and the hardware's update
  546  * of a PORT_IRQ_STAT register happen in the same
  547  * clock cycle.
  548  * The algorithm below outlines the workaround.
  549  *
  550  * 1. Read HOST_IRQ_STAT register and save the state.
  551  * 2. Clear the HOST_IRQ_STAT register.
  552  * 3. Read back the HOST_IRQ_STAT register.
  553  * 4. If the HOST_IRQ_STAT register equals zero, then traverse
  554  *    the PORT_IRQ_STAT register of each remaining port to check
  555  *    whether an interrupt triggered at that point; otherwise
  556  *    go to step 6.
  557  * 5. If the PORT_IRQ_STAT register of a remaining port is nonzero,
  558  *    then update the HOST_IRQ_STAT state saved in step 1.
  559  * 6. Handle port interrupts.
 560  * 7. Exit
 561  */
 562 static int xgene_ahci_handle_broken_edge_irq(struct ata_host *host,
 563                                              u32 irq_masked)
 564 {
 565         struct ahci_host_priv *hpriv = host->private_data;
 566         void __iomem *port_mmio;
 567         int i;
 568 
 569         if (!readl(hpriv->mmio + HOST_IRQ_STAT)) {
 570                 for (i = 0; i < host->n_ports; i++) {
 571                         if (irq_masked & (1 << i))
 572                                 continue;
 573 
 574                         port_mmio = ahci_port_base(host->ports[i]);
 575                         if (readl(port_mmio + PORT_IRQ_STAT))
 576                                 irq_masked |= (1 << i);
 577                 }
 578         }
 579 
 580         return ahci_handle_port_intr(host, irq_masked);
 581 }
 582 
 583 static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
 584 {
 585         struct ata_host *host = dev_instance;
 586         struct ahci_host_priv *hpriv;
 587         unsigned int rc = 0;
 588         void __iomem *mmio;
 589         u32 irq_stat, irq_masked;
 590 
 591         VPRINTK("ENTER\n");
 592 
 593         hpriv = host->private_data;
 594         mmio = hpriv->mmio;
 595 
 596         /* sigh.  0xffffffff is a valid return from h/w */
 597         irq_stat = readl(mmio + HOST_IRQ_STAT);
 598         if (!irq_stat)
 599                 return IRQ_NONE;
 600 
 601         irq_masked = irq_stat & hpriv->port_map;
 602 
 603         spin_lock(&host->lock);
 604 
 605         /*
 606          * HOST_IRQ_STAT behaves as edge triggered latch meaning that
 607          * it should be cleared before all the port events are cleared.
 608          */
 609         writel(irq_stat, mmio + HOST_IRQ_STAT);
 610 
 611         rc = xgene_ahci_handle_broken_edge_irq(host, irq_masked);
 612 
 613         spin_unlock(&host->lock);
 614 
 615         VPRINTK("EXIT\n");
 616 
 617         return IRQ_RETVAL(rc);
 618 }
 619 
 620 static struct ata_port_operations xgene_ahci_v1_ops = {
 621         .inherits = &ahci_ops,
 622         .host_stop = xgene_ahci_host_stop,
 623         .hardreset = xgene_ahci_hardreset,
 624         .read_id = xgene_ahci_read_id,
 625         .qc_issue = xgene_ahci_qc_issue,
 626         .softreset = xgene_ahci_softreset,
 627         .pmp_softreset = xgene_ahci_pmp_softreset
 628 };
 629 
 630 static const struct ata_port_info xgene_ahci_v1_port_info = {
 631         .flags = AHCI_FLAG_COMMON | ATA_FLAG_PMP,
 632         .pio_mask = ATA_PIO4,
 633         .udma_mask = ATA_UDMA6,
 634         .port_ops = &xgene_ahci_v1_ops,
 635 };
 636 
 637 static struct ata_port_operations xgene_ahci_v2_ops = {
 638         .inherits = &ahci_ops,
 639         .host_stop = xgene_ahci_host_stop,
 640         .hardreset = xgene_ahci_hardreset,
 641         .read_id = xgene_ahci_read_id,
 642 };
 643 
 644 static const struct ata_port_info xgene_ahci_v2_port_info = {
 645         .flags = AHCI_FLAG_COMMON | ATA_FLAG_PMP,
 646         .pio_mask = ATA_PIO4,
 647         .udma_mask = ATA_UDMA6,
 648         .port_ops = &xgene_ahci_v2_ops,
 649 };
 650 
 651 static int xgene_ahci_hw_init(struct ahci_host_priv *hpriv)
 652 {
 653         struct xgene_ahci_context *ctx = hpriv->plat_data;
 654         int i;
 655         int rc;
 656         u32 val;
 657 
  658         /* Bring the IP RAM out of shutdown */
 659         rc = xgene_ahci_init_memram(ctx);
 660         if (rc)
 661                 return rc;
 662 
 663         for (i = 0; i < MAX_AHCI_CHN_PERCTR; i++)
 664                 xgene_ahci_set_phy_cfg(ctx, i);
 665 
  666         /* Clear pending interrupts and unmask the top-level interrupts */
 667         writel(0xffffffff, hpriv->mmio + HOST_IRQ_STAT);
 668         readl(hpriv->mmio + HOST_IRQ_STAT); /* Force a barrier */
 669         writel(0, ctx->csr_core + INTSTATUSMASK);
 670         val = readl(ctx->csr_core + INTSTATUSMASK); /* Force a barrier */
 671         dev_dbg(ctx->dev, "top level interrupt mask 0x%X value 0x%08X\n",
 672                 INTSTATUSMASK, val);
 673 
 674         writel(0x0, ctx->csr_core + ERRINTSTATUSMASK);
 675         readl(ctx->csr_core + ERRINTSTATUSMASK); /* Force a barrier */
 676         writel(0x0, ctx->csr_axi + INT_SLV_TMOMASK);
 677         readl(ctx->csr_axi + INT_SLV_TMOMASK);
 678 
 679         /* Enable AXI Interrupt */
 680         writel(0xffffffff, ctx->csr_core + SLVRDERRATTRIBUTES);
 681         writel(0xffffffff, ctx->csr_core + SLVWRERRATTRIBUTES);
 682         writel(0xffffffff, ctx->csr_core + MSTRDERRATTRIBUTES);
 683         writel(0xffffffff, ctx->csr_core + MSTWRERRATTRIBUTES);
 684 
 685         /* Enable coherency */
 686         val = readl(ctx->csr_core + BUSCTLREG);
 687         val &= ~0x00000002;     /* Enable write coherency */
 688         val &= ~0x00000001;     /* Enable read coherency */
 689         writel(val, ctx->csr_core + BUSCTLREG);
 690 
 691         val = readl(ctx->csr_core + IOFMSTRWAUX);
 692         val |= (1 << 3);        /* Enable read coherency */
 693         val |= (1 << 9);        /* Enable write coherency */
 694         writel(val, ctx->csr_core + IOFMSTRWAUX);
 695         val = readl(ctx->csr_core + IOFMSTRWAUX);
 696         dev_dbg(ctx->dev, "coherency 0x%X value 0x%08X\n",
 697                 IOFMSTRWAUX, val);
 698 
 699         return rc;
 700 }
 701 
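/*
 * The optional mux CSR selects between the SATA and Ethernet functions that
 * appear to share the underlying interface: clearing CFG_SATA_ENET_SELECT_MASK
 * in SATA_ENET_CONFIG_REG selects SATA, and the read-back below verifies that
 * the selection took effect.
 */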
 702 static int xgene_ahci_mux_select(struct xgene_ahci_context *ctx)
 703 {
 704         u32 val;
 705 
 706         /* Check for optional MUX resource */
 707         if (!ctx->csr_mux)
 708                 return 0;
 709 
 710         val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
 711         val &= ~CFG_SATA_ENET_SELECT_MASK;
 712         writel(val, ctx->csr_mux + SATA_ENET_CONFIG_REG);
 713         val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
 714         return val & CFG_SATA_ENET_SELECT_MASK ? -1 : 0;
 715 }
 716 
 717 static struct scsi_host_template ahci_platform_sht = {
 718         AHCI_SHT(DRV_NAME),
 719 };
 720 
 721 #ifdef CONFIG_ACPI
 722 static const struct acpi_device_id xgene_ahci_acpi_match[] = {
 723         { "APMC0D0D", XGENE_AHCI_V1},
 724         { "APMC0D32", XGENE_AHCI_V2},
 725         {},
 726 };
 727 MODULE_DEVICE_TABLE(acpi, xgene_ahci_acpi_match);
 728 #endif
 729 
 730 static const struct of_device_id xgene_ahci_of_match[] = {
 731         {.compatible = "apm,xgene-ahci", .data = (void *) XGENE_AHCI_V1},
 732         {.compatible = "apm,xgene-ahci-v2", .data = (void *) XGENE_AHCI_V2},
 733         {},
 734 };
 735 MODULE_DEVICE_TABLE(of, xgene_ahci_of_match);
 736 
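/*
 * Resource layout consumed by xgene_ahci_probe() below: MEM resource 0 is
 * the AHCI MMIO region (mapped by ahci_platform_get_resources()), resource 1
 * the host core CSR, resource 2 the diagnostic CSR, resource 3 the AXI CSR
 * and, optionally, resource 4 the SATA/ENET mux CSR.
 */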
 737 static int xgene_ahci_probe(struct platform_device *pdev)
 738 {
 739         struct device *dev = &pdev->dev;
 740         struct ahci_host_priv *hpriv;
 741         struct xgene_ahci_context *ctx;
 742         struct resource *res;
 743         const struct of_device_id *of_devid;
 744         enum xgene_ahci_version version = XGENE_AHCI_V1;
 745         const struct ata_port_info *ppi[] = { &xgene_ahci_v1_port_info,
 746                                               &xgene_ahci_v2_port_info };
 747         int rc;
 748 
 749         hpriv = ahci_platform_get_resources(pdev, 0);
 750         if (IS_ERR(hpriv))
 751                 return PTR_ERR(hpriv);
 752 
 753         ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
 754         if (!ctx)
 755                 return -ENOMEM;
 756 
 757         hpriv->plat_data = ctx;
 758         ctx->hpriv = hpriv;
 759         ctx->dev = dev;
 760 
 761         /* Retrieve the IP core resource */
 762         res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 763         ctx->csr_core = devm_ioremap_resource(dev, res);
 764         if (IS_ERR(ctx->csr_core))
 765                 return PTR_ERR(ctx->csr_core);
 766 
 767         /* Retrieve the IP diagnostic resource */
 768         res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
 769         ctx->csr_diag = devm_ioremap_resource(dev, res);
 770         if (IS_ERR(ctx->csr_diag))
 771                 return PTR_ERR(ctx->csr_diag);
 772 
 773         /* Retrieve the IP AXI resource */
 774         res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
 775         ctx->csr_axi = devm_ioremap_resource(dev, res);
 776         if (IS_ERR(ctx->csr_axi))
 777                 return PTR_ERR(ctx->csr_axi);
 778 
 779         /* Retrieve the optional IP mux resource */
 780         res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
 781         if (res) {
 782                 void __iomem *csr = devm_ioremap_resource(dev, res);
 783                 if (IS_ERR(csr))
 784                         return PTR_ERR(csr);
 785 
 786                 ctx->csr_mux = csr;
 787         }
 788 
 789         of_devid = of_match_device(xgene_ahci_of_match, dev);
 790         if (of_devid) {
 791                 if (of_devid->data)
 792                         version = (enum xgene_ahci_version) of_devid->data;
 793         }
 794 #ifdef CONFIG_ACPI
 795         else {
 796                 const struct acpi_device_id *acpi_id;
 797                 struct acpi_device_info *info;
 798                 acpi_status status;
 799 
 800                 acpi_id = acpi_match_device(xgene_ahci_acpi_match, &pdev->dev);
 801                 if (!acpi_id) {
 802                         dev_warn(&pdev->dev, "No node entry in ACPI table. Assume version1\n");
 803                         version = XGENE_AHCI_V1;
 804                 } else if (acpi_id->driver_data) {
 805                         version = (enum xgene_ahci_version) acpi_id->driver_data;
 806                         status = acpi_get_object_info(ACPI_HANDLE(&pdev->dev), &info);
 807                         if (ACPI_FAILURE(status)) {
 808                                 dev_warn(&pdev->dev, "%s: Error reading device info. Assume version1\n",
 809                                         __func__);
 810                                 version = XGENE_AHCI_V1;
 811                         } else {
 812                                 if (info->valid & ACPI_VALID_CID)
 813                                         version = XGENE_AHCI_V2;
 814                                 kfree(info);
 815                         }
 816                 }
 817         }
 818 #endif
 819 
 820         dev_dbg(dev, "VAddr 0x%p Mmio VAddr 0x%p\n", ctx->csr_core,
 821                 hpriv->mmio);
 822 
 823         /* Select ATA */
 824         if ((rc = xgene_ahci_mux_select(ctx))) {
 825                 dev_err(dev, "SATA mux selection failed error %d\n", rc);
 826                 return -ENODEV;
 827         }
 828 
 829         if (xgene_ahci_is_memram_inited(ctx)) {
 830                 dev_info(dev, "skip clock and PHY initialization\n");
 831                 goto skip_clk_phy;
 832         }
 833 
 834         /* Due to errata, HW requires full toggle transition */
 835         rc = ahci_platform_enable_clks(hpriv);
 836         if (rc)
 837                 goto disable_resources;
 838         ahci_platform_disable_clks(hpriv);
 839 
 840         rc = ahci_platform_enable_resources(hpriv);
 841         if (rc)
 842                 goto disable_resources;
 843 
 844         /* Configure the host controller */
 845         xgene_ahci_hw_init(hpriv);
 846 skip_clk_phy:
 847 
 848         switch (version) {
 849         case XGENE_AHCI_V1:
 850                 hpriv->flags = AHCI_HFLAG_NO_NCQ;
 851                 break;
 852         case XGENE_AHCI_V2:
 853                 hpriv->flags |= AHCI_HFLAG_YES_FBS;
 854                 hpriv->irq_handler = xgene_ahci_irq_intr;
 855                 break;
 856         default:
 857                 break;
 858         }
 859 
 860         rc = ahci_platform_init_host(pdev, hpriv, ppi[version - 1],
 861                                      &ahci_platform_sht);
 862         if (rc)
 863                 goto disable_resources;
 864 
 865         dev_dbg(dev, "X-Gene SATA host controller initialized\n");
 866         return 0;
 867 
 868 disable_resources:
 869         ahci_platform_disable_resources(hpriv);
 870         return rc;
 871 }
 872 
 873 static struct platform_driver xgene_ahci_driver = {
 874         .probe = xgene_ahci_probe,
 875         .remove = ata_platform_remove_one,
 876         .driver = {
 877                 .name = DRV_NAME,
 878                 .of_match_table = xgene_ahci_of_match,
 879                 .acpi_match_table = ACPI_PTR(xgene_ahci_acpi_match),
 880         },
 881 };
 882 
 883 module_platform_driver(xgene_ahci_driver);
 884 
 885 MODULE_DESCRIPTION("APM X-Gene AHCI SATA driver");
 886 MODULE_AUTHOR("Loc Ho <lho@apm.com>");
 887 MODULE_LICENSE("GPL");
 888 MODULE_VERSION("0.4");
