/*
 * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/time.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>

#include <linux/phy/phy-qcom-ufs.h>
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
#define UFS_QCOM_DEFAULT_DBG_PRINT_EN	\
	(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)

enum {
	TSTBUS_UAWM,
	TSTBUS_UARM,
	TSTBUS_TXUC,
	TSTBUS_RXUC,
	TSTBUS_DFC,
	TSTBUS_TRLUT,
	TSTBUS_TMRLUT,
	TSTBUS_OCSC,
	TSTBUS_UTP_HCI,
	TSTBUS_COMBINED,
	TSTBUS_WRAPPER,
	TSTBUS_UNIPRO,
	TSTBUS_MAX,
};

static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles);

static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
		char *prefix)
{
	print_hex_dump(KERN_ERR, prefix,
			len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
			16, 4, (void __force *)hba->mmio_base + offset,
			len * 4, false);
}

static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
	int err = 0;

	err = ufshcd_dme_get(hba,
			UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
				__func__, err);

	return err;
}

static int ufs_qcom_host_clk_get(struct device *dev,
		const char *name, struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk)) {
		err = PTR_ERR(clk);
		dev_err(dev, "%s: failed to get %s err %d",
				__func__, name, err);
	} else {
		*clk_out = clk;
	}

	return err;
}

static int ufs_qcom_host_clk_enable(struct device *dev,
		const char *name, struct clk *clk)
{
	int err = 0;

	err = clk_prepare_enable(clk);
	if (err)
		dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

	return err;
}

static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
	if (!host->is_lane_clks_enabled)
		return;

	clk_disable_unprepare(host->tx_l1_sync_clk);
	clk_disable_unprepare(host->tx_l0_sync_clk);
	clk_disable_unprepare(host->rx_l1_sync_clk);
	clk_disable_unprepare(host->rx_l0_sync_clk);

	host->is_lane_clks_enabled = false;
}

static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (host->is_lane_clks_enabled)
		return 0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
		host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
		host->tx_l0_sync_clk);
	if (err)
		goto disable_rx_l0;

"rx_lane1_sync_clk", 136 host->rx_l1_sync_clk); 137 if (err) 138 goto disable_tx_l0; 139 140 err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk", 141 host->tx_l1_sync_clk); 142 if (err) 143 goto disable_rx_l1; 144 145 host->is_lane_clks_enabled = true; 146 goto out; 147 148disable_rx_l1: 149 clk_disable_unprepare(host->rx_l1_sync_clk); 150disable_tx_l0: 151 clk_disable_unprepare(host->tx_l0_sync_clk); 152disable_rx_l0: 153 clk_disable_unprepare(host->rx_l0_sync_clk); 154out: 155 return err; 156} 157 158static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host) 159{ 160 int err = 0; 161 struct device *dev = host->hba->dev; 162 163 err = ufs_qcom_host_clk_get(dev, 164 "rx_lane0_sync_clk", &host->rx_l0_sync_clk); 165 if (err) 166 goto out; 167 168 err = ufs_qcom_host_clk_get(dev, 169 "tx_lane0_sync_clk", &host->tx_l0_sync_clk); 170 if (err) 171 goto out; 172 173 err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk", 174 &host->rx_l1_sync_clk); 175 if (err) 176 goto out; 177 178 err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk", 179 &host->tx_l1_sync_clk); 180 181out: 182 return err; 183} 184 185static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba) 186{ 187 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 188 struct phy *phy = host->generic_phy; 189 u32 tx_lanes; 190 int err = 0; 191 192 err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes); 193 if (err) 194 goto out; 195 196 err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes); 197 if (err) 198 dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n", 199 __func__); 200 201out: 202 return err; 203} 204 205static int ufs_qcom_check_hibern8(struct ufs_hba *hba) 206{ 207 int err; 208 u32 tx_fsm_val = 0; 209 unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS); 210 211 do { 212 err = ufshcd_dme_get(hba, 213 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 214 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), 215 &tx_fsm_val); 216 if (err || tx_fsm_val == TX_FSM_HIBERN8) 217 break; 218 219 /* sleep for max. 200us */ 220 usleep_range(100, 200); 221 } while (time_before(jiffies, timeout)); 222 223 /* 224 * we might have scheduled out for long during polling so 225 * check the state again. 226 */ 227 if (time_after(jiffies, timeout)) 228 err = ufshcd_dme_get(hba, 229 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 230 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), 231 &tx_fsm_val); 232 233 if (err) { 234 dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n", 235 __func__, err); 236 } else if (tx_fsm_val != TX_FSM_HIBERN8) { 237 err = tx_fsm_val; 238 dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n", 239 __func__, err); 240 } 241 242 return err; 243} 244 245static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host) 246{ 247 ufshcd_rmwl(host->hba, QUNIPRO_SEL, 248 ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0, 249 REG_UFS_CFG1); 250 /* make sure above configuration is applied before we return */ 251 mb(); 252} 253 254static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) 255{ 256 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 257 struct phy *phy = host->generic_phy; 258 int ret = 0; 259 bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B) 260 ? 

	/* Assert PHY reset and apply PHY calibration values */
	ufs_qcom_assert_reset(hba);
	/* provide 1ms delay to let the reset pulse propagate */
	usleep_range(1000, 1100);

	ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);

	if (ret) {
		dev_err(hba->dev,
			"%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	/* De-assert PHY reset and start serdes */
	ufs_qcom_deassert_reset(hba);

	/*
	 * after reset deassertion, phy will need all ref clocks,
	 * voltage, current to settle down before starting serdes.
	 */
	usleep_range(1000, 1100);
	ret = ufs_qcom_phy_start_serdes(phy);
	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	ret = ufs_qcom_phy_is_pcs_ready(phy);
	if (ret)
		dev_err(hba->dev,
			"%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
			__func__, ret);

	ufs_qcom_select_unipro_mode(host);

out:
	return ret;
}

/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
 * in a specific operation. UTP controller CGCs are disabled by default and
 * this function enables them (after every UFS link startup) to save some
 * power leakage.
 */
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
	ufshcd_writel(hba,
		ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
		REG_UFS_CFG2);

	/* Ensure that HW clock gating is enabled before next operations */
	mb();
}

static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_power_up_sequence(hba);
		/*
		 * The PHY PLL output is the source of tx/rx lane symbol
		 * clocks, hence, enable the lane clocks only after PHY
		 * is initialized.
		 */
		err = ufs_qcom_enable_lane_clks(host);
		break;
	case POST_CHANGE:
		/* check if UFS PHY moved from DISABLED to HIBERN8 */
		err = ufs_qcom_check_hibern8(hba);
		ufs_qcom_enable_hw_clk_gating(hba);

		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
		err = -EINVAL;
		break;
	}
	return err;
}

/**
 * ufs_qcom_cfg_timers - Configure ufs qcom cfg timers
 * @hba: host controller instance
 * @gear: Current operating gear
 * @hs: current power mode
 * @rate: current operating rate (A or B)
 * @update_link_startup_timer: if true, update the PA link startup timer
 *
 * Returns zero for success and non-zero in case of a failure
 */
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
			       u32 hs, u32 rate, bool update_link_startup_timer)
{
	int ret = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_clk_info *clki;
	u32 core_clk_period_in_ns;
	u32 tx_clk_cycles_per_us = 0;
	unsigned long core_clk_rate = 0;
	u32 core_clk_cycles_per_us = 0;

	static u32 pwm_fr_table[][2] = {
		{UFS_PWM_G1, 0x1},
		{UFS_PWM_G2, 0x1},
		{UFS_PWM_G3, 0x1},
		{UFS_PWM_G4, 0x1},
	};

	static u32 hs_fr_table_rA[][2] = {
		{UFS_HS_G1, 0x1F},
		{UFS_HS_G2, 0x3e},
		{UFS_HS_G3, 0x7D},
	};

	static u32 hs_fr_table_rB[][2] = {
		{UFS_HS_G1, 0x24},
		{UFS_HS_G2, 0x49},
		{UFS_HS_G3, 0x92},
	};

	/*
	 * The Qunipro controller does not use following registers:
	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
	 * UFS_REG_PA_LINK_STARTUP_TIMER
	 * But UTP controller uses SYS1CLK_1US_REG register for Interrupt
	 * Aggregation logic.
	 */
	if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
		goto out;

	if (gear == 0) {
		dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
		goto out_error;
	}

	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk"))
			core_clk_rate = clk_get_rate(clki->clk);
	}

	/* If frequency is smaller than 1MHz, set to 1MHz */
	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
		core_clk_rate = DEFAULT_CLK_RATE_HZ;

	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
	if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
		ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (ufs_qcom_cap_qunipro(host))
		goto out;

	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
	core_clk_period_in_ns &= MASK_CLK_NS_REG;

	switch (hs) {
	case FASTAUTO_MODE:
	case FAST_MODE:
		if (rate == PA_HS_MODE_A) {
			if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rA));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
		} else if (rate == PA_HS_MODE_B) {
			if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rB));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
		} else {
			dev_err(hba->dev, "%s: invalid rate = %d\n",
				__func__, rate);
			goto out_error;
		}
		break;
	case SLOWAUTO_MODE:
	case SLOW_MODE:
		if (gear > ARRAY_SIZE(pwm_fr_table)) {
			dev_err(hba->dev,
				"%s: index %d exceeds table size %zu\n",
				__func__, gear,
				ARRAY_SIZE(pwm_fr_table));
			goto out_error;
		}
		tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
		break;
	case UNCHANGED:
	default:
		dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
		goto out_error;
	}

	if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
	    (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
		/* this register's two fields shall be written at once */
		ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
			      REG_UFS_TX_SYMBOL_CLK_NS_US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (update_link_startup_timer) {
		ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
			      REG_UFS_PA_LINK_STARTUP_TIMER);
		/*
		 * make sure that this configuration is applied before
		 * we return
		 */
		mb();
	}
	goto out;

out_error:
	ret = -EINVAL;
out:
	return ret;
}

static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	int err = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	switch (status) {
	case PRE_CHANGE:
		if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
					0, true)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			err = -EINVAL;
			goto out;
		}

		if (ufs_qcom_cap_qunipro(host))
			/*
			 * set unipro core clock cycles to 150 & clear clock
			 * divider
			 */
			err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
									  150);

		break;
	case POST_CHANGE:
		ufs_qcom_link_startup_post_change(hba);
		break;
	default:
		break;
	}

out:
	return err;
}

static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;

	if (ufs_qcom_is_link_off(hba)) {
		/*
		 * Disable the tx/rx lane symbol clocks before PHY is
		 * powered down as the PLL source should be disabled
		 * after downstream clocks are disabled.
		 */
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);

		/* Assert PHY soft reset */
		ufs_qcom_assert_reset(hba);
		goto out;
	}

	/*
	 * If UniPro link is not active, PHY ref_clk, main PHY analog power
	 * rail and low noise analog power rail for PLL can be switched off.
	 */
	if (!ufs_qcom_is_link_active(hba)) {
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);
	}

out:
	return ret;
}

static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int err;

	err = phy_power_on(phy);
	if (err) {
		dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
			__func__, err);
		goto out;
	}

	err = ufs_qcom_enable_lane_clks(host);
	if (err)
		goto out;

	hba->is_sys_suspended = false;

out:
	return err;
}

struct ufs_qcom_dev_params {
	u32 pwm_rx_gear;	/* pwm rx gear to work in */
	u32 pwm_tx_gear;	/* pwm tx gear to work in */
	u32 hs_rx_gear;		/* hs rx gear to work in */
	u32 hs_tx_gear;		/* hs tx gear to work in */
	u32 rx_lanes;		/* number of rx lanes */
	u32 tx_lanes;		/* number of tx lanes */
	u32 rx_pwr_pwm;		/* rx pwm working pwr */
	u32 tx_pwr_pwm;		/* tx pwm working pwr */
	u32 rx_pwr_hs;		/* rx hs working pwr */
	u32 tx_pwr_hs;		/* tx hs working pwr */
	u32 hs_rate;		/* rate A/B to work in HS */
	u32 desired_working_mode;
};

static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
				      struct ufs_pa_layer_attr *dev_max,
				      struct ufs_pa_layer_attr *agreed_pwr)
{
	int min_qcom_gear;
	int min_dev_gear;
	bool is_dev_sup_hs = false;
	bool is_qcom_max_hs = false;

	if (dev_max->pwr_rx == FAST_MODE)
		is_dev_sup_hs = true;

	if (qcom_param->desired_working_mode == FAST) {
		is_qcom_max_hs = true;
		min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
				      qcom_param->hs_tx_gear);
	} else {
		min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
				      qcom_param->pwm_tx_gear);
	}

	/*
	 * device doesn't support HS but qcom_param->desired_working_mode is
	 * HS, thus device and qcom_param don't agree
	 */
	if (!is_dev_sup_hs && is_qcom_max_hs) {
		pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
			__func__);
		return -ENOTSUPP;
	} else if (is_dev_sup_hs && is_qcom_max_hs) {
		/*
		 * since device supports HS, it supports FAST_MODE.
		 * since qcom_param->desired_working_mode is also HS
		 * then final decision (FAST/FASTAUTO) is done according
		 * to qcom_params as it is the restricting factor
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_hs;
	} else {
		/*
		 * here qcom_param->desired_working_mode is PWM.
		 * it doesn't matter whether device supports HS or PWM,
		 * in both cases qcom_param->desired_working_mode will
		 * determine the mode
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_pwm;
	}

	/*
	 * we would like tx to work in the minimum number of lanes
	 * between device capability and vendor preferences.
	 * the same decision will be made for rx
	 */
	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
				    qcom_param->tx_lanes);
	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
				    qcom_param->rx_lanes);

	/* device maximum gear is the minimum between device rx and tx gears */
	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);

	/*
	 * if both device capabilities and vendor pre-defined preferences are
	 * HS or both are PWM then set the minimum gear to be the chosen
	 * working gear.
	 * if one is PWM and one is HS then the one that is PWM gets to decide
	 * what the gear is, as it is the one that also decided previously what
	 * pwr the device will be configured to.
	 */
	if ((is_dev_sup_hs && is_qcom_max_hs) ||
	    (!is_dev_sup_hs && !is_qcom_max_hs))
		agreed_pwr->gear_rx = agreed_pwr->gear_tx =
			min_t(u32, min_dev_gear, min_qcom_gear);
	else if (!is_dev_sup_hs)
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
	else
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;

	agreed_pwr->hs_rate = qcom_param->hs_rate;
	return 0;
}

#ifdef CONFIG_MSM_BUS_SCALING
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
		const char *speed_mode)
{
	struct device *dev = host->hba->dev;
	struct device_node *np = dev->of_node;
	int err;
	const char *key = "qcom,bus-vector-names";

	if (!speed_mode) {
		err = -EINVAL;
		goto out;
	}

	if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
		err = of_property_match_string(np, key, "MAX");
	else
		err = of_property_match_string(np, key, speed_mode);

out:
	if (err < 0)
		dev_err(dev, "%s: Invalid %s mode %d\n",
				__func__, speed_mode, err);
	return err;
}

static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
{
	int gear = max_t(u32, p->gear_rx, p->gear_tx);
	int lanes = max_t(u32, p->lane_rx, p->lane_tx);
	int pwr;

	/* default to PWM Gear 1, Lane 1 if power mode is not initialized */
	if (!gear)
		gear = 1;

	if (!lanes)
		lanes = 1;

	if (!p->pwr_rx && !p->pwr_tx) {
		pwr = SLOWAUTO_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
	} else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
		   p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
		pwr = FAST_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
"B" : "A", gear, lanes); 735 } else { 736 pwr = SLOW_MODE; 737 snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d", 738 "PWM", gear, lanes); 739 } 740} 741 742static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote) 743{ 744 int err = 0; 745 746 if (vote != host->bus_vote.curr_vote) { 747 err = msm_bus_scale_client_update_request( 748 host->bus_vote.client_handle, vote); 749 if (err) { 750 dev_err(host->hba->dev, 751 "%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n", 752 __func__, host->bus_vote.client_handle, 753 vote, err); 754 goto out; 755 } 756 757 host->bus_vote.curr_vote = vote; 758 } 759out: 760 return err; 761} 762 763static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host) 764{ 765 int vote; 766 int err = 0; 767 char mode[BUS_VECTOR_NAME_LEN]; 768 769 ufs_qcom_get_speed_mode(&host->dev_req_params, mode); 770 771 vote = ufs_qcom_get_bus_vote(host, mode); 772 if (vote >= 0) 773 err = ufs_qcom_set_bus_vote(host, vote); 774 else 775 err = vote; 776 777 if (err) 778 dev_err(host->hba->dev, "%s: failed %d\n", __func__, err); 779 else 780 host->bus_vote.saved_vote = vote; 781 return err; 782} 783 784static ssize_t 785show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr, 786 char *buf) 787{ 788 struct ufs_hba *hba = dev_get_drvdata(dev); 789 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 790 791 return snprintf(buf, PAGE_SIZE, "%u\n", 792 host->bus_vote.is_max_bw_needed); 793} 794 795static ssize_t 796store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr, 797 const char *buf, size_t count) 798{ 799 struct ufs_hba *hba = dev_get_drvdata(dev); 800 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 801 uint32_t value; 802 803 if (!kstrtou32(buf, 0, &value)) { 804 host->bus_vote.is_max_bw_needed = !!value; 805 ufs_qcom_update_bus_bw_vote(host); 806 } 807 808 return count; 809} 810 811static int ufs_qcom_bus_register(struct ufs_qcom_host *host) 812{ 813 int err; 814 struct msm_bus_scale_pdata *bus_pdata; 815 struct device *dev = host->hba->dev; 816 struct platform_device *pdev = to_platform_device(dev); 817 struct device_node *np = dev->of_node; 818 819 bus_pdata = msm_bus_cl_get_pdata(pdev); 820 if (!bus_pdata) { 821 dev_err(dev, "%s: failed to get bus vectors\n", __func__); 822 err = -ENODATA; 823 goto out; 824 } 825 826 err = of_property_count_strings(np, "qcom,bus-vector-names"); 827 if (err < 0 || err != bus_pdata->num_usecases) { 828 dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n", 829 __func__, err); 830 goto out; 831 } 832 833 host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata); 834 if (!host->bus_vote.client_handle) { 835 dev_err(dev, "%s: msm_bus_scale_register_client failed\n", 836 __func__); 837 err = -EFAULT; 838 goto out; 839 } 840 841 /* cache the vote index for minimum and maximum bandwidth */ 842 host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN"); 843 host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX"); 844 845 host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw; 846 host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw; 847 sysfs_attr_init(&host->bus_vote.max_bus_bw.attr); 848 host->bus_vote.max_bus_bw.attr.name = "max_bus_bw"; 849 host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR; 850 err = device_create_file(dev, &host->bus_vote.max_bus_bw); 851out: 852 return err; 853} 854#else /* CONFIG_MSM_BUS_SCALING */ 855static int ufs_qcom_update_bus_bw_vote(struct 
static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	return 0;
}

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
	return 0;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	return 0;
}
#endif /* CONFIG_MSM_BUS_SCALING */

static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
	if (host->dev_ref_clk_ctrl_mmio &&
	    (enable ^ host->is_dev_ref_clk_enabled)) {
		u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);

		if (enable)
			temp |= host->dev_ref_clk_en_mask;
		else
			temp &= ~host->dev_ref_clk_en_mask;

		/*
		 * If we are here to disable this clock it might be immediately
		 * after entering into hibern8 in which case we need to make
		 * sure that device ref_clk is active at least 1us after the
		 * hibern8 enter.
		 */
		if (!enable)
			udelay(1);

		writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);

		/* ensure that ref_clk is enabled/disabled before we return */
		wmb();

		/*
		 * If we call hibern8 exit after this, we need to make sure that
		 * device ref_clk is stable for at least 1us before the hibern8
		 * exit command.
		 */
		if (enable)
			udelay(1);

		host->is_dev_ref_clk_enabled = enable;
	}
}

static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status,
				      struct ufs_pa_layer_attr *dev_max_params,
				      struct ufs_pa_layer_attr *dev_req_params)
{
	u32 val;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	struct ufs_qcom_dev_params ufs_qcom_cap;
	int ret = 0;
	int res = 0;

	if (!dev_req_params) {
		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
		ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
		ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
		ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
		ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
		ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
		ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
		ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
		ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
		ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
		ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
		ufs_qcom_cap.desired_working_mode =
					UFS_QCOM_LIMIT_DESIRED_MODE;

		if (host->hw_ver.major == 0x1) {
			/*
			 * HS-G3 operations may not reliably work on legacy QCOM
			 * UFS host controller hardware even though capability
			 * exchange during link startup phase may end up
			 * negotiating maximum supported gear as G3.
			 * Hence downgrade the maximum supported gear to HS-G2.
			 */
			if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
			if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
		}

		ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
						 dev_max_params,
						 dev_req_params);
		if (ret) {
			pr_err("%s: failed to determine capabilities\n",
					__func__);
			goto out;
		}

		break;
	case POST_CHANGE:
		if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
					dev_req_params->pwr_rx,
					dev_req_params->hs_rate, false)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			/*
			 * we return error code at the end of the routine,
			 * but continue to configure UFS_PHY_TX_LANE_ENABLE
			 * and bus voting as usual
			 */
			ret = -EINVAL;
		}

		val = ~(MAX_U32 << dev_req_params->lane_tx);
		res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
		if (res) {
			dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
				__func__, res);
			ret = res;
		}

		/* cache the power mode parameters to use internally */
		memcpy(&host->dev_req_params,
				dev_req_params, sizeof(*dev_req_params));
		ufs_qcom_update_bus_bw_vote(host);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}

static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x1)
		return UFSHCI_VERSION_11;
	else
		return UFSHCI_VERSION_20;
}

/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * The QCOM UFS host controller might exhibit some non-standard behaviours
 * (quirks) that deviate from what the UFSHCI specification requires.
 * Advertise all such quirks to the standard UFS host controller driver so
 * that it takes them into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x01) {
		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
			    | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
			    | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;

		if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;

		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
	}

	if (host->hw_ver.major >= 0x2) {
		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

		if (!ufs_qcom_cap_qunipro(host))
			/* Legacy UniPro mode still needs the following quirks */
			hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
	}
}

static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
	hba->caps |= UFSHCD_CAP_CLK_SCALING;
	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;

	if (host->hw_ver.major >= 0x2) {
		host->caps = UFS_QCOM_CAP_QUNIPRO |
			     UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
	}
}

/**
 * ufs_qcom_setup_clocks - enables/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err;
	int vote = 0;

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * This ufs_qcom_setup_clocks() shall be called from
	 * ufs_qcom_init() after init is done.
	 */
	if (!host)
		return 0;

	if (on) {
		err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
		if (err)
			goto out;

		err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
		if (err) {
			dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
				__func__, err);
			ufs_qcom_phy_disable_iface_clk(host->generic_phy);
			goto out;
		}
		vote = host->bus_vote.saved_vote;
		if (vote == host->bus_vote.min_bw_vote)
			ufs_qcom_update_bus_bw_vote(host);

	} else {

		/* M-PHY RMMI interface clocks can be turned off */
		ufs_qcom_phy_disable_iface_clk(host->generic_phy);
		if (!ufs_qcom_is_link_active(hba))
			/* disable device ref_clk */
			ufs_qcom_dev_ref_clk_ctrl(host, false);

		vote = host->bus_vote.min_bw_vote;
	}

	err = ufs_qcom_set_bus_vote(host, vote);
	if (err)
		dev_err(hba->dev, "%s: set bus vote failed %d\n",
				__func__, err);

out:
	return err;
}

#define ANDROID_BOOT_DEV_MAX	30
static char android_boot_dev[ANDROID_BOOT_DEV_MAX];

#ifndef MODULE
static int __init get_android_boot_dev(char *str)
{
	strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
	return 1;
}
__setup("androidboot.bootdevice=", get_android_boot_dev);
#endif

/**
 * ufs_qcom_init - bind phy with controller
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_qcom_init(struct ufs_hba *hba)
{
	int err;
	struct device *dev = hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct ufs_qcom_host *host;
	struct resource *res;

	if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
		return -ENODEV;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
		goto out;
	}

	/* Make a two way bind between the qcom host and the hba */
	host->hba = hba;
	ufshcd_set_variant(hba, host);

	/*
	 * voting/devoting device ref_clk source is time consuming hence
	 * skip devoting it during aggressive clock gating. This clock
	 * will still be gated off during runtime suspend.
	 */
	host->generic_phy = devm_phy_get(dev, "ufsphy");

	if (IS_ERR(host->generic_phy)) {
		err = PTR_ERR(host->generic_phy);
		dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
		goto out;
	}

	err = ufs_qcom_bus_register(host);
	if (err)
		goto out_host_free;

	ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
		&host->hw_ver.minor, &host->hw_ver.step);

	/*
	 * for newer controllers, device reference clock control bit has
	 * moved inside UFS controller register address space itself.
	 */
	if (host->hw_ver.major >= 0x02) {
		host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
		host->dev_ref_clk_en_mask = BIT(26);
	} else {
		/* "dev_ref_clk_ctrl_mem" is optional resource */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (res) {
			host->dev_ref_clk_ctrl_mmio =
					devm_ioremap_resource(dev, res);
			if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
				dev_warn(dev,
					"%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
					__func__,
					PTR_ERR(host->dev_ref_clk_ctrl_mmio));
				host->dev_ref_clk_ctrl_mmio = NULL;
			}
			host->dev_ref_clk_en_mask = BIT(5);
		}
	}

	/* update phy revision information before calling phy_init() */
	ufs_qcom_phy_save_controller_version(host->generic_phy,
		host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);

	phy_init(host->generic_phy);
	err = phy_power_on(host->generic_phy);
	if (err)
		goto out_unregister_bus;

	err = ufs_qcom_init_lane_clks(host);
	if (err)
		goto out_disable_phy;

	ufs_qcom_set_caps(hba);
	ufs_qcom_advertise_quirks(hba);

	ufs_qcom_setup_clocks(hba, true);

	if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
		ufs_qcom_hosts[hba->dev->id] = host;

	host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
	ufs_qcom_get_default_testbus_cfg(host);
	err = ufs_qcom_testbus_config(host);
	if (err) {
		dev_warn(dev, "%s: failed to configure the testbus %d\n",
				__func__, err);
		err = 0;
	}

	goto out;

out_disable_phy:
	phy_power_off(host->generic_phy);
out_unregister_bus:
	phy_exit(host->generic_phy);
out_host_free:
	devm_kfree(dev, host);
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}

static void ufs_qcom_exit(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	ufs_qcom_disable_lane_clks(host);
	phy_power_off(host->generic_phy);
}

static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles)
{
	int err;
	u32 core_clk_ctrl_reg;

	if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
		return -EINVAL;

	err = ufshcd_dme_get(hba,
			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			    &core_clk_ctrl_reg);
	if (err)
		goto out;

	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
	core_clk_ctrl_reg |= clk_cycles;

	/* Clear CORE_CLK_DIV_EN */
	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;

	err = ufshcd_dme_set(hba,
			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			    core_clk_ctrl_reg);
out:
	return err;
}

static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
{
	/* nothing to do as of now */
	return 0;
}

static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	/* set unipro core clock cycles to 150 and clear clock divider */
	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
}

static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err;
	u32 core_clk_ctrl_reg;

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	err = ufshcd_dme_get(hba,
			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			    &core_clk_ctrl_reg);

	/* make sure CORE_CLK_DIV_EN is cleared */
	if (!err &&
	    (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
		core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
		err = ufshcd_dme_set(hba,
				    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
				    core_clk_ctrl_reg);
	}

	return err;
}

static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	/* set unipro core clock cycles to 75 and clear clock divider */
	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
}

static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
		bool scale_up, enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
	int err = 0;

	if (status == PRE_CHANGE) {
		if (scale_up)
			err = ufs_qcom_clk_scale_up_pre_change(hba);
		else
			err = ufs_qcom_clk_scale_down_pre_change(hba);
	} else {
		if (scale_up)
			err = ufs_qcom_clk_scale_up_post_change(hba);
		else
			err = ufs_qcom_clk_scale_down_post_change(hba);

		if (err || !dev_req_params)
			goto out;

		ufs_qcom_cfg_timers(hba,
				    dev_req_params->gear_rx,
				    dev_req_params->pwr_rx,
				    dev_req_params->hs_rate,
				    false);
		ufs_qcom_update_bus_bw_vote(host);
	}

out:
	return err;
}

static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
	/* provide a legal default configuration */
	host->testbus.select_major = TSTBUS_UAWM;
	host->testbus.select_minor = 1;
}

static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
{
	if (host->testbus.select_major >= TSTBUS_MAX) {
		dev_err(host->hba->dev,
			"%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
			__func__, host->testbus.select_major);
		return false;
	}

	/*
	 * Not performing check for each individual select_major
	 * mappings of select_minor, since there is no harm in
	 * configuring a non-existent select_minor
	 */
	if (host->testbus.select_minor > 0x1F) {
		dev_err(host->hba->dev,
			"%s: 0x%05X is not a legal testbus option\n",
			__func__, host->testbus.select_minor);
		return false;
	}

	return true;
}

int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
	int reg;
	int offset;
	u32 mask = TEST_BUS_SUB_SEL_MASK;

	if (!host)
		return -EINVAL;

	if (!ufs_qcom_testbus_cfg_is_ok(host))
		return -EPERM;

	switch (host->testbus.select_major) {
	case TSTBUS_UAWM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 24;
		break;
	case TSTBUS_UARM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 16;
		break;
	case TSTBUS_TXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 8;
		break;
	case TSTBUS_RXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 0;
		break;
	case TSTBUS_DFC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 24;
		break;
	case TSTBUS_TRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 16;
		break;
	case TSTBUS_TMRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 8;
		break;
	case TSTBUS_OCSC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 0;
		break;
	case TSTBUS_WRAPPER:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 16;
		break;
	case TSTBUS_COMBINED:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 8;
		break;
	case TSTBUS_UTP_HCI:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 0;
		break;
	case TSTBUS_UNIPRO:
		reg = UFS_UNIPRO_CFG;
		offset = 1;
		break;
	/*
	 * No need for a default case, since
	 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
	 * is legal
	 */
	}
	mask <<= offset;

	pm_runtime_get_sync(host->hba->dev);
	ufshcd_hold(host->hba, false);
	ufshcd_rmwl(host->hba, TEST_BUS_SEL,
		    (u32)host->testbus.select_major << 19,
		    REG_UFS_CFG1);
	ufshcd_rmwl(host->hba, mask,
		    (u32)host->testbus.select_minor << offset,
		    reg);
	ufshcd_release(host->hba);
	pm_runtime_put_sync(host->hba->dev);

	return 0;
}

static void ufs_qcom_testbus_read(struct ufs_hba *hba)
{
	ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
}

static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
	ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
			"HCI Vendor Specific Registers ");

	ufs_qcom_testbus_read(hba);
}

/**
 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
	.name = "qcom",
	.init = ufs_qcom_init,
	.exit = ufs_qcom_exit,
	.get_ufs_hci_version = ufs_qcom_get_ufs_hci_version,
	.clk_scale_notify = ufs_qcom_clk_scale_notify,
	.setup_clocks = ufs_qcom_setup_clocks,
	.hce_enable_notify = ufs_qcom_hce_enable_notify,
	.link_startup_notify = ufs_qcom_link_startup_notify,
	.pwr_change_notify = ufs_qcom_pwr_change_notify,
	.suspend = ufs_qcom_suspend,
	.resume = ufs_qcom_resume,
	.dbg_register_dump = ufs_qcom_dump_dbg_regs,
};

/**
 * ufs_qcom_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Return zero for success and non-zero for failure
 */
static int ufs_qcom_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;

	/* Perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
	if (err)
		dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);

	return err;
}

/**
 * ufs_qcom_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always return 0
 */
static int ufs_qcom_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);
	return 0;
}

static const struct of_device_id ufs_qcom_of_match[] = {
	{ .compatible = "qcom,ufshc"},
	{},
};

static const struct dev_pm_ops ufs_qcom_pm_ops = {
	.suspend = ufshcd_pltfrm_suspend,
	.resume = ufshcd_pltfrm_resume,
	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
	.runtime_resume = ufshcd_pltfrm_runtime_resume,
	.runtime_idle = ufshcd_pltfrm_runtime_idle,
};

static struct platform_driver ufs_qcom_pltform = {
	.probe = ufs_qcom_probe,
	.remove = ufs_qcom_remove,
	.shutdown = ufshcd_pltfrm_shutdown,
	.driver = {
		.name = "ufshcd-qcom",
		.pm = &ufs_qcom_pm_ops,
		.of_match_table = of_match_ptr(ufs_qcom_of_match),
	},
};
module_platform_driver(ufs_qcom_pltform);

MODULE_LICENSE("GPL v2");