/*
 * This file implements the platform dependent EEH operations on pseries.
 * The pseries platform relies heavily on RTAS, so the platform dependent
 * EEH operations are built on RTAS calls. The functions are derived from
 * arch/powerpc/platforms/pseries/eeh.c, with the necessary cleanup applied.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011.
 * Copyright IBM Corporation 2001, 2005, 2006
 * Copyright Dave Engebretsen & Todd Inglett 2001
 * Copyright Linas Vepstas 2005, 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>

/* RTAS tokens */
static int ibm_set_eeh_option;
static int ibm_set_slot_reset;
static int ibm_read_slot_reset_state;
static int ibm_read_slot_reset_state2;
static int ibm_slot_error_detail;
static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_bridge;
static int ibm_configure_pe;

/*
 * Buffer for reporting slot-error-detail RTAS calls. It's here
 * in BSS, and not dynamically allocated, so that it ends up in
 * the RMO where RTAS can access it.
 */
static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(slot_errbuf_lock);
static int eeh_error_buf_size;

/**
 * pseries_eeh_init - EEH platform dependent initialization
 *
 * EEH platform dependent initialization on pseries.
 */
static int pseries_eeh_init(void)
{
	/* figure out EEH RTAS function call tokens */
	ibm_set_eeh_option		= rtas_token("ibm,set-eeh-option");
	ibm_set_slot_reset		= rtas_token("ibm,set-slot-reset");
	ibm_read_slot_reset_state2	= rtas_token("ibm,read-slot-reset-state2");
	ibm_read_slot_reset_state	= rtas_token("ibm,read-slot-reset-state");
	ibm_slot_error_detail		= rtas_token("ibm,slot-error-detail");
	ibm_get_config_addr_info2	= rtas_token("ibm,get-config-addr-info2");
	ibm_get_config_addr_info	= rtas_token("ibm,get-config-addr-info");
	ibm_configure_pe		= rtas_token("ibm,configure-pe");
	ibm_configure_bridge		= rtas_token("ibm,configure-bridge");

	/*
	 * Necessary sanity check. We needn't check "get-config-addr-info"
	 * and its variant since even old firmware probably supports
	 * domain/bus/slot/function addresses for the EEH RTAS operations.
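	 * At least one of the two read-slot-reset-state variants and one
	 * of the configure-bridge/configure-pe variants must be present,
	 * along with set-eeh-option, set-slot-reset and slot-error-detail.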
	 */
	if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE ||
	    ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE ||
	    (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
	     ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) ||
	    ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE ||
	    (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
	     ibm_configure_bridge == RTAS_UNKNOWN_SERVICE)) {
		pr_info("EEH functionality not supported\n");
		return -EINVAL;
	}

	/* Initialize error log lock and size */
	spin_lock_init(&slot_errbuf_lock);
	eeh_error_buf_size = rtas_token("rtas-error-log-max");
	if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
		pr_info("%s: unknown EEH error log size\n",
			__func__);
		eeh_error_buf_size = 1024;
	} else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
		pr_info("%s: EEH error log size %d exceeds the maximal %d\n",
			__func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
		eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
	}

	/* Set EEH probe mode */
	eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG);

	return 0;
}

static int pseries_eeh_cap_start(struct pci_dn *pdn)
{
	u32 status;

	if (!pdn)
		return 0;

	rtas_read_config(pdn, PCI_STATUS, 2, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	return PCI_CAPABILITY_LIST;
}


static int pseries_eeh_find_cap(struct pci_dn *pdn, int cap)
{
	int pos = pseries_eeh_cap_start(pdn);
	int cnt = 48;	/* Maximal number of capabilities */
	u32 id;

	if (!pos)
		return 0;

	while (cnt--) {
		rtas_read_config(pdn, pos, 1, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		rtas_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}

	return 0;
}

static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 header;
	int pos = 256;
	int ttl = (4096 - 256) / 8;

	if (!edev || !edev->pcie_cap)
		return 0;
	if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
		return 0;
	else if (!header)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 256)
			break;

		if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}

/**
 * pseries_eeh_probe - EEH probe on the given device
 * @pdn: PCI device node
 * @data: Unused
 *
 * When the EEH module is installed during system boot, all PCI
 * devices are checked one by one to see whether they support EEH.
 * This function is introduced for that purpose.
 */
static void *pseries_eeh_probe(struct pci_dn *pdn, void *data)
{
	struct eeh_dev *edev;
	struct eeh_pe pe;
	u32 pcie_flags;
	int enable = 0;
	int ret;

	/* Retrieve OF node and eeh device */
	edev = pdn_to_eeh_dev(pdn);
	if (!edev || edev->pe)
		return NULL;

	/* Check class/vendor/device IDs */
	if (!pdn->vendor_id || !pdn->device_id || !pdn->class_code)
		return NULL;

	/* Skip for PCI-ISA bridge */
	if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
		return NULL;

	/*
	 * Update the class code and mode of the eeh device. We need
	 * to correctly reflect whether the current device is a root
	 * port or a PCIe switch downstream port.
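	 * The port type is encoded in bits 7:4 of the PCIe capability's
	 * Flags register (PCI_EXP_FLAGS_TYPE), hence the shift by 4 below.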
	 */
	edev->class_code = pdn->class_code;
	edev->pcix_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
	edev->pcie_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
	edev->aer_cap = pseries_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
	edev->mode &= 0xFFFFFF00;
	if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
		edev->mode |= EEH_DEV_BRIDGE;
		if (edev->pcie_cap) {
			rtas_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
					 2, &pcie_flags);
			pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
			if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
				edev->mode |= EEH_DEV_ROOT_PORT;
			else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
				edev->mode |= EEH_DEV_DS_PORT;
		}
	}

	/* Initialize the fake PE */
	memset(&pe, 0, sizeof(struct eeh_pe));
	pe.phb = edev->phb;
	pe.config_addr = (pdn->busno << 16) | (pdn->devfn << 8);

	/* Enable EEH on the device */
	ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
	if (!ret) {
		/* Retrieve PE address */
		edev->config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
		edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
		pe.addr = edev->pe_config_addr;

		/* Some older systems (Power4) allow the ibm,set-eeh-option
		 * call to succeed even on nodes where EEH is not supported.
		 * Verify support explicitly.
		 */
		ret = eeh_ops->get_state(&pe, NULL);
		if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT)
			enable = 1;

		if (enable) {
			eeh_add_flag(EEH_ENABLED);
			eeh_add_to_parent_pe(edev);

			pr_debug("%s: EEH enabled on %02x:%02x.%01x PHB#%d-PE#%x\n",
				 __func__, pdn->busno, PCI_SLOT(pdn->devfn),
				 PCI_FUNC(pdn->devfn), pe.phb->global_number,
				 pe.addr);
		} else if (pdn->parent && pdn_to_eeh_dev(pdn->parent) &&
			   (pdn_to_eeh_dev(pdn->parent))->pe) {
			/* This device doesn't support EEH, but it may have an
			 * EEH parent, in which case we mark it as supported.
			 */
			edev->config_addr = pdn_to_eeh_dev(pdn->parent)->config_addr;
			edev->pe_config_addr = pdn_to_eeh_dev(pdn->parent)->pe_config_addr;
			eeh_add_to_parent_pe(edev);
		}
	}

	/* Save memory bars */
	eeh_save_bars(edev);

	return NULL;
}

/**
 * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @pe: EEH PE
 * @option: operation to be issued
 *
 * This function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA.
 */
static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
{
	int ret = 0;
	int config_addr;

	/*
	 * When we're enabling or disabling EEH functionality on
	 * a particular PE, the PE config address is possibly
	 * unavailable. Therefore, we have to figure it out from
	 * the FDT node.
	 */
	switch (option) {
	case EEH_OPT_DISABLE:
	case EEH_OPT_ENABLE:
	case EEH_OPT_THAW_MMIO:
	case EEH_OPT_THAW_DMA:
		config_addr = pe->config_addr;
		if (pe->addr)
			config_addr = pe->addr;
		break;
	case EEH_OPT_FREEZE_PE:
		/* Not supported */
		return 0;
	default:
		pr_err("%s: Invalid option %d\n",
		       __func__, option);
		return -EINVAL;
	}

	ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
			config_addr, BUID_HI(pe->phb->buid),
			BUID_LO(pe->phb->buid), option);

	return ret;
}

/**
 * pseries_eeh_get_pe_addr - Retrieve PE address
 * @pe: EEH PE
 *
 * Retrieve the associated PE address.
 * There are two RTAS calls dedicated to this purpose; try the new
 * one first and then fall back to the old one. Also make sure the
 * config address has been figured out from the FDT node before
 * calling this function.
 *
 * Note that a return value of zero means an invalid PE config
 * address.
 */
static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
{
	int ret = 0;
	int rets[3];

	if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
		/*
		 * First of all, we need to make sure there is one PE
		 * associated with the device. Otherwise, the PE address
		 * is meaningless.
		 */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 1);
		if (ret || (rets[0] == 0))
			return 0;

		/* Retrieve the associated PE config address */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 0);
		if (ret) {
			pr_warn("%s: Failed to get address for PHB#%d-PE#%x\n",
				__func__, pe->phb->global_number, pe->config_addr);
			return 0;
		}

		return rets[0];
	}

	if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 0);
		if (ret) {
			pr_warn("%s: Failed to get address for PHB#%d-PE#%x\n",
				__func__, pe->phb->global_number, pe->config_addr);
			return 0;
		}

		return rets[0];
	}

	return ret;
}

/**
 * pseries_eeh_get_state - Retrieve PE state
 * @pe: EEH PE
 * @state: return value
 *
 * Retrieve the state of the specified PE. On an RTAS compliant
 * pseries platform, there is a dedicated RTAS call for this purpose.
 * Note that the PE address might already be available when this
 * function is called, in which case it is used in preference to the
 * config address. Furthermore, there are two RTAS calls for the
 * purpose; try the new one first and fall back to the old one if
 * the new one isn't supported.
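 *
 * On success the return value is a bitmask of EEH_STATE_* flags;
 * if the RTAS call fails, its status is returned unchanged.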
 */
static int pseries_eeh_get_state(struct eeh_pe *pe, int *state)
{
	int config_addr;
	int ret;
	int rets[4];
	int result;

	/* Figure out PE config address if possible */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) {
		/* Fake PE unavailable info */
		rets[2] = 0;
		ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else {
		return EEH_STATE_NOT_SUPPORT;
	}

	if (ret)
		return ret;

	/* Parse the result out */
	result = 0;
	if (rets[1]) {
		switch (rets[0]) {
		case 0:
			result &= ~EEH_STATE_RESET_ACTIVE;
			result |= EEH_STATE_MMIO_ACTIVE;
			result |= EEH_STATE_DMA_ACTIVE;
			break;
		case 1:
			result |= EEH_STATE_RESET_ACTIVE;
			result |= EEH_STATE_MMIO_ACTIVE;
			result |= EEH_STATE_DMA_ACTIVE;
			break;
		case 2:
			result &= ~EEH_STATE_RESET_ACTIVE;
			result &= ~EEH_STATE_MMIO_ACTIVE;
			result &= ~EEH_STATE_DMA_ACTIVE;
			break;
		case 4:
			result &= ~EEH_STATE_RESET_ACTIVE;
			result &= ~EEH_STATE_MMIO_ACTIVE;
			result &= ~EEH_STATE_DMA_ACTIVE;
			result |= EEH_STATE_MMIO_ENABLED;
			break;
		case 5:
			if (rets[2]) {
				if (state)
					*state = rets[2];
				result = EEH_STATE_UNAVAILABLE;
			} else {
				result = EEH_STATE_NOT_SUPPORT;
			}
			break;
		default:
			result = EEH_STATE_NOT_SUPPORT;
		}
	} else {
		result = EEH_STATE_NOT_SUPPORT;
	}

	return result;
}

/**
 * pseries_eeh_reset - Reset the specified PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Reset the specified PE.
 */
static int pseries_eeh_reset(struct eeh_pe *pe, int option)
{
	int config_addr;
	int ret;

	/* Figure out PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	/* Reset PE through RTAS call */
	ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
			config_addr, BUID_HI(pe->phb->buid),
			BUID_LO(pe->phb->buid), option);

	/* If fundamental-reset not supported, try hot-reset */
	if (option == EEH_RESET_FUNDAMENTAL &&
	    ret == -8) {
		option = EEH_RESET_HOT;
		ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), option);
	}

	/* We need reset hold or settlement delay */
	if (option == EEH_RESET_FUNDAMENTAL ||
	    option == EEH_RESET_HOT)
		msleep(EEH_PE_RST_HOLD_TIME);
	else
		msleep(EEH_PE_RST_SETTLE_TIME);

	return ret;
}

/**
 * pseries_eeh_wait_state - Wait for PE state
 * @pe: EEH PE
 * @max_wait: maximal period in milliseconds
 *
 * Wait for the state of the associated PE. It might take some time
 * to retrieve the PE's state.
 */
static int pseries_eeh_wait_state(struct eeh_pe *pe, int max_wait)
{
	int ret;
	int mwait;

	/*
	 * According to PAPR, the state of a PE might be temporarily
	 * unavailable. In that case, we have to wait for the interval
	 * indicated by the firmware. The maximum wait time is 5 minutes,
	 * which is taken from the original EEH implementation.
	 * The original implementation also defined the minimum wait
	 * time as 1 second.
	 */
#define EEH_STATE_MIN_WAIT_TIME	(1000)
#define EEH_STATE_MAX_WAIT_TIME	(300 * 1000)

	while (1) {
		ret = pseries_eeh_get_state(pe, &mwait);

		/*
		 * If the PE's state is temporarily unavailable,
		 * we have to wait for the specified time. Otherwise,
		 * the PE's state will be returned immediately.
		 */
		if (ret != EEH_STATE_UNAVAILABLE)
			return ret;

		if (max_wait <= 0) {
			pr_warn("%s: Timeout when getting PE's state (%d)\n",
				__func__, max_wait);
			return EEH_STATE_NOT_SUPPORT;
		}

		if (mwait <= 0) {
			pr_warn("%s: Firmware returned bad wait value %d\n",
				__func__, mwait);
			mwait = EEH_STATE_MIN_WAIT_TIME;
		} else if (mwait > EEH_STATE_MAX_WAIT_TIME) {
			pr_warn("%s: Firmware returned too long wait value %d\n",
				__func__, mwait);
			mwait = EEH_STATE_MAX_WAIT_TIME;
		}

		max_wait -= mwait;
		msleep(mwait);
	}

	return EEH_STATE_NOT_SUPPORT;
}

/**
 * pseries_eeh_get_log - Retrieve error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 * @drv_log: driver log to be combined with retrieved error log
 * @len: length of driver log
 *
 * Retrieve the temporary or permanent error log from the PE.
 * The error is retrieved through the dedicated ibm,slot-error-detail
 * RTAS call.
 */
static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len)
{
	int config_addr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&slot_errbuf_lock, flags);
	memset(slot_errbuf, 0, eeh_error_buf_size);

	/* Figure out the PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr,
			BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid),
			virt_to_phys(drv_log), len,
			virt_to_phys(slot_errbuf), eeh_error_buf_size,
			severity);
	if (!ret)
		log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
	spin_unlock_irqrestore(&slot_errbuf_lock, flags);

	return ret;
}

/**
 * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 * This function is called to reconfigure the bridges included in the
 * specified PE so that the malfunctioning PE can be recovered.
 */
static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
{
	int config_addr;
	int ret;
	/* Waiting 0.2s maximum before skipping configuration */
	int max_wait = 200;

	/* Figure out the PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	while (max_wait > 0) {
		/* Use new configure-pe function, if supported */
		if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
			ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
					config_addr, BUID_HI(pe->phb->buid),
					BUID_LO(pe->phb->buid));
		} else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
			ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
					config_addr, BUID_HI(pe->phb->buid),
					BUID_LO(pe->phb->buid));
		} else {
			return -EFAULT;
		}

		if (!ret)
			return ret;

		/*
		 * If RTAS returns a delay value that's above 100ms, cut it
		 * down to 100ms in case firmware made a mistake.
		 * For more on how these delay values work, see
		 * rtas_busy_delay_time().
		 */
		if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
		    ret <= RTAS_EXTENDED_DELAY_MAX)
			ret = RTAS_EXTENDED_DELAY_MIN+2;

		max_wait -= rtas_busy_delay_time(ret);

		if (max_wait < 0)
			break;

		rtas_busy_delay(ret);
	}

	pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
		__func__, pe->phb->global_number, pe->addr, ret);
	return ret;
}

/**
 * pseries_eeh_read_config - Read PCI config space
 * @pdn: PCI device node
 * @where: PCI address
 * @size: size to read
 * @val: return value
 *
 * Read config space from the specified device.
 */
static int pseries_eeh_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
{
	return rtas_read_config(pdn, where, size, val);
}

/**
 * pseries_eeh_write_config - Write PCI config space
 * @pdn: PCI device node
 * @where: PCI address
 * @size: size to write
 * @val: value to be written
 *
 * Write config space to the specified device.
 */
static int pseries_eeh_write_config(struct pci_dn *pdn, int where, int size, u32 val)
{
	return rtas_write_config(pdn, where, size, val);
}

static struct eeh_ops pseries_eeh_ops = {
	.name			= "pseries",
	.init			= pseries_eeh_init,
	.probe			= pseries_eeh_probe,
	.set_option		= pseries_eeh_set_option,
	.get_pe_addr		= pseries_eeh_get_pe_addr,
	.get_state		= pseries_eeh_get_state,
	.reset			= pseries_eeh_reset,
	.wait_state		= pseries_eeh_wait_state,
	.get_log		= pseries_eeh_get_log,
	.configure_bridge	= pseries_eeh_configure_bridge,
	.err_inject		= NULL,
	.read_config		= pseries_eeh_read_config,
	.write_config		= pseries_eeh_write_config,
	.next_error		= NULL,
	.restore_config		= NULL
};

/**
 * eeh_pseries_init - Register platform dependent EEH operations
 *
 * EEH initialization on the pseries platform. This function should be
 * called before any other EEH related functions.
 */
static int __init eeh_pseries_init(void)
{
	int ret;

	ret = eeh_ops_register(&pseries_eeh_ops);
	if (!ret)
		pr_info("EEH: pSeries platform initialized\n");
	else
		pr_info("EEH: pSeries platform initialization failure (%d)\n",
			ret);

	return ret;
}
machine_early_initcall(pseries, eeh_pseries_init);