/*
 * The file intends to implement the functions needed by EEH, which is
 * built on IODA compliant chip. Actually, lots of functions related
 * to EEH would be built based on the OPAL APIs.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
14 #include <linux/bootmem.h>
15 #include <linux/debugfs.h>
16 #include <linux/delay.h>
18 #include <linux/irq.h>
19 #include <linux/kernel.h>
20 #include <linux/msi.h>
21 #include <linux/notifier.h>
22 #include <linux/pci.h>
23 #include <linux/string.h>
26 #include <asm/eeh_event.h>
28 #include <asm/iommu.h>
29 #include <asm/msi_bitmap.h>
31 #include <asm/pci-bridge.h>
32 #include <asm/ppc-pci.h>
/* Non-zero once the OPAL event notifier below has been registered. */
static int ioda_eeh_nb_init;
/*
 * ioda_eeh_event - OPAL event notifier callback.
 *
 * Forwards a special EEH failure event (NULL PE, i.e. "go probe") to the
 * EEH core when a PCI error event is both newly changed and currently
 * asserted by firmware.
 *
 * NOTE(review): this chunk is truncated -- the opening brace, one more
 * condition of the if statement (original line 48, likely an
 * "EEH enabled" guard -- TODO confirm against upstream), and the
 * trailing "return 0;" are elided here.
 */
40 static int ioda_eeh_event(struct notifier_block *nb,
41 unsigned long events, void *change)
43 uint64_t changed_evts = (uint64_t)change;
45 /* We simply send special EEH event */
46 if ((changed_evts & OPAL_EVENT_PCI_ERROR) &&
47 (events & OPAL_EVENT_PCI_ERROR) &&
49 eeh_send_failure_event(NULL);
54 static struct notifier_block ioda_eeh_nb = {
55 .notifier_call = ioda_eeh_event,
60 #ifdef CONFIG_DEBUG_FS
61 static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val)
63 struct pci_controller *hose = data;
64 struct pnv_phb *phb = hose->private_data;
66 out_be64(phb->regs + offset, val);
70 static int ioda_eeh_dbgfs_get(void *data, int offset, u64 *val)
72 struct pci_controller *hose = data;
73 struct pnv_phb *phb = hose->private_data;
75 *val = in_be64(phb->regs + offset);
79 static int ioda_eeh_outb_dbgfs_set(void *data, u64 val)
81 return ioda_eeh_dbgfs_set(data, 0xD10, val);
84 static int ioda_eeh_outb_dbgfs_get(void *data, u64 *val)
86 return ioda_eeh_dbgfs_get(data, 0xD10, val);
89 static int ioda_eeh_inbA_dbgfs_set(void *data, u64 val)
91 return ioda_eeh_dbgfs_set(data, 0xD90, val);
94 static int ioda_eeh_inbA_dbgfs_get(void *data, u64 *val)
96 return ioda_eeh_dbgfs_get(data, 0xD90, val);
99 static int ioda_eeh_inbB_dbgfs_set(void *data, u64 val)
101 return ioda_eeh_dbgfs_set(data, 0xE10, val);
104 static int ioda_eeh_inbB_dbgfs_get(void *data, u64 *val)
106 return ioda_eeh_dbgfs_get(data, 0xE10, val);
/*
 * debugfs file operations tying the three error-injection registers to
 * the get/set helpers above; values are rendered/parsed as hex.
 */
109 DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_outb_dbgfs_ops, ioda_eeh_outb_dbgfs_get,
110 ioda_eeh_outb_dbgfs_set, "0x%llx\n");
111 DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbA_dbgfs_ops, ioda_eeh_inbA_dbgfs_get,
112 ioda_eeh_inbA_dbgfs_set, "0x%llx\n");
113 DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get,
114 ioda_eeh_inbB_dbgfs_set, "0x%llx\n");
115 #endif /* CONFIG_DEBUG_FS */
/*
 * NOTE(review): this chunk is truncated.  Elided here: the "int ret;"
 * declaration, the error-check/early-return after
 * opal_notifier_register(), the parent-dir and private-data arguments
 * of the debugfs_create_file() calls, and the final return.
 */
119 * ioda_eeh_post_init - Chip dependent post initialization
120 * @hose: PCI controller
122 * The function will be called after eeh PEs and devices
123 * have been built. That means the EEH is ready to supply
124 * service with I/O cache.
126 static int ioda_eeh_post_init(struct pci_controller *hose)
128 struct pnv_phb *phb = hose->private_data;
/* Register the OPAL event notifier exactly once (file-scope flag). */
131 /* Register OPAL event notifier */
132 if (!ioda_eeh_nb_init) {
133 ret = opal_notifier_register(&ioda_eeh_nb);
135 pr_err("%s: Can't register OPAL event notifier (%d)\n",
140 ioda_eeh_nb_init = 1;
/* Expose the three error-injection registers through debugfs. */
143 #ifdef CONFIG_DEBUG_FS
145 debugfs_create_file("err_injct_outbound", 0600,
147 &ioda_eeh_outb_dbgfs_ops);
148 debugfs_create_file("err_injct_inboundA", 0600,
150 &ioda_eeh_inbA_dbgfs_ops);
151 debugfs_create_file("err_injct_inboundB", 0600,
153 &ioda_eeh_inbB_dbgfs_ops);
/* Mark EEH functional on this PHB. */
157 phb->eeh_state |= PNV_EEH_STATE_ENABLED;
/*
 * NOTE(review): this chunk is truncated.  Elided here: declarations of
 * ret/pe_no, the "switch (option)" header, the EEH_OPT_ENABLE case,
 * per-case break/return statements, and the closing braces.  pe_no is
 * presumably derived from pe->addr -- TODO confirm.
 */
163 * ioda_eeh_set_option - Set EEH operation or I/O setting
167 * Enable or disable EEH option for the indicated PE. The
168 * function also can be used to enable I/O or DMA for the
171 static int ioda_eeh_set_option(struct eeh_pe *pe, int option)
175 struct pci_controller *hose = pe->phb;
176 struct pnv_phb *phb = hose->private_data;
/* Reject PE numbers outside the PHB's PE table. */
178 /* Check on PE number */
179 if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) {
180 pr_err("%s: PE address %x out of range [0, %x] "
182 __func__, pe->addr, phb->ioda.total_pe,
183 hose->global_number);
189 case EEH_OPT_DISABLE:
/* Thaw MMIO: ask OPAL to clear the MMIO freeze on this PE. */
195 case EEH_OPT_THAW_MMIO:
196 ret = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
197 OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO);
199 pr_warning("%s: Failed to enable MMIO for "
200 "PHB#%x-PE#%x, err=%lld\n",
201 __func__, hose->global_number, pe_no, ret);
/* Thaw DMA: ask OPAL to clear the DMA freeze on this PE. */
206 case EEH_OPT_THAW_DMA:
207 ret = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
208 OPAL_EEH_ACTION_CLEAR_FREEZE_DMA);
210 pr_warning("%s: Failed to enable DMA for "
211 "PHB#%x-PE#%x, err=%lld\n",
212 __func__, hose->global_number, pe_no, ret);
218 pr_warning("%s: Invalid option %d\n", __func__, option);
225 static void ioda_eeh_phb_diag(struct pci_controller *hose)
227 struct pnv_phb *phb = hose->private_data;
230 rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
231 PNV_PCI_DIAG_BUF_SIZE);
232 if (rc != OPAL_SUCCESS) {
233 pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
234 __func__, hose->global_number, rc);
238 pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
/*
 * NOTE(review): this chunk is truncated.  Elided here: declarations of
 * result/ret/pe_no/fstate/pcierr, the error check after
 * opal_pci_eeh_freeze_status(), the "switch (fstate)" header, the
 * per-case break statements, and the final "return result;".
 */
242 * ioda_eeh_get_state - Retrieve the state of PE
245 * The PE's state should be retrieved from the PEEV, PEST
246 * IODA tables. Since the OPAL has exported the function
247 * to do it, it'd better to use that.
249 static int ioda_eeh_get_state(struct eeh_pe *pe)
256 struct pci_controller *hose = pe->phb;
257 struct pnv_phb *phb = hose->private_data;
260 * Sanity check on PE address. The PHB PE address should
263 if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) {
264 pr_err("%s: PE address %x out of range [0, %x] "
266 __func__, pe->addr, phb->ioda.total_pe,
267 hose->global_number);
268 return EEH_STATE_NOT_SUPPORT;
/* Ask OPAL for the freeze state and PCI error type of this PE. */
271 /* Retrieve PE status through OPAL */
273 ret = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
274 &fstate, &pcierr, NULL);
276 pr_err("%s: Failed to get EEH status on "
277 "PHB#%x-PE#%x\n, err=%lld\n",
278 __func__, hose->global_number, pe_no, ret);
279 return EEH_STATE_NOT_SUPPORT;
/*
 * PHB-type PE: healthy unless OPAL reports a PHB error; on first
 * sight of a PHB error, mark isolated and dump diag-data once.
 */
282 /* Check PHB status */
283 if (pe->type & EEH_PE_PHB) {
285 result &= ~EEH_STATE_RESET_ACTIVE;
287 if (pcierr != OPAL_EEH_PHB_ERROR) {
288 result |= EEH_STATE_MMIO_ACTIVE;
289 result |= EEH_STATE_DMA_ACTIVE;
290 result |= EEH_STATE_MMIO_ENABLED;
291 result |= EEH_STATE_DMA_ENABLED;
292 } else if (!(pe->state & EEH_PE_ISOLATED)) {
293 eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
294 ioda_eeh_phb_diag(hose);
/* Device PE: translate the OPAL freeze state into EEH state bits. */
300 /* Parse result out */
303 case OPAL_EEH_STOPPED_NOT_FROZEN:
304 result &= ~EEH_STATE_RESET_ACTIVE;
305 result |= EEH_STATE_MMIO_ACTIVE;
306 result |= EEH_STATE_DMA_ACTIVE;
307 result |= EEH_STATE_MMIO_ENABLED;
308 result |= EEH_STATE_DMA_ENABLED;
310 case OPAL_EEH_STOPPED_MMIO_FREEZE:
311 result &= ~EEH_STATE_RESET_ACTIVE;
312 result |= EEH_STATE_DMA_ACTIVE;
313 result |= EEH_STATE_DMA_ENABLED;
315 case OPAL_EEH_STOPPED_DMA_FREEZE:
316 result &= ~EEH_STATE_RESET_ACTIVE;
317 result |= EEH_STATE_MMIO_ACTIVE;
318 result |= EEH_STATE_MMIO_ENABLED;
320 case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
321 result &= ~EEH_STATE_RESET_ACTIVE;
323 case OPAL_EEH_STOPPED_RESET:
324 result |= EEH_STATE_RESET_ACTIVE;
326 case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
327 result |= EEH_STATE_UNAVAILABLE;
329 case OPAL_EEH_STOPPED_PERM_UNAVAIL:
330 result |= EEH_STATE_NOT_SUPPORT;
333 pr_warning("%s: Unexpected EEH status 0x%x "
335 __func__, fstate, hose->global_number, pe_no);
/* First sight of a frozen PE: mark isolated and dump diag-data once. */
338 /* Dump PHB diag-data for frozen PE */
339 if (result != EEH_STATE_NOT_SUPPORT &&
340 (result & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) !=
341 (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE) &&
342 !(pe->state & EEH_PE_ISOLATED)) {
343 eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
344 ioda_eeh_phb_diag(hose);
/*
 * Clear all freeze state (MMIO + DMA) on the PE via OPAL, then read the
 * state back to verify the thaw actually took effect.
 * NOTE(review): this chunk is truncated.  Elided here: declarations of
 * phb/pe_no/fstate/pcierr/ret, the assignments of hose and pe_no, the
 * error checks after each OPAL call, error-path returns, and the final
 * "return 0;".  hose is presumably pe->phb -- TODO confirm.
 */
350 static int ioda_eeh_pe_clear(struct eeh_pe *pe)
352 struct pci_controller *hose;
361 phb = pe->phb->private_data;
363 /* Clear the EEH error on the PE */
364 ret = opal_pci_eeh_freeze_clear(phb->opal_id,
365 pe_no, OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
367 pr_err("%s: Failed to clear EEH error for "
368 "PHB#%x-PE#%x, err=%lld\n",
369 __func__, hose->global_number, pe_no, ret);
374 * Read the PE state back and verify that the frozen
375 * state has been removed.
377 ret = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
378 &fstate, &pcierr, NULL);
380 pr_err("%s: Failed to get EEH status on "
381 "PHB#%x-PE#%x\n, err=%lld\n",
382 __func__, hose->global_number, pe_no, ret);
/* The PE must report "not frozen" after the clear. */
386 if (fstate != OPAL_EEH_STOPPED_NOT_FROZEN) {
387 pr_err("%s: Frozen state not cleared on "
388 "PHB#%x-PE#%x, sts=%x\n",
389 __func__, hose->global_number, pe_no, fstate);
/*
 * Poll the PHB until an in-flight OPAL request completes.
 * NOTE(review): this chunk is truncated.  Elided here: the polling loop
 * around opal_pci_poll() (presumably looping while the return value is
 * positive, sleeping between iterations -- TODO confirm) and the final
 * "return rc;".
 */
396 static s64 ioda_eeh_phb_poll(struct pnv_phb *phb)
398 s64 rc = OPAL_HARDWARE;
401 rc = opal_pci_poll(phb->opal_id);
/*
 * Issue a complete PHB reset through OPAL: assert for fundamental/hot
 * reset, deassert for deactivate, then poll until the request finishes.
 * NOTE(review): this chunk is truncated.  Elided here: the reset-scope
 * argument of the assert-path opal_pci_reset() call (and the
 * OPAL_ASSERT_RESET flag), the rc error check before polling, and the
 * final success/-EIO return mapping.
 */
411 static int ioda_eeh_phb_reset(struct pci_controller *hose, int option)
413 struct pnv_phb *phb = hose->private_data;
414 s64 rc = OPAL_HARDWARE;
416 pr_debug("%s: Reset PHB#%x, option=%d\n",
417 __func__, hose->global_number, option);
419 /* Issue PHB complete reset request */
420 if (option == EEH_RESET_FUNDAMENTAL ||
421 option == EEH_RESET_HOT)
422 rc = opal_pci_reset(phb->opal_id,
425 else if (option == EEH_RESET_DEACTIVATE)
426 rc = opal_pci_reset(phb->opal_id,
428 OPAL_DEASSERT_RESET);
433 * Poll state of the PHB until the request is done
436 rc = ioda_eeh_phb_poll(phb);
438 if (rc != OPAL_SUCCESS)
/*
 * Reset the root complex (secondary bus of the PHB) through OPAL with
 * the scope matching @option, then poll until the request completes.
 * NOTE(review): this chunk is truncated.  Elided here: the
 * OPAL_ASSERT_RESET flag of the fundamental/hot assert calls, the hot
 * reset scope constant, the rc error check before polling, and the
 * final success/error return mapping.
 */
444 static int ioda_eeh_root_reset(struct pci_controller *hose, int option)
446 struct pnv_phb *phb = hose->private_data;
447 s64 rc = OPAL_SUCCESS;
449 pr_debug("%s: Reset PHB#%x, option=%d\n",
450 __func__, hose->global_number, option);
453 * During the reset deassert time, we needn't care
454 * the reset scope because the firmware does nothing
455 * for fundamental or hot reset during deassert phase.
457 if (option == EEH_RESET_FUNDAMENTAL)
458 rc = opal_pci_reset(phb->opal_id,
459 OPAL_PCI_FUNDAMENTAL_RESET,
461 else if (option == EEH_RESET_HOT)
462 rc = opal_pci_reset(phb->opal_id,
465 else if (option == EEH_RESET_DEACTIVATE)
466 rc = opal_pci_reset(phb->opal_id,
468 OPAL_DEASSERT_RESET);
472 /* Poll state of the PHB until the request is done */
473 rc = ioda_eeh_phb_poll(phb);
475 if (rc != OPAL_SUCCESS)
/*
 * Reset a PE by toggling the secondary-bus-reset bit in the upstream
 * p2p bridge's bridge-control register: set the bit to assert, clear
 * it to deassert.
 * NOTE(review): this chunk is truncated.  Elided here: the "u16 ctrl;"
 * declaration, the "switch (option)" header, the line between
 * EEH_RESET_FUNDAMENTAL and the register read (presumably a
 * fall-through "case EEH_RESET_HOT:" -- TODO confirm), per-case break
 * statements, and the final return.
 */
481 static int ioda_eeh_bridge_reset(struct pci_controller *hose,
482 struct pci_dev *dev, int option)
486 pr_debug("%s: Reset device %04x:%02x:%02x.%01x with option %d\n",
487 __func__, hose->global_number, dev->bus->number,
488 PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), option);
491 case EEH_RESET_FUNDAMENTAL:
493 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
494 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
495 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
497 case EEH_RESET_DEACTIVATE:
498 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
499 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
500 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
/*
 * NOTE(review): this chunk is truncated.  Elided here: declarations of
 * ret/bus, the error check after ioda_eeh_pe_clear(), the else arm
 * structure around the bus/bridge choice, and the final "return ret;".
 */
508 * ioda_eeh_reset - Reset the indicated PE
510 * @option: reset option
512 * Do reset on the indicated PE. For PCI bus sensitive PE,
513 * we need to reset the parent p2p bridge. The PHB has to
514 * be reinitialized if the p2p bridge is root bridge. For
515 * PCI device sensitive PE, we will try to reset the device
516 * through FLR. For now, we don't have OPAL APIs to do HARD
517 * reset yet, so all reset would be SOFT (HOT) reset.
519 static int ioda_eeh_reset(struct eeh_pe *pe, int option)
521 struct pci_controller *hose = pe->phb;
526 * Anyway, we have to clear the problematic state for the
527 * corresponding PE. However, we needn't do it if the PE
528 * is PHB associated. That means the PHB is having fatal
529 * errors and it needs reset. Further more, the AIB interface
530 * isn't reliable any more.
/* Thaw the PE first unless the whole PHB is being reset. */
532 if (!(pe->type & EEH_PE_PHB) &&
533 (option == EEH_RESET_HOT ||
534 option == EEH_RESET_FUNDAMENTAL)) {
535 ret = ioda_eeh_pe_clear(pe);
541 * The rules applied to reset, either fundamental or hot reset:
543 * We always reset the direct upstream bridge of the PE. If the
544 * direct upstream bridge isn't root bridge, we always take hot
545 * reset no matter what option (fundamental or hot) is. Otherwise,
546 * we should do the reset according to the required option.
/* Pick PHB, root-port, or p2p-bridge reset based on PE topology. */
548 if (pe->type & EEH_PE_PHB) {
549 ret = ioda_eeh_phb_reset(hose, option);
551 bus = eeh_pe_bus_get(pe);
552 if (pci_is_root_bus(bus) ||
553 pci_is_root_bus(bus->parent))
554 ret = ioda_eeh_root_reset(hose, option);
556 ret = ioda_eeh_bridge_reset(hose, bus->self, option);
/**
 * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE
 * @pe: EEH PE
 *
 * For particular PE, it might have included PCI bridges. In order
 * to make the PE work properly, those PCI bridges should be configured
 * correctly. However, we need do nothing on P7IOC since the reset
 * function will do everything that should be covered by the function.
 */
static int ioda_eeh_configure_bridge(struct eeh_pe *pe)
{
        return 0;
}
576 static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
579 pr_info(" GEM XFIR: %016llx\n", data->gemXfir);
580 pr_info(" GEM RFIR: %016llx\n", data->gemRfir);
581 pr_info(" GEM RIRQFIR: %016llx\n", data->gemRirqfir);
582 pr_info(" GEM Mask: %016llx\n", data->gemMask);
583 pr_info(" GEM RWOF: %016llx\n", data->gemRwof);
586 pr_info(" LEM FIR: %016llx\n", data->lemFir);
587 pr_info(" LEM Error Mask: %016llx\n", data->lemErrMask);
588 pr_info(" LEM Action 0: %016llx\n", data->lemAction0);
589 pr_info(" LEM Action 1: %016llx\n", data->lemAction1);
590 pr_info(" LEM WOF: %016llx\n", data->lemWof);
593 static void ioda_eeh_hub_diag(struct pci_controller *hose)
595 struct pnv_phb *phb = hose->private_data;
596 struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
599 rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
600 if (rc != OPAL_SUCCESS) {
601 pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n",
602 __func__, phb->hub_id, rc);
606 switch (data->type) {
607 case OPAL_P7IOC_DIAG_TYPE_RGC:
608 pr_info("P7IOC diag-data for RGC\n\n");
609 ioda_eeh_hub_diag_common(data);
610 pr_info(" RGC Status: %016llx\n", data->rgc.rgcStatus);
611 pr_info(" RGC LDCP: %016llx\n", data->rgc.rgcLdcp);
613 case OPAL_P7IOC_DIAG_TYPE_BI:
614 pr_info("P7IOC diag-data for BI %s\n\n",
615 data->bi.biDownbound ? "Downbound" : "Upbound");
616 ioda_eeh_hub_diag_common(data);
617 pr_info(" BI LDCP 0: %016llx\n", data->bi.biLdcp0);
618 pr_info(" BI LDCP 1: %016llx\n", data->bi.biLdcp1);
619 pr_info(" BI LDCP 2: %016llx\n", data->bi.biLdcp2);
620 pr_info(" BI Fence Status: %016llx\n", data->bi.biFenceStatus);
622 case OPAL_P7IOC_DIAG_TYPE_CI:
623 pr_info("P7IOC diag-data for CI Port %d\\nn",
625 ioda_eeh_hub_diag_common(data);
626 pr_info(" CI Port Status: %016llx\n", data->ci.ciPortStatus);
627 pr_info(" CI Port LDCP: %016llx\n", data->ci.ciPortLdcp);
629 case OPAL_P7IOC_DIAG_TYPE_MISC:
630 pr_info("P7IOC diag-data for MISC\n\n");
631 ioda_eeh_hub_diag_common(data);
633 case OPAL_P7IOC_DIAG_TYPE_I2C:
634 pr_info("P7IOC diag-data for I2C\n\n");
635 ioda_eeh_hub_diag_common(data);
638 pr_warning("%s: Invalid type of HUB#%llx diag-data (%d)\n",
639 __func__, phb->hub_id, data->type);
643 static int ioda_eeh_get_phb_pe(struct pci_controller *hose,
646 struct eeh_pe *phb_pe;
648 phb_pe = eeh_phb_pe_get(hose);
650 pr_warning("%s Can't find PE for PHB#%d\n",
651 __func__, hose->global_number);
659 static int ioda_eeh_get_pe(struct pci_controller *hose,
660 u16 pe_no, struct eeh_pe **pe)
662 struct eeh_pe *phb_pe, *dev_pe;
665 /* Find the PHB PE */
666 if (ioda_eeh_get_phb_pe(hose, &phb_pe))
669 /* Find the PE according to PE# */
670 memset(&dev, 0, sizeof(struct eeh_dev));
672 dev.pe_config_addr = pe_no;
673 dev_pe = eeh_pe_get(&dev);
674 if (!dev_pe) return -EEXIST;
/*
 * NOTE(review): this chunk is truncated.  Elided here: declarations of
 * phb/rc/frozen_pe_no, the "switch (err_type)" header, continue/break
 * statements between cases, several closing braces, and the final
 * "return ret;".
 */
681 * ioda_eeh_next_error - Retrieve next error for EEH core to handle
682 * @pe: The affected PE
684 * The function is expected to be called by EEH core while it gets
685 * special EEH event (without binding PE). The function calls to
686 * OPAL APIs for next error to handle. The informational error is
687 * handled internally by platform. However, the dead IOC, dead PHB,
688 * fenced PHB and frozen PE should be handled by EEH core eventually.
690 static int ioda_eeh_next_error(struct eeh_pe **pe)
692 struct pci_controller *hose;
695 u16 err_type, severity;
697 int ret = EEH_NEXT_ERR_NONE;
700 * While running here, it's safe to purge the event queue.
701 * And we should keep the cached OPAL notifier event sychronized
702 * between the kernel and firmware.
704 eeh_remove_event(NULL);
705 opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
/* Walk every PHB and ask OPAL for its next pending error. */
707 list_for_each_entry(hose, &hose_list, list_node) {
709 * If the subordinate PCI buses of the PHB has been
710 * removed, we needn't take care of it any more.
712 phb = hose->private_data;
713 if (phb->eeh_state & PNV_EEH_STATE_REMOVED)
716 rc = opal_pci_next_error(phb->opal_id,
717 &frozen_pe_no, &err_type, &severity);
719 /* If OPAL API returns error, we needn't proceed */
720 if (rc != OPAL_SUCCESS) {
721 pr_devel("%s: Invalid return value on "
722 "PHB#%x (0x%lx) from opal_pci_next_error",
723 __func__, hose->global_number, rc);
727 /* If the PHB doesn't have error, stop processing */
728 if (err_type == OPAL_EEH_NO_ERROR ||
729 severity == OPAL_EEH_SEV_NO_ERROR) {
730 pr_devel("%s: No error found on PHB#%x\n",
731 __func__, hose->global_number);
736 * Processing the error. We're expecting the error with
737 * highest priority reported upon multiple errors on the
740 pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
741 __func__, err_type, severity,
742 frozen_pe_no, hose->global_number);
/*
 * Dead IOC: mark every PHB removed so they are skipped on
 * later passes; informational IOC errors just dump hub diag.
 */
744 case OPAL_EEH_IOC_ERROR:
745 if (severity == OPAL_EEH_SEV_IOC_DEAD) {
746 list_for_each_entry(hose, &hose_list,
748 phb = hose->private_data;
749 phb->eeh_state |= PNV_EEH_STATE_REMOVED;
752 pr_err("EEH: dead IOC detected\n");
753 ret = EEH_NEXT_ERR_DEAD_IOC;
754 } else if (severity == OPAL_EEH_SEV_INF) {
755 pr_info("EEH: IOC informative error "
757 ioda_eeh_hub_diag(hose);
758 ret = EEH_NEXT_ERR_NONE;
/* Dead/fenced PHB escalates to EEH core; INF just dumps diag. */
762 case OPAL_EEH_PHB_ERROR:
763 if (severity == OPAL_EEH_SEV_PHB_DEAD) {
764 if (ioda_eeh_get_phb_pe(hose, pe))
767 pr_err("EEH: dead PHB#%x detected\n",
768 hose->global_number);
769 phb->eeh_state |= PNV_EEH_STATE_REMOVED;
770 ret = EEH_NEXT_ERR_DEAD_PHB;
771 } else if (severity == OPAL_EEH_SEV_PHB_FENCED) {
772 if (ioda_eeh_get_phb_pe(hose, pe))
775 pr_err("EEH: fenced PHB#%x detected\n",
776 hose->global_number);
777 ret = EEH_NEXT_ERR_FENCED_PHB;
778 } else if (severity == OPAL_EEH_SEV_INF) {
779 pr_info("EEH: PHB#%x informative error "
781 hose->global_number);
782 ioda_eeh_phb_diag(hose);
783 ret = EEH_NEXT_ERR_NONE;
787 case OPAL_EEH_PE_ERROR:
789 * If we can't find the corresponding PE, the
790 * PEEV / PEST would be messy. So we force an
791 * fenced PHB so that it can be recovered.
793 if (ioda_eeh_get_pe(hose, frozen_pe_no, pe)) {
794 if (!ioda_eeh_get_phb_pe(hose, pe)) {
795 pr_err("EEH: Escalated fenced PHB#%x "
796 "detected for PE#%llx\n",
799 ret = EEH_NEXT_ERR_FENCED_PHB;
801 ret = EEH_NEXT_ERR_NONE;
804 pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
805 (*pe)->addr, (*pe)->phb->global_number);
806 ret = EEH_NEXT_ERR_FROZEN_PE;
811 pr_warn("%s: Unexpected error type %d\n",
816 * EEH core will try recover from fenced PHB or
817 * frozen PE. In the time for frozen PE, EEH core
818 * enable IO path for that before collecting logs,
819 * but it ruins the site. So we have to dump the
820 * log in advance here.
822 if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
823 ret == EEH_NEXT_ERR_FENCED_PHB) &&
824 !((*pe)->state & EEH_PE_ISOLATED)) {
825 eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
826 ioda_eeh_phb_diag(hose);
830 * If we have no errors on the specific PHB or only
831 * informative error there, we continue poking it.
832 * Otherwise, we need actions to be taken by upper
835 if (ret > EEH_NEXT_ERR_INF)
842 struct pnv_eeh_ops ioda_eeh_ops = {
843 .post_init = ioda_eeh_post_init,
844 .set_option = ioda_eeh_set_option,
845 .get_state = ioda_eeh_get_state,
846 .reset = ioda_eeh_reset,
847 .configure_bridge = ioda_eeh_configure_bridge,
848 .next_error = ioda_eeh_next_error