powerpc/eeh: Export confirm_error_lock
authorGavin Shan <shangw@linux.vnet.ibm.com>
Thu, 20 Jun 2013 05:21:03 +0000 (13:21 +0800)
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>
Thu, 20 Jun 2013 07:06:11 +0000 (17:06 +1000)
An EEH event is created and queued to the event queue for each
incoming EEH error. When multiple EEH errors arrive, we need to
serialize their processing to keep the PE state (flags) consistent.
The spinlock "confirm_error_lock" was introduced for this purpose.
Since we will inject EEH events from the error reporting interrupts
on the PowerNV platform, export the spinlock (wrapped in the
eeh_serialize_lock/unlock helpers) so that path can keep the PE
state consistent as well. A usage sketch follows the diff below.

Signed-off-by: Gavin Shan <shangw@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
arch/powerpc/include/asm/eeh.h
arch/powerpc/kernel/eeh.c

index e1109fd87ff4b51b9f7a8978fd511c9ac593178d..0c0ac93f422ff78903b9a0b7c6626a3b68453580 100644 (file)
@@ -150,6 +150,7 @@ struct eeh_ops {
 extern struct eeh_ops *eeh_ops;
 extern int eeh_subsystem_enabled;
 extern struct mutex eeh_mutex;
+extern raw_spinlock_t confirm_error_lock;
 extern int eeh_probe_mode;
 
 #define EEH_PROBE_MODE_DEV     (1<<0)  /* From PCI device      */
@@ -180,6 +181,16 @@ static inline void eeh_unlock(void)
        mutex_unlock(&eeh_mutex);
 }
 
+static inline void eeh_serialize_lock(unsigned long *flags)
+{
+       raw_spin_lock_irqsave(&confirm_error_lock, *flags);
+}
+
+static inline void eeh_serialize_unlock(unsigned long flags)
+{
+       raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
+}
+
 /*
  * Max number of EEH freezes allowed before we consider the device
  * to be permanently disabled.
index 777ecc06af19f5595105d1cd3e410c28ae4c1bf2..81cd0311dee86c3a71bfa79ba40c603b76faa90b 100644 (file)
@@ -107,7 +107,7 @@ int eeh_probe_mode;
 DEFINE_MUTEX(eeh_mutex);
 
 /* Lock to avoid races due to multiple reports of an error */
-static DEFINE_RAW_SPINLOCK(confirm_error_lock);
+DEFINE_RAW_SPINLOCK(confirm_error_lock);
 
 /* Buffer for reporting pci register dumps. Its here in BSS, and
  * not dynamically alloced, so that it ends up in RMO where RTAS
@@ -325,7 +325,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
         * in one slot might report errors simultaneously, and we
         * only want one error recovery routine running.
         */
-       raw_spin_lock_irqsave(&confirm_error_lock, flags);
+       eeh_serialize_lock(&flags);
        rc = 1;
        if (pe->state & EEH_PE_ISOLATED) {
                pe->check_count++;
@@ -374,7 +374,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
         * bridges.
         */
        eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
-       raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
+       eeh_serialize_unlock(flags);
 
        eeh_send_failure_event(pe);
 
@@ -386,7 +386,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
        return 1;
 
 dn_unlock:
-       raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
+       eeh_serialize_unlock(flags);
        return rc;
 }
 
@@ -702,8 +702,6 @@ int __init eeh_init(void)
                return ret;
        }
 
-       raw_spin_lock_init(&confirm_error_lock);
-
        /* Initialize EEH event */
        ret = eeh_event_init();
        if (ret)
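
The sketch below is not part of this patch; it illustrates how a
PowerNV error-interrupt path might use the newly exported helpers.
The handler name (example_handle_eeh_error) is hypothetical, but the
locking pattern mirrors the one in eeh_dev_check_failure() shown in
the diff above: check and update the PE state under the lock, drop
the lock, then queue the event.

#include <asm/eeh.h>		/* eeh_serialize_lock/unlock, eeh_pe_state_mark */
#include <asm/eeh_event.h>	/* eeh_send_failure_event */

/*
 * Hypothetical example: serialize PE state updates from an error
 * reporting interrupt against concurrent calls to
 * eeh_dev_check_failure().
 */
static void example_handle_eeh_error(struct eeh_pe *pe)
{
	unsigned long flags;

	eeh_serialize_lock(&flags);

	/* Another report already isolated this PE; nothing to do. */
	if (pe->state & EEH_PE_ISOLATED) {
		eeh_serialize_unlock(flags);
		return;
	}

	/* Mark the PE isolated while the lock is held, so only one
	 * error recovery routine runs for this PE.
	 */
	eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
	eeh_serialize_unlock(flags);

	/* Queue the event for the EEH recovery thread. */
	eeh_send_failure_event(pe);
}

Note the asymmetry in the helpers: eeh_serialize_lock() takes a
pointer so raw_spin_lock_irqsave() can store the saved interrupt
flags, while eeh_serialize_unlock() takes the flags by value.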