Merge commit 'v3.0.58' into android-3.0
arch/arm/vfp/vfpmodule.c
index 50dbe3545c041836c8eae64053eac4ce7f79dfcb..192e9dd4c39b139365777efb88a1d86497bcb93d 100644
@@ -34,52 +34,19 @@ void vfp_null_entry(void);
 
 void (*vfp_vector)(void) = vfp_null_entry;
 
-/*
- * Dual-use variable.
- * Used in startup: set to non-zero if VFP checks fail
- * After startup, holds VFP architecture
- */
-unsigned int VFP_arch;
-
 /*
  * The pointer to the vfpstate structure of the thread which currently
  * owns the context held in the VFP hardware, or NULL if the hardware
  * context is invalid.
- *
- * For UP, this is sufficient to tell which thread owns the VFP context.
- * However, for SMP, we also need to check the CPU number stored in the
- * saved state too to catch migrations.
  */
 union vfp_state *vfp_current_hw_state[NR_CPUS];
 
 /*
- * Is 'thread's most up to date state stored in this CPUs hardware?
- * Must be called from non-preemptible context.
- */
-static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
-{
-#ifdef CONFIG_SMP
-       if (thread->vfpstate.hard.cpu != cpu)
-               return false;
-#endif
-       return vfp_current_hw_state[cpu] == &thread->vfpstate;
-}
-
-/*
- * Force a reload of the VFP context from the thread structure.  We do
- * this by ensuring that access to the VFP hardware is disabled, and
- * clear last_VFP_context.  Must be called from non-preemptible context.
+ * Dual-use variable.
+ * Used during startup: set to non-zero if the VFP checks fail.
+ * After startup, holds the VFP architecture version.
  */
-static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
-{
-       if (vfp_state_in_hw(cpu, thread)) {
-               fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
-               vfp_current_hw_state[cpu] = NULL;
-       }
-#ifdef CONFIG_SMP
-       thread->vfpstate.hard.cpu = NR_CPUS;
-#endif
-}
+unsigned int VFP_arch;
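
The per-CPU array above is the heart of the lazy context-switch scheme: each
entry records which thread's vfpstate is currently loaded in that CPU's VFP
registers, so a save/reload is only needed when ownership actually changes.
A minimal user-space sketch of the idea (illustrative names, not kernel API):

#include <stdio.h>

#define NCPUS 2

struct state { int regs[32]; };           /* stand-in for union vfp_state */
static struct state *hw_owner[NCPUS];     /* stand-in for vfp_current_hw_state[] */

/* Simulated switch-in: touch the "hardware" only when the owner changes. */
static void switch_in(int cpu, struct state *next)
{
        if (hw_owner[cpu] == next)
                return;                   /* context already in the registers */
        hw_owner[cpu] = next;             /* take ownership and reload */
        printf("cpu%d: reloaded %p\n", cpu, (void *)next);
}

int main(void)
{
        struct state a, b;
        switch_in(0, &a);                 /* reload */
        switch_in(0, &a);                 /* no-op: 'a' still owns the hardware */
        switch_in(0, &b);                 /* reload */
        return 0;
}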
 
 /*
  * Per-thread VFP initialization.
@@ -89,27 +56,21 @@ static void vfp_thread_flush(struct thread_info *thread)
        union vfp_state *vfp = &thread->vfpstate;
        unsigned int cpu;
 
+       memset(vfp, 0, sizeof(union vfp_state));
+
+       vfp->hard.fpexc = FPEXC_EN;
+       vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
+
        /*
         * Disable VFP to ensure we initialize it first.  We must ensure
-        * that the modification of vfp_current_hw_state[] and hardware
-        * disable are done for the same CPU and without preemption.
-        *
-        * Do this first to ensure that preemption won't overwrite our
-        * state saving should access to the VFP be enabled at this point.
+        * that the modification of vfp_current_hw_state[] and hardware disable
+        * are done for the same CPU and without preemption.
         */
        cpu = get_cpu();
        if (vfp_current_hw_state[cpu] == vfp)
                vfp_current_hw_state[cpu] = NULL;
        fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
        put_cpu();
-
-       memset(vfp, 0, sizeof(union vfp_state));
-
-       vfp->hard.fpexc = FPEXC_EN;
-       vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
-#ifdef CONFIG_SMP
-       vfp->hard.cpu = NR_CPUS;
-#endif
 }
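
The get_cpu()/put_cpu() pair in vfp_thread_flush() disables preemption so the
ownership check and the hardware disable are guaranteed to happen on the same
CPU. A rough user-space analogue, with a lock standing in for preemption
disabling and hypothetical names throughout:

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t cpu_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ get_cpu()/put_cpu() */
static void *current_hw_state;                               /* ~ vfp_current_hw_state[cpu] */

static void thread_flush(void *my_state)
{
        /*
         * The check and the "hardware disable" must not be torn apart
         * by preemption or migration, hence one critical section.
         */
        pthread_mutex_lock(&cpu_lock);
        if (current_hw_state == my_state)
                current_hw_state = NULL;  /* drop ownership... */
        /* ...the real code also clears FPEXC_EN here, on this same CPU */
        pthread_mutex_unlock(&cpu_lock);
}

int main(void)
{
        int dummy;
        current_hw_state = &dummy;
        thread_flush(&dummy);
        return current_hw_state != NULL;  /* exits 0: ownership was dropped */
}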
 
 static void vfp_thread_exit(struct thread_info *thread)
@@ -129,9 +90,6 @@ static void vfp_thread_copy(struct thread_info *thread)
 
        vfp_sync_hwstate(parent);
        thread->vfpstate = parent->vfpstate;
-#ifdef CONFIG_SMP
-       thread->vfpstate.hard.cpu = NR_CPUS;
-#endif
 }
 
 /*
@@ -177,8 +135,17 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
                 * case the thread migrates to a different CPU. The
                 * restoring is done lazily.
                 */
-               if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
+               if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) {
                        vfp_save_state(vfp_current_hw_state[cpu], fpexc);
+                       vfp_current_hw_state[cpu]->hard.cpu = cpu;
+               }
+               /*
+                * On thread migration, just force a reload of the
+                * state on the new CPU in case the VFP registers
+                * contain stale data.
+                */
+               if (thread->vfpstate.hard.cpu != cpu)
+                       vfp_current_hw_state[cpu] = NULL;
 #endif
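
Together these two hunks implement the migration check: every save tags the
state with the CPU it was saved on, and a mismatch at switch-in clears the
owner pointer so stale registers are never trusted. A compact simulation of
that handshake (illustrative names):

#include <stdio.h>

#define NR_CPUS 2

struct vstate { int cpu; };               /* only the CPU tag matters here */
static struct vstate *hw_owner[NR_CPUS];

static void save_on(int cpu, struct vstate *s)
{
        hw_owner[cpu] = s;
        s->cpu = cpu;                     /* tag the state with the saving CPU */
}

static void switch_in(int cpu, struct vstate *s)
{
        if (s->cpu != cpu)                /* migrated: registers hold stale data */
                hw_owner[cpu] = NULL;     /* force a reload on first VFP use */
        printf("cpu%d: %s\n", cpu,
               hw_owner[cpu] == s ? "context still valid" : "will reload");
}

int main(void)
{
        struct vstate t = { .cpu = NR_CPUS };
        save_on(0, &t);                   /* saved while running on CPU 0 */
        switch_in(0, &t);                 /* same CPU: still valid */
        switch_in(1, &t);                 /* migrated to CPU 1: reload forced */
        return 0;
}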
 
                /*
@@ -452,13 +419,15 @@ static int vfp_pm_suspend(void)
                /* disable, just in case */
                fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
        } else if (vfp_current_hw_state[ti->cpu]) {
+#ifndef CONFIG_SMP
                fmxr(FPEXC, fpexc | FPEXC_EN);
                vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
                fmxr(FPEXC, fpexc);
+#endif
        }
 
        /* clear any information we had about last context state */
-       memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state));
+       vfp_current_hw_state[ti->cpu] = NULL;
 
        return 0;
 }
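
With the lazy scheme above, an SMP kernel has already written the owner's
state back at the last context switch, which is presumably why the save is
compiled out there. On UP the unit must be enabled first, since the VFP
registers are only accessible while FPEXC.EN (bit 30) is set, and the
caller's FPEXC is restored afterwards. A sketch of that enable/save/restore
dance, with plain functions standing in for the fmrx/fmxr accessors:

#include <stdio.h>
#include <stdint.h>

#define FPEXC_EN (1u << 30)               /* EN is bit 30 of FPEXC */

static uint32_t fpexc_reg;                /* mock hardware register */
static uint32_t read_fpexc(void)     { return fpexc_reg; }
static void write_fpexc(uint32_t v)  { fpexc_reg = v; }

static void save_state(void)
{
        printf("saving with FPEXC=%#x (EN set)\n", read_fpexc());
}

int main(void)
{
        uint32_t fpexc = read_fpexc();    /* remember the caller's value */
        write_fpexc(fpexc | FPEXC_EN);    /* enable so registers are accessible */
        save_state();
        write_fpexc(fpexc);               /* put the original FPEXC back */
        return 0;
}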
@@ -486,15 +455,15 @@ static void vfp_pm_init(void)
 static inline void vfp_pm_init(void) { }
 #endif /* CONFIG_PM */
 
-/*
- * Ensure that the VFP state stored in 'thread->vfpstate' is up to date
- * with the hardware state.
- */
 void vfp_sync_hwstate(struct thread_info *thread)
 {
        unsigned int cpu = get_cpu();
 
-       if (vfp_state_in_hw(cpu, thread)) {
+       /*
+        * If the thread we're interested in is the current owner of the
+        * hardware VFP state, then we need to save its state.
+        */
+       if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
                u32 fpexc = fmrx(FPEXC);
 
                /*
@@ -508,13 +477,36 @@ void vfp_sync_hwstate(struct thread_info *thread)
        put_cpu();
 }
 
-/* Ensure that the thread reloads the hardware VFP state on the next use. */
 void vfp_flush_hwstate(struct thread_info *thread)
 {
        unsigned int cpu = get_cpu();
 
-       vfp_force_reload(cpu, thread);
+       /*
+        * If the thread we're interested in is the current owner of the
+        * hardware VFP state, then we need to invalidate that state.
+        */
+       if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
+               u32 fpexc = fmrx(FPEXC);
+
+               fmxr(FPEXC, fpexc & ~FPEXC_EN);
+
+               /*
+                * Set the context to NULL to force a reload the next time
+                * the thread uses the VFP.
+                */
+               vfp_current_hw_state[cpu] = NULL;
+       }
 
+#ifdef CONFIG_SMP
+       /*
+        * On SMP we must also handle the thread migrating to another CPU
+        * and then back to the original CPU, where it is still the last
+        * VFP user. Mark the thread's VFP state as belonging to a
+        * non-existent CPU so that the saved state is reloaded in that
+        * case as well.
+        */
+       thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
        put_cpu();
 }
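
The NR_CPUS marker closes the remaining hole: a thread can migrate away and
back, so that it still nominally owns the original CPU's registers even
though its saved state was just rewritten. Tagging the state with an
impossible CPU number makes the switch-in check in vfp_notifier fire
unconditionally. A tiny demonstration (illustrative names):

#include <assert.h>

#define NR_CPUS 2

struct vstate { int cpu; };

static void flush(struct vstate *s)
{
        s->cpu = NR_CPUS;                 /* "owned by no CPU": matches nothing */
}

static int needs_reload(int cpu, const struct vstate *s)
{
        return s->cpu != cpu;             /* the switch-in migration check */
}

int main(void)
{
        struct vstate t = { .cpu = 0 };   /* last saved while on CPU 0 */
        flush(&t);                        /* saved copy has just been changed */
        assert(needs_reload(0, &t));      /* even back on CPU 0, reload fires */
        return 0;
}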
 
@@ -533,7 +525,8 @@ static int vfp_hotplug(struct notifier_block *b, unsigned long action,
        void *hcpu)
 {
        if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
-               vfp_force_reload((long)hcpu, current_thread_info());
+               unsigned int cpu = (long)hcpu;
+               vfp_current_hw_state[cpu] = NULL;
        } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
                vfp_enable(NULL);
        return NOTIFY_OK;
@@ -594,11 +587,14 @@ static int __init vfp_init(void)
                        elf_hwcap |= HWCAP_VFPv3;
 
                        /*
-                        * Check for VFPv3 D16. CPUs in this configuration
-                        * only have 16 x 64bit registers.
+                        * Check for VFPv3 D16 and VFPv4 D16.  CPUs in
+                        * this configuration only have 16 x 64-bit
+                        * registers.
                         */
                        if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
-                               elf_hwcap |= HWCAP_VFPv3D16;
+                               elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
+                       else
+                               elf_hwcap |= HWCAP_VFPD32;
                }
 #endif
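
The D16/D32 split is read from the MVFR0 feature register: its A_SIMD field
(bits [3:0], matching the kernel's MVFR0_A_SIMD_MASK of 0xf) reads 1 for a
16-double register bank and 2 for the full 32-double bank. A small decoder
sketch; the sample value below is made up:

#include <stdio.h>
#include <stdint.h>

#define MVFR0_A_SIMD_MASK 0xf             /* A_SIMD field, MVFR0 bits [3:0] */

static const char *dreg_bank(uint32_t mvfr0)
{
        switch (mvfr0 & MVFR0_A_SIMD_MASK) {
        case 1:  return "16 x 64-bit D registers (VFPv3/v4-D16)";
        case 2:  return "32 x 64-bit D registers (D32)";
        default: return "no/unknown D register bank";
        }
}

int main(void)
{
        printf("%s\n", dreg_bank(0x10110021));  /* made-up MVFR0, low nibble 1 */
        return 0;
}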
 #ifdef CONFIG_NEON