void (*vfp_vector)(void) = vfp_null_entry;
-/*
- * Dual-use variable.
- * Used in startup: set to non-zero if VFP checks fail
- * After startup, holds VFP architecture
- */
-unsigned int VFP_arch;
-
/*
* The pointer to the vfpstate structure of the thread which currently
* owns the context held in the VFP hardware, or NULL if the hardware
* context is invalid.
- *
- * For UP, this is sufficient to tell which thread owns the VFP context.
- * However, for SMP, we also need to check the CPU number stored in the
- * saved state too to catch migrations.
*/
union vfp_state *vfp_current_hw_state[NR_CPUS];
/*
- * Is 'thread's most up to date state stored in this CPUs hardware?
- * Must be called from non-preemptible context.
- */
-static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
-{
-#ifdef CONFIG_SMP
- if (thread->vfpstate.hard.cpu != cpu)
- return false;
-#endif
- return vfp_current_hw_state[cpu] == &thread->vfpstate;
-}
-
-/*
- * Force a reload of the VFP context from the thread structure. We do
- * this by ensuring that access to the VFP hardware is disabled, and
- * clear last_VFP_context. Must be called from non-preemptible context.
+ * Dual-use variable.
+ * Used during startup: set to non-zero if the VFP checks fail.
+ * After startup, holds the VFP architecture.
*/
-static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
-{
- if (vfp_state_in_hw(cpu, thread)) {
- fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
- vfp_current_hw_state[cpu] = NULL;
- }
-#ifdef CONFIG_SMP
- thread->vfpstate.hard.cpu = NR_CPUS;
-#endif
-}
+unsigned int VFP_arch;
/*
* Per-thread VFP initialization.
union vfp_state *vfp = &thread->vfpstate;
unsigned int cpu;
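+
+	/*
+	 * Start from a clean state, then set the defaults: VFP enabled
+	 * (FPEXC_EN) and round-to-nearest mode (FPSCR_ROUND_NEAREST).
+	 */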
+ memset(vfp, 0, sizeof(union vfp_state));
+
+ vfp->hard.fpexc = FPEXC_EN;
+ vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
+
/*
* Disable VFP to ensure we initialize it first. We must ensure
- * that the modification of vfp_current_hw_state[] and hardware
- * disable are done for the same CPU and without preemption.
- *
- * Do this first to ensure that preemption won't overwrite our
- * state saving should access to the VFP be enabled at this point.
+ * that the modification of vfp_current_hw_state[] and hardware disable
+ * are done for the same CPU and without preemption.
*/
cpu = get_cpu();
if (vfp_current_hw_state[cpu] == vfp)
vfp_current_hw_state[cpu] = NULL;
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
put_cpu();
-
- memset(vfp, 0, sizeof(union vfp_state));
-
- vfp->hard.fpexc = FPEXC_EN;
- vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
-#ifdef CONFIG_SMP
- vfp->hard.cpu = NR_CPUS;
-#endif
}
static void vfp_thread_exit(struct thread_info *thread)
vfp_sync_hwstate(parent);
thread->vfpstate = parent->vfpstate;
-#ifdef CONFIG_SMP
- thread->vfpstate.hard.cpu = NR_CPUS;
-#endif
}
/*
* case the thread migrates to a different CPU. The
* restoring is done lazily.
*/
- if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
+ if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) {
vfp_save_state(vfp_current_hw_state[cpu], fpexc);
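+		/* Record the owning CPU so a later migration can be detected. */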
+ vfp_current_hw_state[cpu]->hard.cpu = cpu;
+ }
+	/*
+	 * On thread migration, force a reload of the state on the new
+	 * CPU in case the VFP registers contain stale data.
+	 */
+ if (thread->vfpstate.hard.cpu != cpu)
+ vfp_current_hw_state[cpu] = NULL;
#endif
/*
/* disable, just in case */
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
} else if (vfp_current_hw_state[ti->cpu]) {
+#ifndef CONFIG_SMP
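+	/*
+	 * On SMP the state of a non-running thread has already been
+	 * saved at the previous thread switch, so only save it here on UP.
+	 */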
fmxr(FPEXC, fpexc | FPEXC_EN);
vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
fmxr(FPEXC, fpexc);
+#endif
}
/* clear any information we had about last context state */
- memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state));
+ vfp_current_hw_state[ti->cpu] = NULL;
return 0;
}
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_PM */
-/*
- * Ensure that the VFP state stored in 'thread->vfpstate' is up to date
- * with the hardware state.
- */
void vfp_sync_hwstate(struct thread_info *thread)
{
unsigned int cpu = get_cpu();
- if (vfp_state_in_hw(cpu, thread)) {
+ /*
+ * If the thread we're interested in is the current owner of the
+ * hardware VFP state, then we need to save its state.
+ */
+ if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
u32 fpexc = fmrx(FPEXC);
/*
put_cpu();
}
-/* Ensure that the thread reloads the hardware VFP state on the next use. */
void vfp_flush_hwstate(struct thread_info *thread)
{
unsigned int cpu = get_cpu();
- vfp_force_reload(cpu, thread);
+	/*
+	 * If the thread we're interested in is the current owner of the
+	 * hardware VFP state, then we need to invalidate it.
+	 */
+ if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
+ u32 fpexc = fmrx(FPEXC);
+
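+		/* Disable VFP so the next VFP use by this thread traps. */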
+ fmxr(FPEXC, fpexc & ~FPEXC_EN);
+
+ /*
+ * Set the context to NULL to force a reload the next time
+ * the thread uses the VFP.
+ */
+ vfp_current_hw_state[cpu] = NULL;
+ }
+#ifdef CONFIG_SMP
+	/*
+	 * On SMP we must also handle the case where the thread migrates to
+	 * another CPU and then back to the original CPU, where the last VFP
+	 * user is still this same thread. Mark the thread's VFP state as
+	 * belonging to a non-existent CPU so that the saved state is
+	 * reloaded in that case.
+	 */
+ thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
put_cpu();
}
void *hcpu)
{
if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
- vfp_force_reload((long)hcpu, current_thread_info());
+ unsigned int cpu = (long)hcpu;
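+
+		/*
+		 * The dying CPU no longer holds a valid context; invalidate it
+		 * so the state is reloaded from the thread structure on next use.
+		 */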
+ vfp_current_hw_state[cpu] = NULL;
} else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
vfp_enable(NULL);
return NOTIFY_OK;
elf_hwcap |= HWCAP_VFPv3;
/*
- * Check for VFPv3 D16. CPUs in this configuration
- * only have 16 x 64bit registers.
+ * Check for VFPv3 D16 and VFPv4 D16. CPUs in
+ * this configuration only have 16 x 64bit
+ * registers.
*/
if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
- elf_hwcap |= HWCAP_VFPv3D16;
+ elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
+ else
+				elf_hwcap |= HWCAP_VFPD32;	/* 32 x 64bit registers */
}
#endif
#ifdef CONFIG_NEON