 * by non-64-bit operations.
 */
#define ASID_FIRST_VERSION (1ULL << ASID_BITS)
-#define NUM_USER_ASIDS ASID_FIRST_VERSION
+#define NUM_USER_ASIDS (ASID_FIRST_VERSION - 1)
+
+#define ASID_TO_IDX(asid) ((asid & ~ASID_MASK) - 1)
+#define IDX_TO_ASID(idx) ((idx + 1) & ~ASID_MASK)
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
-static DEFINE_PER_CPU(atomic64_t, active_asids);
+DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
-#ifdef CONFIG_ARM_ERRATA_798181
-void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
-			     cpumask_t *mask)
-{
-	int cpu;
-	unsigned long flags;
-	u64 context_id, asid;
-
-	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
-	context_id = mm->context.id.counter;
-	for_each_online_cpu(cpu) {
-		if (cpu == this_cpu)
-			continue;
-		/*
-		 * We only need to send an IPI if the other CPUs are
-		 * running the same ASID as the one being invalidated.
-		 */
-		asid = per_cpu(active_asids, cpu).counter;
-		if (asid == 0)
-			asid = per_cpu(reserved_asids, cpu);
-		if (context_id == asid)
-			cpumask_set_cpu(cpu, mask);
-	}
-	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
-}
-#endif
-
#ifdef CONFIG_ARM_LPAE
static void cpu_set_reserved_ttbr0(void)
{
			asid = 0;
		} else {
			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
-			/*
-			 * If this CPU has already been through a
-			 * rollover, but hasn't run another task in
-			 * the meantime, we must preserve its reserved
-			 * ASID, as this is the only trace we have of
-			 * the process it is still running.
-			 */
-			if (asid == 0)
-				asid = per_cpu(reserved_asids, i);
-			__set_bit(asid & ~ASID_MASK, asid_map);
+			__set_bit(ASID_TO_IDX(asid), asid_map);
		}
		per_cpu(reserved_asids, i) = asid;
	}
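
As a side note on the flush_context() hunk above: on rollover, every other
CPU's live ASID is atomically swapped out of active_asids, re-marked in the
bitmap so the new generation cannot hand it out again, and parked in
reserved_asids as the record of what that CPU is still running. The userspace
sketch below models just that bookkeeping; EXAMPLE_CPUS, the plain arrays and
example_flush_context() are invented for illustration, it assumes 8-bit ASIDs,
and it assumes every remote CPU holds a non-zero ASID (the kernel also has to
cope with CPUs that have not run anything since the last rollover).

#include <stdint.h>
#include <string.h>

#define EXAMPLE_CPUS		4
#define EXAMPLE_NUM_ASIDS	255	/* NUM_USER_ASIDS with 8-bit ASIDs */

static uint64_t active_asids[EXAMPLE_CPUS];	/* stand-in for the per-CPU atomic64_t */
static uint64_t reserved_asids[EXAMPLE_CPUS];
static unsigned char asid_map[EXAMPLE_NUM_ASIDS];	/* one byte per slot, not a real bitmap */

static void example_flush_context(int rollover_cpu)
{
	uint64_t asid;
	int i;

	memset(asid_map, 0, sizeof(asid_map));		/* kernel: bitmap_clear() */
	for (i = 0; i < EXAMPLE_CPUS; i++) {
		if (i == rollover_cpu) {
			/* This CPU is about to allocate a fresh ASID anyway. */
			asid = 0;
		} else {
			/* Kernel: atomic64_xchg(&per_cpu(active_asids, i), 0). */
			asid = active_asids[i];
			active_asids[i] = 0;
			/* Kernel: __set_bit(ASID_TO_IDX(asid), asid_map). */
			asid_map[(asid & 0xff) - 1] = 1;
		}
		/* Keep a record of what the CPU was running across the rollover. */
		reserved_asids[i] = asid;
	}
}

int main(void)
{
	/* Pretend CPUs 1-3 are running ASIDs 5, 9 and 200 of the old generation. */
	active_asids[1] = 5;
	active_asids[2] = 9;
	active_asids[3] = 200;
	example_flush_context(0);
	return 0;
}
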
		/*
		 * Allocate a free ASID. If we can't find one, take a
		 * note of the currently active ASIDs and mark the TLBs
-		 * as requiring flushes. We always count from ASID #1,
-		 * as we reserve ASID #0 to switch via TTBR0 and indicate
-		 * rollover events.
+		 * as requiring flushes.
		 */
-		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+		asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
		if (asid == NUM_USER_ASIDS) {
			generation = atomic64_add_return(ASID_FIRST_VERSION,
							 &asid_generation);
			flush_context(cpu);
-			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+			asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
		}
		__set_bit(asid, asid_map);
-		asid |= generation;
+		asid = generation | IDX_TO_ASID(asid);
		cpumask_clear(mm_cpumask(mm));
	}
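
To make the index mapping introduced by ASID_TO_IDX()/IDX_TO_ASID() concrete,
here is a small standalone check (not part of the patch), assuming ASID_BITS
is 8 and ASID_MASK is (~0ULL << ASID_BITS) as in the rest of context.c:
hardware ASID #0 is kept back for the reserved TTBR0 switch and as the
rollover marker, so bitmap slot 0 maps to ASID 1 and only
ASID_FIRST_VERSION - 1 slots are needed, which is exactly the new
NUM_USER_ASIDS value.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ASID_BITS		8
#define ASID_MASK		(~0ULL << ASID_BITS)
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)

#define ASID_TO_IDX(asid)	(((asid) & ~ASID_MASK) - 1)
#define IDX_TO_ASID(idx)	(((idx) + 1) & ~ASID_MASK)

int main(void)
{
	uint64_t idx, asid;

	/* Slot 0 holds ASID 1, slot NUM_USER_ASIDS - 1 holds ASID 255. */
	for (idx = 0; idx < NUM_USER_ASIDS; idx++) {
		asid = IDX_TO_ASID(idx);
		assert(asid >= 1 && asid <= NUM_USER_ASIDS);
		assert(ASID_TO_IDX(asid) == idx);
	}

	printf("usable hardware ASIDs: 1..%llu in %llu bitmap slots\n",
	       (unsigned long long)NUM_USER_ASIDS,
	       (unsigned long long)NUM_USER_ASIDS);
	return 0;
}
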