Merge remote-tracking branch 'lsk/v3.10/topic/arm64-lts' into lsk-v3.10-arm64-misc
author Mark Brown <broonie@linaro.org>
Thu, 15 May 2014 10:42:57 +0000 (11:42 +0100)
committer Mark Brown <broonie@linaro.org>
Thu, 15 May 2014 10:42:57 +0000 (11:42 +0100)
24 files changed:
arch/arm/include/asm/arch_timer.h
arch/arm64/boot/dts/foundation-v8.dts
arch/arm64/include/asm/arch_timer.h
arch/arm64/include/asm/cacheflush.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/spinlock.h
arch/arm64/include/asm/syscall.h
arch/arm64/include/asm/thread_info.h
arch/arm64/include/asm/virt.h
arch/arm64/kernel/entry.S
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/perf_event.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kernel/vdso.c
arch/arm64/kernel/vdso/Makefile
arch/arm64/kernel/vdso/gettimeofday.S
arch/arm64/mm/fault.c
arch/arm64/mm/flush.c
arch/arm64/mm/mmu.c
arch/arm64/mm/proc.S
drivers/clocksource/arm_arch_timer.c
include/clocksource/arm_arch_timer.h

index 7c1bfc0aea0c200a268ec82c62fd4cb624c17ecf..accefe0991828e2984c5c1ff49b25dece76edcc0 100644 (file)
@@ -80,15 +80,6 @@ static inline u32 arch_timer_get_cntfrq(void)
        return val;
 }
 
-static inline u64 arch_counter_get_cntpct(void)
-{
-       u64 cval;
-
-       isb();
-       asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
-       return cval;
-}
-
 static inline u64 arch_counter_get_cntvct(void)
 {
        u64 cval;
index 84fcc5018284b6cee3dca436dd4ec7634e3b00cc..519c4b2c06873dc82f7ed2ebff9ccb698691e24f 100644 (file)
@@ -6,6 +6,8 @@
 
 /dts-v1/;
 
+/memreserve/ 0x80000000 0x00010000;
+
 / {
        model = "Foundation-v8A";
        compatible = "arm,foundation-aarch64", "arm,vexpress";
index bf6ab242f04725e56e8cf93d3ab6e930668651f0..d56ed11ba9a387be20f835d5f7675f9e95eb4574 100644 (file)
@@ -110,16 +110,6 @@ static inline void __cpuinit arch_counter_set_user_access(void)
        asm volatile("msr       cntkctl_el1, %0" : : "r" (cntkctl));
 }
 
-static inline u64 arch_counter_get_cntpct(void)
-{
-       u64 cval;
-
-       isb();
-       asm volatile("mrs %0, cntpct_el0" : "=r" (cval));
-
-       return cval;
-}
-
 static inline u64 arch_counter_get_cntvct(void)
 {
        u64 cval;
index fea9ee32720678b348685ef4803bb2c622c3eb8e..889324981aa4f569a77e6a5385897b6493c85526 100644 (file)
@@ -116,6 +116,7 @@ extern void flush_dcache_page(struct page *);
 static inline void __flush_icache_all(void)
 {
        asm("ic ialluis");
+       dsb();
 }
 
 #define flush_dcache_mmap_lock(mapping) \
index e333a243bfccf4f6e1547ea2543e5a3fa3abd32b..3a710d7b14cec76f11cb3a2b69748a48096c114c 100644 (file)
@@ -161,7 +161,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
 {
        if (pte_valid_user(pte)) {
-               if (pte_exec(pte))
+               if (!pte_special(pte) && pte_exec(pte))
                        __sync_icache_dcache(pte, addr);
                if (!pte_dirty(pte))
                        pte = pte_wrprotect(pte);
@@ -182,11 +182,11 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
  * Mark the prot value as uncacheable and unbufferable.
  */
 #define pgprot_noncached(prot) \
-       __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+       __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
 #define pgprot_writecombine(prot) \
-       __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
+       __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
 #define pgprot_dmacoherent(prot) \
-       __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
+       __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 struct file;
 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
index 7065e920149d3d0ec6e9ed549e71a5040aa0a5d2..0defa0728a9b85f6c82d104f8c3bdb6ea3bf716f 100644 (file)
@@ -59,9 +59,10 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
        unsigned int tmp;
 
        asm volatile(
-       "       ldaxr   %w0, %1\n"
+       "2:     ldaxr   %w0, %1\n"
        "       cbnz    %w0, 1f\n"
        "       stxr    %w0, %w2, %1\n"
+       "       cbnz    %w0, 2b\n"
        "1:\n"
        : "=&r" (tmp), "+Q" (lock->lock)
        : "r" (1)
index 89c047f9a9717efe1d3a83db3988c1c621aa2882..70ba9d4ee9782503fa8c626dd3d405d5e36b93c4 100644 (file)
@@ -59,6 +59,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
                                         unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
+       if (n == 0)
+               return;
+
        if (i + n > SYSCALL_MAX_ARGS) {
                unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
                unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -82,6 +85,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
                                         unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
+       if (n == 0)
+               return;
+
        if (i + n > SYSCALL_MAX_ARGS) {
                pr_warning("%s called with max args %d, handling only %d\n",
                           __func__, i + n, SYSCALL_MAX_ARGS);
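
The early return guards the unsigned argument count: later in these helpers (outside the context shown) n is decremented when i == 0, and decrementing an unsigned zero wraps to a huge value that would then size a memcpy. A minimal standalone demonstration of that wrap (illustrative only, not the kernel helper):

    #include <stdio.h>

    int main(void)
    {
            unsigned int n = 0;

            n--;                                    /* wraps: 0 - 1 == UINT_MAX */
            printf("n after decrement: %u\n", n);   /* prints 4294967295 */
            return 0;
    }
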
index 3659e460071ddfb1f84715ff7736c97ef9f9bb20..23a3c4791d86cb71f9a2520e2176212b1e107503 100644 (file)
 #include <linux/compiler.h>
 
 #ifndef CONFIG_ARM64_64K_PAGES
-#define THREAD_SIZE_ORDER      1
+#define THREAD_SIZE_ORDER      2
 #endif
 
-#define THREAD_SIZE            8192
+#define THREAD_SIZE            16384
 #define THREAD_START_SP                (THREAD_SIZE - 16)
 
 #ifndef __ASSEMBLY__
index 439827271e3d5bfada26390c9a226bbf6ec763e5..26e310c5434446c7bfc0a9a3f9b443139ffa3368 100644 (file)
@@ -21,6 +21,7 @@
 #define BOOT_CPU_MODE_EL2      (0x0e12b007)
 
 #ifndef __ASSEMBLY__
+#include <asm/cacheflush.h>
 
 /*
  * __boot_cpu_mode records what mode CPUs were booted in.
@@ -36,9 +37,20 @@ extern u32 __boot_cpu_mode[2];
 void __hyp_set_vectors(phys_addr_t phys_vector_base);
 phys_addr_t __hyp_get_vectors(void);
 
+static inline void sync_boot_mode(void)
+{
+       /*
+        * As secondaries write to __boot_cpu_mode with caches disabled, we
+        * must flush the corresponding cache entries to ensure the visibility
+        * of their writes.
+        */
+       __flush_dcache_area(__boot_cpu_mode, sizeof(__boot_cpu_mode));
+}
+
 /* Reports the availability of HYP mode */
 static inline bool is_hyp_mode_available(void)
 {
+       sync_boot_mode();
        return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
                __boot_cpu_mode[1] == BOOT_CPU_MODE_EL2);
 }
@@ -46,6 +58,7 @@ static inline bool is_hyp_mode_available(void)
 /* Check if the bootloader has booted CPUs in different modes */
 static inline bool is_hyp_mode_mismatched(void)
 {
+       sync_boot_mode();
        return __boot_cpu_mode[0] != __boot_cpu_mode[1];
 }
 
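Background for this hunk: secondary CPUs store their boot mode with the MMU and caches off, so those writes land directly in DRAM; the reading CPU, running with caches on, may still hold a stale copy of the line. sync_boot_mode() therefore cleans and invalidates the covering cache lines before each read. A hedged C sketch of the reader side (the __flush_dcache_area prototype is approximated from <asm/cacheflush.h>; u32 stands in for the kernel type):

    #include <stddef.h>

    typedef unsigned int u32;
    extern void __flush_dcache_area(void *addr, size_t len);
    extern u32 __boot_cpu_mode[2];

    /* Read a slot of __boot_cpu_mode, discarding any stale cached copy
     * so the load observes the secondary's uncached store. */
    static u32 boot_mode_of(int slot)
    {
            __flush_dcache_area(__boot_cpu_mode, sizeof(__boot_cpu_mode));
            return __boot_cpu_mode[slot];
    }
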
index 1d1314280a03a678c4ef63092712d49079386cdd..6ad781b21c080a50c4c061dbd4037beafe8ba09b 100644 (file)
 
        .macro  get_thread_info, rd
        mov     \rd, sp
-       and     \rd, \rd, #~((1 << 13) - 1)     // top of 8K stack
+       and     \rd, \rd, #~(THREAD_SIZE - 1)   // top of stack
        .endm
 
 /*
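
The replaced constant hard-wired 8 KB stacks; once THREAD_SIZE became 16 KB (see the thread_info.h hunk above), masking sp with ~((1 << 13) - 1) would land in the middle of the stack rather than at its base. A small sketch of the computation, assuming THREAD_SIZE is a power of two and thread_info sits at the bottom of the stack:

    #include <stdint.h>

    #define THREAD_SIZE 16384UL     /* must match the kernel's value */

    /* The stack is THREAD_SIZE-aligned, so clearing the low bits of the
     * stack pointer recovers the thread_info at its base. */
    static inline uintptr_t thread_info_base(uintptr_t sp)
    {
            return sp & ~(THREAD_SIZE - 1);
    }
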
index e8b8357aedb42e1fb23d6c736a7fcd2ea5003429..2fa308e4a1fad61448971f69be317249fb1c87b5 100644 (file)
@@ -79,8 +79,10 @@ void fpsimd_thread_switch(struct task_struct *next)
 
 void fpsimd_flush_thread(void)
 {
+       preempt_disable();
        memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
        fpsimd_load_state(&current->thread.fpsimd_state);
+       preempt_enable();
 }
 
 /*
index 9ba33c40cdf8f841e974f68e599f0f97e87138ff..cea1594ff933e92e304a6996a4642f18f322487a 100644 (file)
@@ -107,7 +107,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
 static int
 armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-       int mapping = (*event_map)[config];
+       int mapping;
+
+       if (config >= PERF_COUNT_HW_MAX)
+               return -EINVAL;
+
+       mapping = (*event_map)[config];
        return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
 
@@ -317,7 +322,13 @@ validate_event(struct pmu_hw_events *hw_events,
        struct hw_perf_event fake_event = event->hw;
        struct pmu *leader_pmu = event->group_leader->pmu;
 
-       if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
+       if (is_software_event(event))
+               return 1;
+
+       if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+               return 1;
+
+       if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
                return 1;
 
        return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
@@ -773,7 +784,7 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 /*
  * PMXEVTYPER: Event selection reg
  */
-#define        ARMV8_EVTYPE_MASK       0xc00000ff      /* Mask for writable bits */
+#define        ARMV8_EVTYPE_MASK       0xc80000ff      /* Mask for writable bits */
 #define        ARMV8_EVTYPE_EVENT      0xff            /* Mask for EVENT bits */
 
 /*
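
In armpmu_map_event(), config comes straight from userspace via perf_event_open()'s attribute structure, so it must be range-checked before indexing the event map. A standalone sketch of the hardened lookup (array size and sentinel value are illustrative):

    #include <errno.h>

    #define NR_HW_EVENTS      10        /* stands in for PERF_COUNT_HW_MAX */
    #define HW_OP_UNSUPPORTED 0xffff

    /* Reject an out-of-range index from the untrusted 64-bit config
     * before touching the table. */
    static int map_event(const unsigned map[NR_HW_EVENTS],
                         unsigned long long config)
    {
            if (config >= NR_HW_EVENTS)
                    return -EINVAL;
            return map[config] == HW_OP_UNSUPPORTED ? -ENOENT
                                                    : (int)map[config];
    }
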
index 6e1e77f1831c0cb0bf31306822ebc72ba8d3158a..5341534b6d04939d444c9c86d5491507d33c9511 100644 (file)
@@ -236,31 +236,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
 {
        int err, len, type, disabled = !ctrl.enabled;
 
-       if (disabled) {
-               len = 0;
-               type = HW_BREAKPOINT_EMPTY;
-       } else {
-               err = arch_bp_generic_fields(ctrl, &len, &type);
-               if (err)
-                       return err;
-
-               switch (note_type) {
-               case NT_ARM_HW_BREAK:
-                       if ((type & HW_BREAKPOINT_X) != type)
-                               return -EINVAL;
-                       break;
-               case NT_ARM_HW_WATCH:
-                       if ((type & HW_BREAKPOINT_RW) != type)
-                               return -EINVAL;
-                       break;
-               default:
+       attr->disabled = disabled;
+       if (disabled)
+               return 0;
+
+       err = arch_bp_generic_fields(ctrl, &len, &type);
+       if (err)
+               return err;
+
+       switch (note_type) {
+       case NT_ARM_HW_BREAK:
+               if ((type & HW_BREAKPOINT_X) != type)
                        return -EINVAL;
-               }
+               break;
+       case NT_ARM_HW_WATCH:
+               if ((type & HW_BREAKPOINT_RW) != type)
+                       return -EINVAL;
+               break;
+       default:
+               return -EINVAL;
        }
 
        attr->bp_len    = len;
        attr->bp_type   = type;
-       attr->disabled  = disabled;
 
        return 0;
 }
index 5d54e3717bf81a54eedac2aabf0790745be85fd7..9c93e126328cfaa04ffdaa8612a43e4d9d09a6aa 100644 (file)
@@ -199,13 +199,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
        raw_spin_lock(&boot_lock);
        raw_spin_unlock(&boot_lock);
 
-       /*
-        * Enable local interrupts.
-        */
-       notify_cpu_starting(cpu);
-       local_irq_enable();
-       local_fiq_enable();
-
        /*
         * OK, now it's safe to let the boot CPU continue.  Wait for
         * the CPU migration code to notice that the CPU is online
@@ -214,6 +207,14 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
        set_cpu_online(cpu, true);
        complete(&cpu_running);
 
+       /*
+        * Enable GIC and timers.
+        */
+       notify_cpu_starting(cpu);
+
+       local_irq_enable();
+       local_fiq_enable();
+
        /*
         * OK, it's off to the idle thread for us
         */
index d25459ff57fc18c387ad9a1d7130f0934dbbfc8e..048334bb2651a19ede00ac4a33323be67ea05e5a 100644 (file)
@@ -48,7 +48,11 @@ int unwind_frame(struct stackframe *frame)
 
        frame->sp = fp + 0x10;
        frame->fp = *(unsigned long *)(fp);
-       frame->pc = *(unsigned long *)(fp + 8);
+       /*
+        * -4 here because we care about the PC at time of bl,
+        * not where the return will go.
+        */
+       frame->pc = *(unsigned long *)(fp + 8) - 4;
 
        return 0;
 }
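
On AArch64, bl stores the address of the following instruction in the link register, so the value saved in a frame record points one instruction past the call; subtracting 4 (the fixed A64 instruction size) reports the call site itself. A hedged C sketch of one step of this frame-pointer walk (frame-record layout {fp, lr} per the AAPCS64; names are illustrative):

    struct frame { unsigned long fp, sp, pc; };

    /* Advance one frame: the record at *fp holds the caller's fp and the
     * saved lr; lr - 4 is the address of the bl itself. */
    static int unwind_one(struct frame *f)
    {
            unsigned long fp = f->fp;

            if (fp & 0xf)           /* frame records are 16-byte aligned */
                    return -1;

            f->sp = fp + 0x10;
            f->fp = *(unsigned long *)fp;
            f->pc = *(unsigned long *)(fp + 8) - 4;
            return 0;
    }
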
index 6a389dc1bd499c5de57c1a572bb22fb1a81c1494..0ea7a22bcdf2890b4d363163d89c13ac5a8a80f9 100644 (file)
@@ -235,6 +235,8 @@ void update_vsyscall(struct timekeeper *tk)
        vdso_data->use_syscall                  = use_syscall;
        vdso_data->xtime_coarse_sec             = xtime_coarse.tv_sec;
        vdso_data->xtime_coarse_nsec            = xtime_coarse.tv_nsec;
+       vdso_data->wtm_clock_sec                = tk->wall_to_monotonic.tv_sec;
+       vdso_data->wtm_clock_nsec               = tk->wall_to_monotonic.tv_nsec;
 
        if (!use_syscall) {
                vdso_data->cs_cycle_last        = tk->clock->cycle_last;
@@ -242,8 +244,6 @@ void update_vsyscall(struct timekeeper *tk)
                vdso_data->xtime_clock_nsec     = tk->xtime_nsec;
                vdso_data->cs_mult              = tk->mult;
                vdso_data->cs_shift             = tk->shift;
-               vdso_data->wtm_clock_sec        = tk->wall_to_monotonic.tv_sec;
-               vdso_data->wtm_clock_nsec       = tk->wall_to_monotonic.tv_nsec;
        }
 
        smp_wmb();
index d8064af42e6217ba173d559ec72999181c83776a..6d20b7d162d834da4f9364e340e81d6014ec566a 100644 (file)
@@ -48,7 +48,7 @@ $(obj-vdso): %.o: %.S
 
 # Actual build commands
 quiet_cmd_vdsold = VDSOL $@
-      cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@
+      cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
 quiet_cmd_vdsoas = VDSOA $@
       cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
 
index f0a6d10b52114953dcfd818c66ad85f6cccccbd8..fe652ffd34c28090076b8d8358c6e40f7d77034d 100644 (file)
@@ -103,6 +103,8 @@ ENTRY(__kernel_clock_gettime)
        bl      __do_get_tspec
        seqcnt_check w9, 1b
 
+       mov     x30, x2
+
        cmp     w0, #CLOCK_MONOTONIC
        b.ne    6f
 
@@ -118,6 +120,9 @@ ENTRY(__kernel_clock_gettime)
        ccmp    w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
        b.ne    8f
 
+       /* xtime_coarse_nsec is already right-shifted */
+       mov     x12, #0
+
        /* Get coarse timespec. */
        adr     vdso_data, _vdso_data
 3:     seqcnt_acquire
@@ -156,7 +161,7 @@ ENTRY(__kernel_clock_gettime)
        lsr     x11, x11, x12
        stp     x10, x11, [x1, #TSPEC_TV_SEC]
        mov     x0, xzr
-       ret     x2
+       ret
 7:
        mov     x30, x2
 8:     /* Syscall fallback. */
index 1426468b77f3bb7b6df96d604851449db13ed0da..f51d669c8ebdd0397e2d1cb132e0cd7f4b9b7295 100644 (file)
@@ -152,25 +152,8 @@ void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 #define ESR_CM                 (1 << 8)
 #define ESR_LNX_EXEC           (1 << 24)
 
-/*
- * Check that the permissions on the VMA allow for the fault which occurred.
- * If we encountered a write fault, we must have write permission, otherwise
- * we allow any permission.
- */
-static inline bool access_error(unsigned int esr, struct vm_area_struct *vma)
-{
-       unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
-
-       if (esr & ESR_WRITE)
-               mask = VM_WRITE;
-       if (esr & ESR_LNX_EXEC)
-               mask = VM_EXEC;
-
-       return vma->vm_flags & mask ? false : true;
-}
-
 static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
-                          unsigned int esr, unsigned int flags,
+                          unsigned int mm_flags, unsigned long vm_flags,
                           struct task_struct *tsk)
 {
        struct vm_area_struct *vma;
@@ -188,12 +171,17 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
         * it.
         */
 good_area:
-       if (access_error(esr, vma)) {
+       /*
+        * Check that the permissions on the VMA allow for the fault which
+        * occurred. If we encountered a write or exec fault, we must have
+        * appropriate permissions, otherwise we allow any permission.
+        */
+       if (!(vma->vm_flags & vm_flags)) {
                fault = VM_FAULT_BADACCESS;
                goto out;
        }
 
-       return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+       return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
 
 check_stack:
        if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -208,9 +196,15 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
        struct task_struct *tsk;
        struct mm_struct *mm;
        int fault, sig, code;
-       bool write = (esr & ESR_WRITE) && !(esr & ESR_CM);
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-               (write ? FAULT_FLAG_WRITE : 0);
+       unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
+       unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+       if (esr & ESR_LNX_EXEC) {
+               vm_flags = VM_EXEC;
+       } else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
+               vm_flags = VM_WRITE;
+               mm_flags |= FAULT_FLAG_WRITE;
+       }
 
        tsk = current;
        mm  = tsk->mm;
@@ -248,7 +242,7 @@ retry:
 #endif
        }
 
-       fault = __do_page_fault(mm, addr, esr, flags, tsk);
+       fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);
 
        /*
         * If we need to retry but a fatal signal is pending, handle the
@@ -265,7 +259,7 @@ retry:
         */
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
-       if (flags & FAULT_FLAG_ALLOW_RETRY) {
+       if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        tsk->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
@@ -280,7 +274,7 @@ retry:
                         * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
                         * starvation.
                         */
-                       flags &= ~FAULT_FLAG_ALLOW_RETRY;
+                       mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        goto retry;
                }
        }
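
The deciding twist in this refactor is the ESR_CM test: cache maintenance operations are reported as writes in the ESR but are legitimate on read-only mappings, so they must not demand VM_WRITE. The resulting classification, as a standalone sketch (bit positions mirror the definitions earlier in this file; ESR_WRITE is assumed to be the WnR bit defined just above the shown context):

    #define ESR_WRITE     (1U << 6)
    #define ESR_CM        (1U << 8)
    #define ESR_LNX_EXEC  (1U << 24)

    #define VM_READ  0x1UL
    #define VM_WRITE 0x2UL
    #define VM_EXEC  0x4UL

    /* Which VMA permission can satisfy this fault? */
    static unsigned long fault_vm_flags(unsigned int esr)
    {
            if (esr & ESR_LNX_EXEC)                     /* instruction abort */
                    return VM_EXEC;
            if ((esr & ESR_WRITE) && !(esr & ESR_CM))   /* true write fault */
                    return VM_WRITE;
            return VM_READ | VM_WRITE | VM_EXEC;        /* any permission */
    }
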
index d3751b5f7f07433ac625aff0ba72b4565805948d..e4193e3adc7f9dbbbe57c63eac0ee60b32aaa3e5 100644 (file)
@@ -72,14 +72,12 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 
 void __sync_icache_dcache(pte_t pte, unsigned long addr)
 {
-       unsigned long pfn;
-       struct page *page;
+       struct page *page = pte_page(pte);
 
-       pfn = pte_pfn(pte);
-       if (!pfn_valid(pfn))
+       /* no flushing needed for anonymous pages */
+       if (!page_mapping(page))
                return;
 
-       page = pfn_to_page(pfn);
        if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
                __flush_dcache_area(page_address(page), PAGE_SIZE);
                __flush_icache_all();
@@ -89,28 +87,14 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
 }
 
 /*
- * Ensure cache coherency between kernel mapping and userspace mapping of this
- * page.
+ * This function is called when a page has been modified by the kernel. Mark
+ * it as dirty for later flushing when mapped in user space (if executable,
+ * see __sync_icache_dcache).
  */
 void flush_dcache_page(struct page *page)
 {
-       struct address_space *mapping;
-
-       /*
-        * The zero page is never written to, so never has any dirty cache
-        * lines, and therefore never needs to be flushed.
-        */
-       if (page == ZERO_PAGE(0))
-               return;
-
-       mapping = page_mapping(page);
-       if (mapping && mapping_mapped(mapping)) {
-               __flush_dcache_page(page);
-               __flush_icache_all();
-               set_bit(PG_dcache_clean, &page->flags);
-       } else {
+       if (test_bit(PG_dcache_clean, &page->flags))
                clear_bit(PG_dcache_clean, &page->flags);
-       }
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
index eeecc9c8ed6860accacc36bb5f5aa0e6c7db7e58..ba7477efad5ca60912476d0bd93ca2e776b49d20 100644 (file)
@@ -203,10 +203,18 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
        do {
                next = pmd_addr_end(addr, end);
                /* try section mapping first */
-               if (((addr | next | phys) & ~SECTION_MASK) == 0)
+               if (((addr | next | phys) & ~SECTION_MASK) == 0) {
+                       pmd_t old_pmd = *pmd;
                        set_pmd(pmd, __pmd(phys | prot_sect_kernel));
-               else
+                       /*
+                        * Check for previous table entries created during
+                        * boot (__create_page_tables) and flush them.
+                        */
+                       if (!pmd_none(old_pmd))
+                               flush_tlb_all();
+               } else {
                        alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
+               }
                phys += next - addr;
        } while (pmd++, addr = next, addr != end);
 }
@@ -339,7 +347,6 @@ void __init paging_init(void)
        bootmem_init();
 
        empty_zero_page = virt_to_page(zero_page);
-       __flush_dcache_page(empty_zero_page);
 
        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
index a82ae8868077f9f32749ccfcc490d166d6fddd1c..f84fcf71f12955bc41b57cbd84ed2b51ed768be2 100644 (file)
@@ -95,10 +95,6 @@ ENTRY(cpu_do_switch_mm)
        ret
 ENDPROC(cpu_do_switch_mm)
 
-cpu_name:
-       .ascii  "AArch64 Processor"
-       .align
-
        .section ".text.init", #alloc, #execinstr
 
 /*
index a2b25418978244d1ae129cf74f031020951f35d4..053d846ab5b108931ccbeacb67ebc7c85696d0f6 100644 (file)
@@ -186,27 +186,19 @@ u32 arch_timer_get_rate(void)
        return arch_timer_rate;
 }
 
-/*
- * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to
- * call it before it has been initialised. Rather than incur a performance
- * penalty checking for initialisation, provide a default implementation that
- * won't lead to time appearing to jump backwards.
- */
-static u64 arch_timer_read_zero(void)
+u64 arch_timer_read_counter(void)
 {
-       return 0;
+       return arch_counter_get_cntvct();
 }
 
-u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero;
-
 static cycle_t arch_counter_read(struct clocksource *cs)
 {
-       return arch_timer_read_counter();
+       return arch_counter_get_cntvct();
 }
 
 static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
 {
-       return arch_timer_read_counter();
+       return arch_counter_get_cntvct();
 }
 
 static struct clocksource clocksource_counter = {
@@ -287,7 +279,7 @@ static int __init arch_timer_register(void)
        cyclecounter.mult = clocksource_counter.mult;
        cyclecounter.shift = clocksource_counter.shift;
        timecounter_init(&timecounter, &cyclecounter,
-                        arch_counter_get_cntpct());
+                        arch_counter_get_cntvct());
 
        if (arch_timer_use_virtual) {
                ppi = arch_timer_ppi[VIRT_PPI];
@@ -376,11 +368,6 @@ static void __init arch_timer_init(struct device_node *np)
                }
        }
 
-       if (arch_timer_use_virtual)
-               arch_timer_read_counter = arch_counter_get_cntvct;
-       else
-               arch_timer_read_counter = arch_counter_get_cntpct;
-
        arch_timer_register();
        arch_timer_arch_init();
 }
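
With the function pointer gone, every consumer of arch_timer_read_counter() now reads the virtual counter unconditionally. On arm64 that read is the twin of the removed CNTPCT helper near the top of this page, targeting cntvct_el0 instead; a sketch:

    #include <stdint.h>

    /* Read the virtual counter; the isb keeps the mrs from being
     * speculated ahead of program order (mirrors the removed helper). */
    static inline uint64_t read_cntvct(void)
    {
            uint64_t cval;

            asm volatile("isb\n\tmrs %0, cntvct_el0" : "=r" (cval));
            return cval;
    }
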
index e6c9c4cc9b23415fad39dfd142a6ce3bd44f4b7c..c463ce990c48b9a6f5742a45ac741b91371aae4f 100644 (file)
@@ -32,7 +32,7 @@
 #ifdef CONFIG_ARM_ARCH_TIMER
 
 extern u32 arch_timer_get_rate(void);
-extern u64 (*arch_timer_read_counter)(void);
+extern u64 arch_timer_read_counter(void);
 extern struct timecounter *arch_timer_get_timecounter(void);
 
 #else