Merge tag 'ronx-next' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux...
author Russell King <rmk+kernel@arm.linux.org.uk>
Mon, 3 Nov 2014 10:12:13 +0000 (10:12 +0000)
committer Russell King <rmk+kernel@arm.linux.org.uk>
Mon, 3 Nov 2014 10:12:13 +0000 (10:12 +0000)
generic fixmaps
ARM support for CONFIG_DEBUG_RODATA

15 files changed:
Documentation/arm/memory.txt
arch/arm/include/asm/cacheflush.h
arch/arm/include/asm/fixmap.h
arch/arm/kernel/Makefile
arch/arm/kernel/ftrace.c
arch/arm/kernel/jump_label.c
arch/arm/kernel/kgdb.c
arch/arm/kernel/machine_kexec.c
arch/arm/kernel/patch.c
arch/arm/kernel/patch.h
arch/arm/kernel/vmlinux.lds.S
arch/arm/mm/Kconfig
arch/arm/mm/highmem.c
arch/arm/mm/init.c
arch/arm/mm/mmu.c
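
The 'ronx-next' topic converts ARM to the generic fixmap API and adds CONFIG_DEBUG_RODATA support: kernel text and rodata become read-only after init, while legitimate text writes go through temporary fixmap aliases (FIX_TEXT_POKE0/1) or a transient RW window. A minimal sketch of the RW-window pattern the series establishes (illustrative only; names taken from the ftrace.c hunk below):

	static int __modify_text(void *data)
	{
		/* Run under stop_machine() so no other CPU executes the text. */
		set_kernel_text_rw();	/* transiently drop RO on kernel text */
		/* ... rewrite instructions here ... */
		set_kernel_text_ro();	/* restore RO before resuming */
		return 0;
	}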

diff --git a/Documentation/arm/memory.txt b/Documentation/arm/memory.txt
index 38dc06d0a7910fa13fa830833d3ee473296c2052..4178ebda6e665c1c2e9dac6451e2137acfd41eb1 100644
@@ -41,7 +41,7 @@ fffe8000      fffeffff        DTCM mapping area for platforms with
 fffe0000       fffe7fff        ITCM mapping area for platforms with
                                ITCM mounted inside the CPU.
 
-ffc00000       ffdfffff        Fixmap mapping region.  Addresses provided
+ffc00000       ffefffff        Fixmap mapping region.  Addresses provided
                                by fix_to_virt() will be located here.
 
 fee00000       feffffff        Mapping of PCI I/O space. This is a static
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 10e78d00a0bb348a8e77aa8f04b484a6b2195bfd..2d46862e7bef7735216f78856924456a73091973 100644
@@ -487,6 +487,16 @@ int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
 
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+void set_kernel_text_rw(void);
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_rw(void) { }
+static inline void set_kernel_text_ro(void) { }
+#endif
+
 void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
                             void *kaddr, unsigned long len);
+
 #endif
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 74124b0d0d7952f60bee72c69ae49c30eb677c99..0415eae1df27419755c5cff78170ea3f6d029535 100644
@@ -2,27 +2,24 @@
 #define _ASM_FIXMAP_H
 
 #define FIXADDR_START          0xffc00000UL
-#define FIXADDR_TOP            0xffe00000UL
-#define FIXADDR_SIZE           (FIXADDR_TOP - FIXADDR_START)
+#define FIXADDR_END            0xfff00000UL
+#define FIXADDR_TOP            (FIXADDR_END - PAGE_SIZE)
 
-#define FIX_KMAP_NR_PTES       (FIXADDR_SIZE >> PAGE_SHIFT)
+#include <asm/kmap_types.h>
 
-#define __fix_to_virt(x)       (FIXADDR_START + ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x)       (((x) - FIXADDR_START) >> PAGE_SHIFT)
+enum fixed_addresses {
+       FIX_KMAP_BEGIN,
+       FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
 
-extern void __this_fixmap_does_not_exist(void);
+       /* Support writing RO kernel text via kprobes, jump labels, etc. */
+       FIX_TEXT_POKE0,
+       FIX_TEXT_POKE1,
 
-static inline unsigned long fix_to_virt(const unsigned int idx)
-{
-       if (idx >= FIX_KMAP_NR_PTES)
-               __this_fixmap_does_not_exist();
-       return __fix_to_virt(idx);
-}
+       __end_of_fixed_addresses
+};
 
-static inline unsigned int virt_to_fix(const unsigned long vaddr)
-{
-       BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
-       return __virt_to_fix(vaddr);
-}
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
+
+#include <asm-generic/fixmap.h>
 
 #endif
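
For reference, <asm-generic/fixmap.h> (now included above) derives the index/address helpers from FIXADDR_TOP and the enum; fixmap slots grow downward from FIXADDR_TOP, which is why FIXADDR_TOP sits one page below FIXADDR_END. Roughly (paraphrased from the generic header of this era):

	#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
	#define __virt_to_fix(x)	((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)

	static __always_inline unsigned long fix_to_virt(const unsigned int idx)
	{
		BUILD_BUG_ON(idx >= __end_of_fixed_addresses);	/* compile-time bound */
		return __fix_to_virt(idx);
	}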
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 38ddd9f83d0e586289c56bdbf1e38a6a977ff1a7..70b730766330563121c7e354696f225e97cc7052 100644
@@ -67,7 +67,7 @@ test-kprobes-objs             += kprobes-test-arm.o
 endif
 obj-$(CONFIG_OABI_COMPAT)      += sys_oabi-compat.o
 obj-$(CONFIG_ARM_THUMBEE)      += thumbee.o
-obj-$(CONFIG_KGDB)             += kgdb.o
+obj-$(CONFIG_KGDB)             += kgdb.o patch.o
 obj-$(CONFIG_ARM_UNWIND)       += unwind.o
 obj-$(CONFIG_HAVE_TCM)         += tcm.o
 obj-$(CONFIG_OF)               += devtree.o
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index af9a8a927a4e9624636f008cc28415e97482803a..b8c75e45a950af1e962f2b3fb9512d5ab01fb36d 100644
@@ -15,6 +15,7 @@
 #include <linux/ftrace.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
+#include <linux/stop_machine.h>
 
 #include <asm/cacheflush.h>
 #include <asm/opcodes.h>
 
 #define        OLD_NOP         0xe1a00000      /* mov r0, r0 */
 
+static int __ftrace_modify_code(void *data)
+{
+       int *command = data;
+
+       set_kernel_text_rw();
+       ftrace_modify_all_code(*command);
+       set_kernel_text_ro();
+
+       return 0;
+}
+
+void arch_ftrace_update_code(int command)
+{
+       stop_machine(__ftrace_modify_code, &command, NULL);
+}
+
 static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
 {
        return rec->arch.old_mcount ? OLD_NOP : NOP;
@@ -73,6 +90,8 @@ int ftrace_arch_code_modify_prepare(void)
 int ftrace_arch_code_modify_post_process(void)
 {
        set_all_modules_text_ro();
+       /* Make sure any TLB misses during machine stop are cleared. */
+       flush_tlb_all();
        return 0;
 }
 
diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c
index 4ce4f789446da5844b86f929e276453424d9a808..afeeb9ea6f439ebeb515bdea4b3acf0962a5b687 100644
@@ -19,7 +19,7 @@ static void __arch_jump_label_transform(struct jump_entry *entry,
                insn = arm_gen_nop();
 
        if (is_static)
-               __patch_text(addr, insn);
+               __patch_text_early(addr, insn);
        else
                patch_text(addr, insn);
 }
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index a74b53c1b7dfc3566e51957b6422da290121ff93..07db2f8a1b4505b09eb4633488d7167bb695f669 100644
 #include <linux/irq.h>
 #include <linux/kdebug.h>
 #include <linux/kgdb.h>
+#include <linux/uaccess.h>
+
 #include <asm/traps.h>
 
+#include "patch.h"
+
 struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
 {
        { "r0", 4, offsetof(struct pt_regs, ARM_r0)},
@@ -244,6 +248,31 @@ void kgdb_arch_exit(void)
        unregister_die_notifier(&kgdb_notifier);
 }
 
+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+{
+       int err;
+
+       /* patch_text() only supports int-sized breakpoints */
+       BUILD_BUG_ON(sizeof(int) != BREAK_INSTR_SIZE);
+
+       err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+                               BREAK_INSTR_SIZE);
+       if (err)
+               return err;
+
+       patch_text((void *)bpt->bpt_addr,
+                  *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
+
+       return err;
+}
+
+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+{
+       patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);
+
+       return 0;
+}
+
 /*
  * Register our undef instruction hooks with ARM undef core.
 * We register a hook specifically looking for the KGDB break inst
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 8cf0996aa1a8d795bfdb65add498aa1552829382..4423a565ef6fc9c073631da29dd2afd2f9d3f645 100644
@@ -29,6 +29,7 @@ extern unsigned long kexec_boot_atags;
 
 static atomic_t waiting_for_crash_ipi;
 
+static unsigned long dt_mem;
 /*
  * Provide a dummy crash_notes definition while crash dump arrives to arm.
  * This prevents breakage of crash_notes attribute in kernel/ksysfs.c.
@@ -64,7 +65,7 @@ int machine_kexec_prepare(struct kimage *image)
                        return err;
 
                if (be32_to_cpu(header) == OF_DT_HEADER)
-                       kexec_boot_atags = current_segment->mem;
+                       dt_mem = current_segment->mem;
        }
        return 0;
 }
@@ -163,12 +164,12 @@ void machine_kexec(struct kimage *image)
        reboot_code_buffer = page_address(image->control_code_page);
 
        /* Prepare parameters for reboot_code_buffer*/
+       set_kernel_text_rw();
        kexec_start_address = image->start;
        kexec_indirection_page = page_list;
        kexec_mach_type = machine_arch_type;
-       if (!kexec_boot_atags)
-               kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
-
+       kexec_boot_atags = dt_mem ?: image->start - KEXEC_ARM_ZIMAGE_OFFSET
+                                    + KEXEC_ARM_ATAGS_OFFSET;
 
        /* copy our kernel relocation code to the control code page */
        reboot_entry = fncpy(reboot_code_buffer,
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
index 07314af477336a9c798e2ca7c794a2893fbfe624..5038960e3c55abc1dc9744808f7c8c9d44eca290 100644
@@ -1,8 +1,11 @@
 #include <linux/kernel.h>
+#include <linux/spinlock.h>
 #include <linux/kprobes.h>
+#include <linux/mm.h>
 #include <linux/stop_machine.h>
 
 #include <asm/cacheflush.h>
+#include <asm/fixmap.h>
 #include <asm/smp_plat.h>
 #include <asm/opcodes.h>
 
@@ -13,21 +16,77 @@ struct patch {
        unsigned int insn;
 };
 
-void __kprobes __patch_text(void *addr, unsigned int insn)
+static DEFINE_SPINLOCK(patch_lock);
+
+static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
+       __acquires(&patch_lock)
+{
+       unsigned int uintaddr = (uintptr_t) addr;
+       bool module = !core_kernel_text(uintaddr);
+       struct page *page;
+
+       if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
+               page = vmalloc_to_page(addr);
+       else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
+               page = virt_to_page(addr);
+       else
+               return addr;
+
+       if (flags)
+               spin_lock_irqsave(&patch_lock, *flags);
+       else
+               __acquire(&patch_lock);
+
+       set_fixmap(fixmap, page_to_phys(page));
+
+       return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
+}
+
+static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
+       __releases(&patch_lock)
+{
+       clear_fixmap(fixmap);
+
+       if (flags)
+               spin_unlock_irqrestore(&patch_lock, *flags);
+       else
+               __release(&patch_lock);
+}
+
+void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
 {
        bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
+       unsigned int uintaddr = (uintptr_t) addr;
+       bool twopage = false;
+       unsigned long flags;
+       void *waddr = addr;
        int size;
 
+       if (remap)
+               waddr = patch_map(addr, FIX_TEXT_POKE0, &flags);
+       else
+               __acquire(&patch_lock);
+
        if (thumb2 && __opcode_is_thumb16(insn)) {
-               *(u16 *)addr = __opcode_to_mem_thumb16(insn);
+               *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
                size = sizeof(u16);
-       } else if (thumb2 && ((uintptr_t)addr & 2)) {
+       } else if (thumb2 && (uintaddr & 2)) {
                u16 first = __opcode_thumb32_first(insn);
                u16 second = __opcode_thumb32_second(insn);
-               u16 *addrh = addr;
+               u16 *addrh0 = waddr;
+               u16 *addrh1 = waddr + 2;
+
+               twopage = (uintaddr & ~PAGE_MASK) == PAGE_SIZE - 2;
+               if (twopage && remap)
+                       addrh1 = patch_map(addr + 2, FIX_TEXT_POKE1, NULL);
+
+               *addrh0 = __opcode_to_mem_thumb16(first);
+               *addrh1 = __opcode_to_mem_thumb16(second);
 
-               addrh[0] = __opcode_to_mem_thumb16(first);
-               addrh[1] = __opcode_to_mem_thumb16(second);
+               if (twopage && addrh1 != addr + 2) {
+                       flush_kernel_vmap_range(addrh1, 2);
+                       patch_unmap(FIX_TEXT_POKE1, NULL);
+               }
 
                size = sizeof(u32);
        } else {
@@ -36,10 +95,16 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
                else
                        insn = __opcode_to_mem_arm(insn);
 
-               *(u32 *)addr = insn;
+               *(u32 *)waddr = insn;
                size = sizeof(u32);
        }
 
+       if (waddr != addr) {
+               flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
+               patch_unmap(FIX_TEXT_POKE0, &flags);
+       } else
+               __release(&patch_lock);
+
        flush_icache_range((uintptr_t)(addr),
                           (uintptr_t)(addr) + size);
 }
@@ -60,16 +125,5 @@ void __kprobes patch_text(void *addr, unsigned int insn)
                .insn = insn,
        };
 
-       if (cache_ops_need_broadcast()) {
-               stop_machine(patch_text_stop_machine, &patch, cpu_online_mask);
-       } else {
-               bool straddles_word = IS_ENABLED(CONFIG_THUMB2_KERNEL)
-                                     && __opcode_is_thumb32(insn)
-                                     && ((uintptr_t)addr & 2);
-
-               if (straddles_word)
-                       stop_machine(patch_text_stop_machine, &patch, NULL);
-               else
-                       __patch_text(addr, insn);
-       }
+       stop_machine(patch_text_stop_machine, &patch, NULL);
 }
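
Usage sketch (hypothetical caller, not part of this diff; arm_gen_nop() comes from <asm/insn.h>): patch_text() now always routes through stop_machine(), and when the permanent mapping is read-only the store lands in the temporary FIX_TEXT_POKE0 alias rather than at the RO address:

	#include <asm/insn.h>
	#include "patch.h"

	/* Hypothetical example: overwrite the instruction at addr with a NOP. */
	static void example_nop_out(void *addr)
	{
		patch_text(addr, arm_gen_nop());
	}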
diff --git a/arch/arm/kernel/patch.h b/arch/arm/kernel/patch.h
index b4731f2dac38556a0d580aa4217931d0cf6d8423..77e054c2f6cd3f6a265a75adc0c27d5730fef36d 100644
@@ -2,6 +2,16 @@
 #define _ARM_KERNEL_PATCH_H
 
 void patch_text(void *addr, unsigned int insn);
-void __patch_text(void *addr, unsigned int insn);
+void __patch_text_real(void *addr, unsigned int insn, bool remap);
+
+static inline void __patch_text(void *addr, unsigned int insn)
+{
+       __patch_text_real(addr, insn, true);
+}
+
+static inline void __patch_text_early(void *addr, unsigned int insn)
+{
+       __patch_text_real(addr, insn, false);
+}
 
 #endif
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 8e95aa47457a56761b4a84bcc5ad8a657fac8c66..b31aa73e80765539ce14018a1ed2fdc0a1a81ddc 100644
@@ -8,6 +8,9 @@
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/page.h>
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+#include <asm/pgtable.h>
+#endif
        
 #define PROC_INFO                                                      \
        . = ALIGN(4);                                                   \
@@ -90,6 +93,11 @@ SECTIONS
                _text = .;
                HEAD_TEXT
        }
+
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+       . = ALIGN(1<<SECTION_SHIFT);
+#endif
+
        .text : {                       /* Real text segment            */
                _stext = .;             /* Text and read-only data      */
                        __exception_text_start = .;
@@ -112,6 +120,9 @@ SECTIONS
                        ARM_CPU_KEEP(PROC_INFO)
        }
 
+#ifdef CONFIG_DEBUG_RODATA
+       . = ALIGN(1<<SECTION_SHIFT);
+#endif
        RO_DATA(PAGE_SIZE)
 
        . = ALIGN(4);
@@ -145,7 +156,11 @@ SECTIONS
        _etext = .;                     /* End of text and rodata section */
 
 #ifndef CONFIG_XIP_KERNEL
+# ifdef CONFIG_ARM_KERNMEM_PERMS
+       . = ALIGN(1<<SECTION_SHIFT);
+# else
        . = ALIGN(PAGE_SIZE);
+# endif
        __init_begin = .;
 #endif
        /*
@@ -218,8 +233,12 @@ SECTIONS
 #ifdef CONFIG_XIP_KERNEL
        __data_loc = ALIGN(4);          /* location in binary */
        . = PAGE_OFFSET + TEXT_OFFSET;
+#else
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+       . = ALIGN(1<<SECTION_SHIFT);
 #else
        . = ALIGN(THREAD_SIZE);
+#endif
        __init_end = .;
        __data_loc = .;
 #endif
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index ae69809a9e479bef7617e41627b082452c93d09e..c9cd9c5bf1e1432ae0142ea30908f4be7a2e5bb9 100644
@@ -1008,3 +1008,24 @@ config ARCH_SUPPORTS_BIG_ENDIAN
        help
          This option specifies the architecture can support big endian
          operation.
+
+config ARM_KERNMEM_PERMS
+       bool "Restrict kernel memory permissions"
+       help
+         If this is set, kernel memory other than kernel text (and rodata)
+         will be made non-executable. The tradeoff is that each region is
+         padded to section-size (1MiB) boundaries (because their permissions
+         are different and splitting the 1M pages into 4K ones causes TLB
+         performance problems), wasting memory.
+
+config DEBUG_RODATA
+       bool "Make kernel text and rodata read-only"
+       depends on ARM_KERNMEM_PERMS
+       default y
+       help
+         If this is set, kernel text and rodata will be made read-only. This
+         is to help catch accidental or malicious attempts to change the
+         kernel's executable code. This additionally splits rodata from kernel
+         text so it can be made explicitly non-executable. This creates
+         another section-size padded region, so it can waste more memory
+         space while gaining the read-only protections.
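
A minimal .config fragment enabling both protections (sketch; option names as defined above):

	CONFIG_ARM_KERNMEM_PERMS=y
	CONFIG_DEBUG_RODATA=y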
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 45aeaaca9052f237322cf91a247a69fdb2d1571e..81061987ac4512ff9158e1162846ecf16abaa11e 100644
 #include <asm/tlbflush.h>
 #include "mm.h"
 
-pte_t *fixmap_page_table;
-
 static inline void set_fixmap_pte(int idx, pte_t pte)
 {
        unsigned long vaddr = __fix_to_virt(idx);
-       set_pte_ext(fixmap_page_table + idx, pte, 0);
+       pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+       set_pte_ext(ptep, pte, 0);
        local_flush_tlb_kernel_page(vaddr);
 }
 
 static inline pte_t get_fixmap_pte(unsigned long vaddr)
 {
-       unsigned long idx = __virt_to_fix(vaddr);
-       return *(fixmap_page_table + idx);
+       pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+       return *ptep;
 }
 
 void *kmap(struct page *page)
@@ -84,7 +85,7 @@ void *kmap_atomic(struct page *page)
         * With debugging enabled, kunmap_atomic forces that entry to 0.
         * Make sure it was indeed properly unmapped.
         */
-       BUG_ON(!pte_none(*(fixmap_page_table + idx)));
+       BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
 #endif
        /*
         * When debugging is off, kunmap_atomic leaves the previous mapping
@@ -134,7 +135,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
-       BUG_ON(!pte_none(*(fixmap_page_table + idx)));
+       BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
 #endif
        set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
 
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 92bba32d92304c4383d43bee8ef95f7d988602c6..67b15426b9c6f1969844aeeb335ba3cce66fa657 100644
@@ -29,6 +29,7 @@
 #include <asm/prom.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
+#include <asm/system_info.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
 
@@ -570,7 +571,7 @@ void __init mem_init(void)
                        MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
                        MLK(ITCM_OFFSET, (unsigned long) itcm_end),
 #endif
-                       MLK(FIXADDR_START, FIXADDR_TOP),
+                       MLK(FIXADDR_START, FIXADDR_END),
                        MLM(VMALLOC_START, VMALLOC_END),
                        MLM(PAGE_OFFSET, (unsigned long)high_memory),
 #ifdef CONFIG_HIGHMEM
@@ -615,7 +616,145 @@ void __init mem_init(void)
        }
 }
 
-void free_initmem(void)
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+struct section_perm {
+       unsigned long start;
+       unsigned long end;
+       pmdval_t mask;
+       pmdval_t prot;
+       pmdval_t clear;
+};
+
+static struct section_perm nx_perms[] = {
+       /* Make page tables, etc. before _stext RW (set NX). */
+       {
+               .start  = PAGE_OFFSET,
+               .end    = (unsigned long)_stext,
+               .mask   = ~PMD_SECT_XN,
+               .prot   = PMD_SECT_XN,
+       },
+       /* Make init RW (set NX). */
+       {
+               .start  = (unsigned long)__init_begin,
+               .end    = (unsigned long)_sdata,
+               .mask   = ~PMD_SECT_XN,
+               .prot   = PMD_SECT_XN,
+       },
+#ifdef CONFIG_DEBUG_RODATA
+       /* Make rodata NX (set RO in ro_perms below). */
+       {
+               .start  = (unsigned long)__start_rodata,
+               .end    = (unsigned long)__init_begin,
+               .mask   = ~PMD_SECT_XN,
+               .prot   = PMD_SECT_XN,
+       },
+#endif
+};
+
+#ifdef CONFIG_DEBUG_RODATA
+static struct section_perm ro_perms[] = {
+       /* Make kernel code and rodata RX (set RO). */
+       {
+               .start  = (unsigned long)_stext,
+               .end    = (unsigned long)__init_begin,
+#ifdef CONFIG_ARM_LPAE
+               .mask   = ~PMD_SECT_RDONLY,
+               .prot   = PMD_SECT_RDONLY,
+#else
+               .mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
+               .prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
+               .clear  = PMD_SECT_AP_WRITE,
+#endif
+       },
+};
+#endif
+
+/*
+ * Updates section permissions only for the current mm (sections are
+ * copied into each mm). During startup, this is the init_mm. It is only
+ * safe to call this with preemption disabled, as under stop_machine().
+ */
+static inline void section_update(unsigned long addr, pmdval_t mask,
+                                 pmdval_t prot)
+{
+       struct mm_struct *mm;
+       pmd_t *pmd;
+
+       mm = current->active_mm;
+       pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
+
+#ifdef CONFIG_ARM_LPAE
+       pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#else
+       if (addr & SECTION_SIZE)
+               pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
+       else
+               pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#endif
+       flush_pmd_entry(pmd);
+       local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
+}
+
+/* Make sure extended page tables are in use. */
+static inline bool arch_has_strict_perms(void)
+{
+       if (cpu_architecture() < CPU_ARCH_ARMv6)
+               return false;
+
+       return !!(get_cr() & CR_XP);
+}
+
+#define set_section_perms(perms, field)        {                               \
+       size_t i;                                                       \
+       unsigned long addr;                                             \
+                                                                       \
+       if (!arch_has_strict_perms())                                   \
+               return;                                                 \
+                                                                       \
+       for (i = 0; i < ARRAY_SIZE(perms); i++) {                       \
+               if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||        \
+                   !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {          \
+                       pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
+                               perms[i].start, perms[i].end,           \
+                               SECTION_SIZE);                          \
+                       continue;                                       \
+               }                                                       \
+                                                                       \
+               for (addr = perms[i].start;                             \
+                    addr < perms[i].end;                               \
+                    addr += SECTION_SIZE)                              \
+                       section_update(addr, perms[i].mask,             \
+                                      perms[i].field);                 \
+       }                                                               \
+}
+
+static inline void fix_kernmem_perms(void)
+{
+       set_section_perms(nx_perms, prot);
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void)
+{
+       set_section_perms(ro_perms, prot);
+}
+
+void set_kernel_text_rw(void)
+{
+       set_section_perms(ro_perms, clear);
+}
+
+void set_kernel_text_ro(void)
+{
+       set_section_perms(ro_perms, prot);
+}
+#endif /* CONFIG_DEBUG_RODATA */
+
+#else
+static inline void fix_kernmem_perms(void) { }
+#endif /* CONFIG_ARM_KERNMEM_PERMS */
+
+void free_tcmmem(void)
 {
 #ifdef CONFIG_HAVE_TCM
        extern char __tcm_start, __tcm_end;
@@ -623,6 +762,12 @@ void free_initmem(void)
        poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
        free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
 #endif
+}
+
+void free_initmem(void)
+{
+       fix_kernmem_perms();
+       free_tcmmem();
 
        poison_init_mem(__init_begin, __init_end - __init_begin);
        if (!machine_is_integrator() && !machine_is_cintegrator())
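
Once mark_rodata_ro() has run, the protection can be exercised from userspace with LKDTM's write-to-rodata test (a hypothetical smoke test, not part of this series; assumes a kernel built with CONFIG_LKDTM and debugfs mounted):

	# writing to rodata should now fault instead of silently succeeding
	echo WRITE_RO > /sys/kernel/debug/provoke-crash/DIRECT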
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9f98cec7fe1e1855dc5fcede8b0e24e88d09c3b3..a7b12cb21e816ed84a1206199c9b96b03ac96244 100644
@@ -22,6 +22,7 @@
 #include <asm/cputype.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
+#include <asm/fixmap.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
@@ -392,6 +393,29 @@ SET_MEMORY_FN(rw, pte_set_rw)
 SET_MEMORY_FN(x, pte_set_x)
 SET_MEMORY_FN(nx, pte_set_nx)
 
+/*
+ * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
+ * As a result, this can only be called with preemption disabled, as under
+ * stop_machine().
+ */
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
+{
+       unsigned long vaddr = __fix_to_virt(idx);
+       pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+       /* Make sure fixmap region does not exceed available allocation. */
+       BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
+                    FIXADDR_END);
+       BUG_ON(idx >= __end_of_fixed_addresses);
+
+       if (pgprot_val(prot))
+               set_pte_at(NULL, vaddr, pte,
+                       pfn_pte(phys >> PAGE_SHIFT, prot));
+       else
+               pte_clear(NULL, vaddr, pte);
+       local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
+}
+
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
@@ -1326,10 +1350,10 @@ static void __init kmap_init(void)
 #ifdef CONFIG_HIGHMEM
        pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
                PKMAP_BASE, _PAGE_KERNEL_TABLE);
-
-       fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START),
-               FIXADDR_START, _PAGE_KERNEL_TABLE);
 #endif
+
+       early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
+                       _PAGE_KERNEL_TABLE);
 }
 
 static void __init map_lowmem(void)
@@ -1349,12 +1373,19 @@ static void __init map_lowmem(void)
                if (start >= end)
                        break;
 
-               if (end < kernel_x_start || start >= kernel_x_end) {
+               if (end < kernel_x_start) {
                        map.pfn = __phys_to_pfn(start);
                        map.virtual = __phys_to_virt(start);
                        map.length = end - start;
                        map.type = MT_MEMORY_RWX;
 
+                       create_mapping(&map);
+               } else if (start >= kernel_x_end) {
+                       map.pfn = __phys_to_pfn(start);
+                       map.virtual = __phys_to_virt(start);
+                       map.length = end - start;
+                       map.type = MT_MEMORY_RW;
+
                        create_mapping(&map);
                } else {
                        /* This better cover the entire kernel */