/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#include "mm.h"
#ifdef CONFIG_CPU_CACHE_VIPT

#define ALIAS_FLUSH_START	0xffff4000
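
/*
 * On an aliasing VIPT cache, one physical page may be cached at
 * several virtual addresses of differing cache colour.  To flush the
 * data a user mapping sees, remap the page at a kernel address
 * congruent with the user address and clean+invalidate that alias,
 * then drain the write buffer (the mcrr/mcr pair below).
 */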
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);

	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}
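
/*
 * The CP15 pair used on the aliasing VIPT paths below: "c7, c14, 0"
 * cleans and invalidates the entire D-cache, and "c7, c10, 4" drains
 * the write buffer.
 */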
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}
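
/*
 * On an aliasing VIPT cache, flush_cache_range() falls back to the
 * same full D-cache clean+invalidate as flush_cache_mm(); executable
 * ranges additionally get the I-cache invalidated.
 */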
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}
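
/*
 * Flush the user mapping of a single page.  An ASID-tagged VIVT
 * I-cache cannot be maintained through the kernel alias, so
 * executable pages force a full I-cache invalidate on such CPUs.
 */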
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}
#else
#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
#endif
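
/*
 * Run on the other CPUs via smp_call_function() when cache operations
 * are not broadcast in hardware: each core must invalidate its own
 * I-cache.
 */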
#ifdef CONFIG_SMP
static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}
#endif
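
/*
 * Flush caches after ptrace (or a similar writer) has modified a page
 * mapped by another process: kaddr is the kernel alias that was
 * written, uaddr the address at which the traced task sees the page.
 */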
static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	if (cache_is_vivt()) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing cache */
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}
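
/*
 * Write back the data cached at the kernel's view of this page.  A
 * highmem page may have no kernel mapping at all: borrow the existing
 * kmap if there is one, or, on a VIPT cache, install a temporary
 * mapping just for the flush.
 */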
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
	} else {
		void *addr = kmap_high_get(page);
		if (addr) {
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_high(page);
		} else if (cache_is_vipt()) {
			pte_t saved_pte;
			addr = kmap_high_l1_vipt(page, &saved_pte);
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_high_l1_vipt(page, saved_pte);
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}
/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *    current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	if (!cache_ops_need_broadcast() &&
	    !PageHighMem(page) && mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
	}
}
EXPORT_SYMBOL(flush_dcache_page);
/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}