/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT. See the GNU General Public License for
 *   more details.
 *
 * This code maintains the "home" for each page in the system.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/bootmem.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/pagevec.h>
#include <linux/ptrace.h>
#include <linux/timex.h>
#include <linux/cache.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/hugetlb.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/homecache.h>
#if CHIP_HAS_COHERENT_LOCAL_CACHE()

/*
 * The noallocl2 option suppresses all use of the L2 cache to cache
 * locally from a remote home. There's no point in using it if we
 * don't have coherent local caching, though.
 */
static int __write_once noallocl2;
static int __init set_noallocl2(char *str)
{
	noallocl2 = 1;
	return 0;
}
early_param("noallocl2", set_noallocl2);

#else

#define noallocl2 0

#endif
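/*
 * Usage note (illustrative, not part of the original file): since this
 * is an early_param, the option takes effect when "noallocl2" is
 * appended to the kernel boot command line.
 */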
/* Provide no-op versions of these routines to keep flush_remote() cleaner. */
#define mark_caches_evicted_start() 0
#define mark_caches_evicted_finish(mask, timestamp) do {} while (0)
/*
 * Update the irq_stat for cpus that we are going to interrupt
 * with TLB or cache flushes. Also handle removing dataplane cpus
 * from the TLB flush set, and setting dataplane_tlb_state instead.
 */
static void hv_flush_update(const struct cpumask *cache_cpumask,
			    struct cpumask *tlb_cpumask,
			    unsigned long tlb_va, unsigned long tlb_length,
			    HV_Remote_ASID *asids, int asidcount)
{
	struct cpumask mask;
	int i, cpu;

	cpumask_clear(&mask);
	if (cache_cpumask)
		cpumask_or(&mask, &mask, cache_cpumask);
	if (tlb_cpumask && tlb_length) {
		cpumask_or(&mask, &mask, tlb_cpumask);
	}

	for (i = 0; i < asidcount; ++i)
		cpumask_set_cpu(asids[i].y * smp_width + asids[i].x, &mask);

	/*
	 * Don't bother to update atomically; losing a count
	 * here is not that critical.
	 */
	for_each_cpu(cpu, &mask)
		++per_cpu(irq_stat, cpu).irq_hv_flush_count;
}
/*
 * This wrapper function around hv_flush_remote() does several things:
 *
 *  - Provides a return value error-checking panic path, since
 *    there's never any good reason for hv_flush_remote() to fail.
 *  - Accepts a 32-bit PFN rather than a 64-bit PA, which generally
 *    is the type that Linux wants to pass around anyway.
 *  - Centralizes the mark_caches_evicted() handling.
 *  - Canonicalizes that lengths of zero make cpumasks NULL.
 *  - Handles deferring TLB flushes for dataplane tiles.
 *  - Tracks remote interrupts in the per-cpu irq_cpustat_t.
 *
 * Note that we have to wait until the cache flush completes before
 * updating the per-cpu last_cache_flush word, since otherwise another
 * concurrent flush can race, conclude the flush has already
 * completed, and start to use the page while it's still dirty
 * remotely (running concurrently with the actual evict, presumably).
 */
void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
		  const struct cpumask *cache_cpumask_orig,
		  HV_VirtAddr tlb_va, unsigned long tlb_length,
		  unsigned long tlb_pgsize,
		  const struct cpumask *tlb_cpumask_orig,
		  HV_Remote_ASID *asids, int asidcount)
{
	int rc;
	int timestamp = 0;  /* happy compiler */
	struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
	struct cpumask *cache_cpumask, *tlb_cpumask;
	HV_PhysAddr cache_pa;
	char cache_buf[NR_CPUS*5], tlb_buf[NR_CPUS*5];
	mb();   /* provided just to simplify "magic hypervisor" mode */

	/*
	 * Canonicalize and copy the cpumasks.
	 */
	if (cache_cpumask_orig && cache_control) {
		cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig);
		cache_cpumask = &cache_cpumask_copy;
	} else {
		cpumask_clear(&cache_cpumask_copy);
		cache_cpumask = NULL;
	}
	if (cache_cpumask == NULL)
		cache_control = 0;
	if (tlb_cpumask_orig && tlb_length) {
		cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig);
		tlb_cpumask = &tlb_cpumask_copy;
	} else {
		cpumask_clear(&tlb_cpumask_copy);
		tlb_cpumask = NULL;
	}

	hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length,
			asids, asidcount);
	cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT;
	if (cache_control & HV_FLUSH_EVICT_L2)
		timestamp = mark_caches_evicted_start();
	rc = hv_flush_remote(cache_pa, cache_control,
			     cpumask_bits(cache_cpumask),
			     tlb_va, tlb_length, tlb_pgsize,
			     cpumask_bits(tlb_cpumask),
			     asids, asidcount);
	if (cache_control & HV_FLUSH_EVICT_L2)
		mark_caches_evicted_finish(cache_cpumask, timestamp);
	if (rc == 0)
		return;
	cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
	cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);

	pr_err("hv_flush_remote(%#llx, %#lx, %p [%s],"
	       " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
	       cache_pa, cache_control, cache_cpumask, cache_buf,
	       (unsigned long)tlb_va, tlb_length, tlb_pgsize,
	       tlb_cpumask, tlb_buf,
	       asids, asidcount, rc);
	panic("Unsafe to continue.");
}
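/*
 * Illustrative call (an assumption, mirroring homecache_flush_cache()
 * below): flush "length" bytes of the page at "pfn" from the caches of
 * the cpus in "mask", with no TLB flush and no remote ASIDs:
 *
 *	flush_remote(pfn, length, &mask, 0, 0, 0, NULL, NULL, 0);
 *
 * Passing HV_FLUSH_EVICT_L2 as the cache_control instead evicts the
 * entire L2 on those cpus, as homecache_evict() below does.
 */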
void flush_remote_page(struct page *page, int order)
{
	int i, pages = (1 << order);
	for (i = 0; i < pages; ++i, ++page) {
		void *p = kmap_atomic(page);
		int hfh = 0;
		int home = page_home(page);
#if CHIP_HAS_CBOX_HOME_MAP()
		if (home == PAGE_HOME_HASH)
			hfh = 1;
		else
#endif
			BUG_ON(home < 0 || home >= NR_CPUS);
		finv_buffer_remote(p, PAGE_SIZE, hfh);
		kunmap_atomic(p);
	}
}
void homecache_evict(const struct cpumask *mask)
{
	flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
}
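/*
 * Illustrative use (an assumption, not from this file): evict all
 * cached state from a single cpu, e.g. when quiescing it:
 *
 *	homecache_evict(cpumask_of(cpu));
 */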
/*
 * Return a mask of the cpus whose caches currently own these pages.
 * The return value is whether the pages are all coherently cached
 * (i.e. none are immutable, incoherent, or uncached).
 */
static int homecache_mask(struct page *page, int pages,
			  struct cpumask *home_mask)
{
	int i;
	int cached_coherently = 1;
	cpumask_clear(home_mask);
	for (i = 0; i < pages; ++i) {
		int home = page_home(&page[i]);
		if (home == PAGE_HOME_IMMUTABLE ||
		    home == PAGE_HOME_INCOHERENT) {
			cpumask_copy(home_mask, cpu_possible_mask);
			return 0;
		}
#if CHIP_HAS_CBOX_HOME_MAP()
		if (home == PAGE_HOME_HASH) {
			cpumask_or(home_mask, home_mask, &hash_for_home_map);
			continue;
		}
#endif
		if (home == PAGE_HOME_UNCACHED) {
			cached_coherently = 0;
			continue;
		}
		BUG_ON(home < 0 || home >= NR_CPUS);
		cpumask_set_cpu(home, home_mask);
	}
	return cached_coherently;
}
/*
 * Return the passed length, or HV_FLUSH_EVICT_L2 if it's long enough
 * that we believe we should evict the whole L2 cache.
 */
static unsigned long cache_flush_length(unsigned long length)
{
	return (length >= CHIP_L2_CACHE_SIZE()) ? HV_FLUSH_EVICT_L2 : length;
}
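/*
 * Illustrative arithmetic (cache size assumed, not from this file): if
 * CHIP_L2_CACHE_SIZE() were 64KB, a 16KB flush would be issued by
 * address range, while a 128KB flush would return HV_FLUSH_EVICT_L2
 * and evict the whole L2 instead, since flushing more bytes than the
 * cache can hold is never cheaper than a full evict.
 */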
/* Flush a page out of whatever cache(s) it is in. */
void homecache_flush_cache(struct page *page, int order)
{
	int pages = 1 << order;
	int length = cache_flush_length(pages * PAGE_SIZE);
	unsigned long pfn = page_to_pfn(page);
	struct cpumask home_mask;

	homecache_mask(page, pages, &home_mask);
	flush_remote(pfn, length, &home_mask, 0, 0, 0, NULL, NULL, 0);
	sim_validate_lines_evicted(PFN_PHYS(pfn), pages * PAGE_SIZE);
}
/* Report the home corresponding to a given PTE. */
static int pte_to_home(pte_t pte)
{
	if (hv_pte_get_nc(pte))
		return PAGE_HOME_IMMUTABLE;
	switch (hv_pte_get_mode(pte)) {
	case HV_PTE_MODE_CACHE_TILE_L3:
		return get_remote_cache_cpu(pte);
	case HV_PTE_MODE_CACHE_NO_L3:
		return PAGE_HOME_INCOHERENT;
	case HV_PTE_MODE_UNCACHED:
		return PAGE_HOME_UNCACHED;
#if CHIP_HAS_CBOX_HOME_MAP()
	case HV_PTE_MODE_CACHE_HASH_L3:
		return PAGE_HOME_HASH;
#endif
	}
	panic("Bad PTE %#llx\n", pte.val);
}
/* Update the home of a PTE if necessary (can also be used for a pgprot_t). */
pte_t pte_set_home(pte_t pte, int home)
{
	/* Check for non-linear file mapping "PTEs" and pass them through. */
	if (pte_file(pte))
		return pte;

#if CHIP_HAS_MMIO()
	/* Check for MMIO mappings and pass them through. */
	if (hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO)
		return pte;
#endif

	/*
	 * Only immutable pages get NC mappings. If we have a
	 * non-coherent PTE, but the underlying page is not
	 * immutable, it's likely the result of a forced
	 * caching setting running up against ptrace setting
	 * the page to be writable underneath. In this case,
	 * just keep the PTE coherent.
	 */
	if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) {
		pte = hv_pte_clear_nc(pte);
		pr_err("non-immutable page incoherently referenced: %#llx\n",
		       pte.val);
	}

	switch (home) {
	case PAGE_HOME_UNCACHED:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
		break;

	case PAGE_HOME_INCOHERENT:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		break;

	case PAGE_HOME_IMMUTABLE:
		/*
		 * We could home this page anywhere, since it's immutable,
		 * but by default just home it to follow "hash_default".
		 */
		BUG_ON(hv_pte_get_writable(pte));
		if (pte_get_forcecache(pte)) {
			/* Upgrade "force any cpu" to "No L3" for immutable. */
			if (hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_TILE_L3
			    && pte_get_anyhome(pte)) {
				pte = hv_pte_set_mode(pte,
						      HV_PTE_MODE_CACHE_NO_L3);
			}
		} else
#if CHIP_HAS_CBOX_HOME_MAP()
		if (hash_default)
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		else
#endif
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		pte = hv_pte_set_nc(pte);
		break;

#if CHIP_HAS_CBOX_HOME_MAP()
	case PAGE_HOME_HASH:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		break;
#endif

	default:
		BUG_ON(home < 0 || home >= NR_CPUS ||
		       !cpu_is_valid_lotar(home));
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
		pte = set_remote_cache_cpu(pte, home);
		break;
	}
#if CHIP_HAS_NC_AND_NOALLOC_BITS()
	if (noallocl2)
		pte = hv_pte_set_no_alloc_l2(pte);

	/* Simplify "no local and no l3" to "uncached" */
	if (hv_pte_get_no_alloc_l2(pte) && hv_pte_get_no_alloc_l1(pte) &&
	    hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3) {
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
	}
#endif

	/* Checking this case here gives a better panic than from the hv. */
	BUG_ON(hv_pte_get_mode(pte) == 0);

	return pte;
}
EXPORT_SYMBOL(pte_set_home);
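/*
 * Illustrative use (an assumption, not from this file): rehome a
 * kernel PTE so its cache lines live in cpu 3's L2, the way
 * homecache_change_page_home() below applies it per page:
 *
 *	pte = pte_set_home(pte, 3);
 *
 * The symbolic homes (PAGE_HOME_UNCACHED, PAGE_HOME_IMMUTABLE, etc.)
 * select the corresponding PTE modes instead of a specific tile.
 */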
/*
 * The routines in this section are the "static" versions of the normal
 * dynamic homecaching routines; they just set the home cache
 * of a kernel page once, and require a full-chip cache/TLB flush,
 * so they're not suitable for anything but infrequent use.
 */

#if CHIP_HAS_CBOX_HOME_MAP()
static inline int initial_page_home(void) { return PAGE_HOME_HASH; }
#else
static inline int initial_page_home(void) { return 0; }
#endif
int page_home(struct page *page)
{
	if (PageHighMem(page)) {
		return initial_page_home();
	} else {
		unsigned long kva = (unsigned long)page_address(page);
		return pte_to_home(*virt_to_pte(NULL, kva));
	}
}
EXPORT_SYMBOL(page_home);
void homecache_change_page_home(struct page *page, int order, int home)
{
	int i, pages = (1 << order);
	unsigned long kva;

	BUG_ON(PageHighMem(page));
	BUG_ON(page_count(page) > 1);
	BUG_ON(page_mapcount(page) != 0);
	kva = (unsigned long) page_address(page);
	flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
		     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
		     NULL, 0);

	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
		pte_t *ptep = virt_to_pte(NULL, kva);
		pte_t pteval = *ptep;
		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
		__set_pte(ptep, pte_set_home(pteval, home));
	}
}
struct page *homecache_alloc_pages(gfp_t gfp_mask,
				   unsigned int order, int home)
{
	struct page *page;
	BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
	page = alloc_pages(gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}
EXPORT_SYMBOL(homecache_alloc_pages);
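/*
 * Illustrative pairing (an assumption, not from this file): allocate a
 * lowmem page homed on the current cpu and free it again, letting
 * homecache_free_pages() below reset the home first:
 *
 *	struct page *page = homecache_alloc_pages(GFP_KERNEL, 0,
 *						  smp_processor_id());
 *	if (page) {
 *		void *va = page_address(page);
 *		...
 *		homecache_free_pages((unsigned long)va, 0);
 *	}
 */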
struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
					unsigned int order, int home)
{
	struct page *page;
	BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
	page = alloc_pages_node(nid, gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}
void homecache_free_pages(unsigned long addr, unsigned int order)
{
	struct page *page;

	if (addr == 0)
		return;

	VM_BUG_ON(!virt_addr_valid((void *)addr));
	page = virt_to_page((void *)addr);
	if (put_page_testzero(page)) {
		homecache_change_page_home(page, order, initial_page_home());
		if (order == 0) {
			free_hot_cold_page(page, 0);
		} else {
			init_page_count(page);
			__free_pages(page, order);
		}
	}
}