/*
 * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include "mali_kernel_common.h"
#include "mali_osk.h"
#include "mali_ukk.h"
#include "mali_uk_types.h"
#include "mali_mmu_page_directory.h"
#include "mali_memory.h"
#include "mali_l2_cache.h"
19 static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data);
21 u32 mali_allocate_empty_page(mali_io_address *virt_addr)
23 _mali_osk_errcode_t err;
24 mali_io_address mapping;
25 mali_dma_addr address;
27 if (_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&address, &mapping)) {
28 /* Allocation failed */
29 MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to get table page for empty pgdir\n"));
33 MALI_DEBUG_ASSERT_POINTER(mapping);
35 err = fill_page(mapping, 0);
36 if (_MALI_OSK_ERR_OK != err) {
37 mali_mmu_release_table_page(address, mapping);
38 MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to zero page\n"));
46 void mali_free_empty_page(mali_dma_addr address, mali_io_address virt_addr)
48 if (MALI_INVALID_PAGE != address) {
49 mali_mmu_release_table_page(address, virt_addr);
53 _mali_osk_errcode_t mali_create_fault_flush_pages(mali_dma_addr *page_directory,
54 mali_io_address *page_directory_mapping,
55 mali_dma_addr *page_table, mali_io_address *page_table_mapping,
56 mali_dma_addr *data_page, mali_io_address *data_page_mapping)
58 _mali_osk_errcode_t err;
60 err = mali_mmu_get_table_page(data_page, data_page_mapping);
61 if (_MALI_OSK_ERR_OK == err) {
62 err = mali_mmu_get_table_page(page_table, page_table_mapping);
63 if (_MALI_OSK_ERR_OK == err) {
64 err = mali_mmu_get_table_page(page_directory, page_directory_mapping);
65 if (_MALI_OSK_ERR_OK == err) {
66 fill_page(*data_page_mapping, 0);
67 fill_page(*page_table_mapping, *data_page | MALI_MMU_FLAGS_DEFAULT);
68 fill_page(*page_directory_mapping, *page_table | MALI_MMU_FLAGS_PRESENT);
71 mali_mmu_release_table_page(*page_table, *page_table_mapping);
72 *page_table = MALI_INVALID_PAGE;
74 mali_mmu_release_table_page(*data_page, *data_page_mapping);
75 *data_page = MALI_INVALID_PAGE;
80 void mali_destroy_fault_flush_pages(
81 mali_dma_addr *page_directory, mali_io_address *page_directory_mapping,
82 mali_dma_addr *page_table, mali_io_address *page_table_mapping,
83 mali_dma_addr *data_page, mali_io_address *data_page_mapping)
85 if (MALI_INVALID_PAGE != *page_directory) {
86 mali_mmu_release_table_page(*page_directory, *page_directory_mapping);
87 *page_directory = MALI_INVALID_PAGE;
88 *page_directory_mapping = NULL;
91 if (MALI_INVALID_PAGE != *page_table) {
92 mali_mmu_release_table_page(*page_table, *page_table_mapping);
93 *page_table = MALI_INVALID_PAGE;
94 *page_table_mapping = NULL;
97 if (MALI_INVALID_PAGE != *data_page) {
98 mali_mmu_release_table_page(*data_page, *data_page_mapping);
99 *data_page = MALI_INVALID_PAGE;
100 *data_page_mapping = NULL;
104 static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data)
107 MALI_DEBUG_ASSERT_POINTER(mapping);
109 for (i = 0; i < MALI_MMU_PAGE_SIZE / 4; i++) {
110 _mali_osk_mem_iowrite32_relaxed(mapping, i * sizeof(u32), data);
112 _mali_osk_mem_barrier();
116 _mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
118 const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
119 const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
120 _mali_osk_errcode_t err;
121 mali_io_address pde_mapping;
122 mali_dma_addr pde_phys;
125 if (last_pde < first_pde)
126 return _MALI_OSK_ERR_INVALID_ARGS;
128 for (i = first_pde; i <= last_pde; i++) {
129 if (0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped,
130 i * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)) {
131 /* Page table not present */
132 MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
133 MALI_DEBUG_ASSERT(NULL == pagedir->page_entries_mapped[i]);
135 err = mali_mmu_get_table_page(&pde_phys, &pde_mapping);
136 if (_MALI_OSK_ERR_OK != err) {
137 MALI_PRINT_ERROR(("Failed to allocate page table page.\n"));
140 pagedir->page_entries_mapped[i] = pde_mapping;
142 /* Update PDE, mark as present */
143 _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32),
144 pde_phys | MALI_MMU_FLAGS_PRESENT);
146 MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
149 if (first_pde == last_pde) {
150 pagedir->page_entries_usage_count[i] += size / MALI_MMU_PAGE_SIZE;
151 } else if (i == first_pde) {
152 start_address = i * MALI_MMU_VIRTUAL_PAGE_SIZE;
153 page_count = (start_address + MALI_MMU_VIRTUAL_PAGE_SIZE - mali_address) / MALI_MMU_PAGE_SIZE;
154 pagedir->page_entries_usage_count[i] += page_count;
155 } else if (i == last_pde) {
156 start_address = i * MALI_MMU_VIRTUAL_PAGE_SIZE;
157 page_count = (mali_address + size - start_address) / MALI_MMU_PAGE_SIZE;
158 pagedir->page_entries_usage_count[i] += page_count;
160 pagedir->page_entries_usage_count[i] = 1024;
163 _mali_osk_write_mem_barrier();
165 return _MALI_OSK_ERR_OK;
168 MALI_STATIC_INLINE void mali_mmu_zero_pte(mali_io_address page_table, u32 mali_address, u32 size)
171 const int first_pte = MALI_MMU_PTE_ENTRY(mali_address);
172 const int last_pte = MALI_MMU_PTE_ENTRY(mali_address + size - 1);
174 for (i = first_pte; i <= last_pte; i++) {
175 _mali_osk_mem_iowrite32_relaxed(page_table, i * sizeof(u32), 0);
179 static u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index)
181 return (_mali_osk_mem_ioread32(pagedir->page_directory_mapped,
182 index * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
186 _mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
188 const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
189 const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
192 mali_bool pd_changed = MALI_FALSE;
193 u32 pages_to_invalidate[3]; /* hard-coded to 3: max two pages from the PT level plus max one page from PD level */
194 u32 num_pages_inv = 0;
195 mali_bool invalidate_all = MALI_FALSE; /* safety mechanism in case page_entries_usage_count is unreliable */
197 /* For all page directory entries in range. */
198 for (i = first_pde; i <= last_pde; i++) {
199 u32 size_in_pde, offset;
201 MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[i]);
202 MALI_DEBUG_ASSERT(0 != pagedir->page_entries_usage_count[i]);
204 /* Offset into page table, 0 if mali_address is 4MiB aligned */
205 offset = (mali_address & (MALI_MMU_VIRTUAL_PAGE_SIZE - 1));
206 if (left < MALI_MMU_VIRTUAL_PAGE_SIZE - offset) {
209 size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset;
212 pagedir->page_entries_usage_count[i] -= size_in_pde / MALI_MMU_PAGE_SIZE;
214 /* If entire page table is unused, free it */
215 if (0 == pagedir->page_entries_usage_count[i]) {
218 MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
219 /* last reference removed, no need to zero out each PTE */
221 page_phys = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)));
222 page_virt = pagedir->page_entries_mapped[i];
223 pagedir->page_entries_mapped[i] = NULL;
224 _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
226 mali_mmu_release_table_page(page_phys, page_virt);
227 pd_changed = MALI_TRUE;
229 MALI_DEBUG_ASSERT(num_pages_inv < 2);
230 if (num_pages_inv < 2) {
231 pages_to_invalidate[num_pages_inv] = mali_page_directory_get_phys_address(pagedir, i);
234 invalidate_all = MALI_TRUE;
237 /* If part of the page table is still in use, zero the relevant PTEs */
238 mali_mmu_zero_pte(pagedir->page_entries_mapped[i], mali_address, size_in_pde);
242 mali_address += size_in_pde;
244 _mali_osk_write_mem_barrier();
246 /* L2 pages invalidation */
247 if (MALI_TRUE == pd_changed) {
248 MALI_DEBUG_ASSERT(num_pages_inv < 3);
249 if (num_pages_inv < 3) {
250 pages_to_invalidate[num_pages_inv] = pagedir->page_directory;
253 invalidate_all = MALI_TRUE;
257 if (invalidate_all) {
258 mali_l2_cache_invalidate_all();
260 mali_l2_cache_invalidate_all_pages(pages_to_invalidate, num_pages_inv);
266 struct mali_page_directory *mali_mmu_pagedir_alloc(void)
268 struct mali_page_directory *pagedir;
269 _mali_osk_errcode_t err;
272 pagedir = _mali_osk_calloc(1, sizeof(struct mali_page_directory));
273 if (NULL == pagedir) {
277 err = mali_mmu_get_table_page(&phys, &pagedir->page_directory_mapped);
278 if (_MALI_OSK_ERR_OK != err) {
279 _mali_osk_free(pagedir);
283 pagedir->page_directory = (u32)phys;
285 /* Zero page directory */
286 fill_page(pagedir->page_directory_mapped, 0);
291 void mali_mmu_pagedir_free(struct mali_page_directory *pagedir)
293 const int num_page_table_entries = sizeof(pagedir->page_entries_mapped) / sizeof(pagedir->page_entries_mapped[0]);
296 /* Free referenced page tables and zero PDEs. */
297 for (i = 0; i < num_page_table_entries; i++) {
298 if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32(
299 pagedir->page_directory_mapped,
300 sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT)) {
301 mali_dma_addr phys = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
302 i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK;
303 _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
304 mali_mmu_release_table_page(phys, pagedir->page_entries_mapped[i]);
307 _mali_osk_write_mem_barrier();
309 /* Free the page directory page. */
310 mali_mmu_release_table_page(pagedir->page_directory, pagedir->page_directory_mapped);
312 _mali_osk_free(pagedir);
316 void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address,
317 mali_dma_addr phys_address, u32 size, u32 permission_bits)
319 u32 end_address = mali_address + size;
320 u32 mali_phys = (u32)phys_address;
322 /* Map physical pages into MMU page tables */
323 for (; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, mali_phys += MALI_MMU_PAGE_SIZE) {
324 MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
325 _mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)],
326 MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
327 mali_phys | permission_bits);
331 void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr)
334 u32 pde_index, pte_index;
337 pde_index = MALI_MMU_PDE_ENTRY(fault_addr);
338 pte_index = MALI_MMU_PTE_ENTRY(fault_addr);
341 pde = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
342 pde_index * sizeof(u32));
345 if (pde & MALI_MMU_FLAGS_PRESENT) {
346 u32 pte_addr = MALI_MMU_ENTRY_ADDRESS(pde);
348 pte = _mali_osk_mem_ioread32(pagedir->page_entries_mapped[pde_index],
349 pte_index * sizeof(u32));
351 MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table present: %08x\n"
352 "\t\tPTE: %08x, page %08x is %s\n",
353 fault_addr, pte_addr, pte,
354 MALI_MMU_ENTRY_ADDRESS(pte),
355 pte & MALI_MMU_FLAGS_DEFAULT ? "rw" : "not present"));
357 MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table not present: %08x\n",
361 MALI_IGNORE(pagedir);
362 MALI_IGNORE(fault_addr);
366 /* For instrumented */
369 u32 register_writes_size;
370 u32 page_table_dump_size;
374 static _mali_osk_errcode_t writereg(u32 where, u32 what, const char *comment, struct dump_info *info)
377 info->register_writes_size += sizeof(u32) * 2; /* two 32-bit words */
379 if (NULL != info->buffer) {
380 /* check that we have enough space */
381 if (info->buffer_left < sizeof(u32) * 2) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
383 *info->buffer = where;
386 *info->buffer = what;
389 info->buffer_left -= sizeof(u32) * 2;
396 static _mali_osk_errcode_t mali_mmu_dump_page(mali_io_address page, u32 phys_addr, struct dump_info *info)
399 /* 4096 for the page and 4 bytes for the address */
400 const u32 page_size_in_elements = MALI_MMU_PAGE_SIZE / 4;
401 const u32 page_size_in_bytes = MALI_MMU_PAGE_SIZE;
402 const u32 dump_size_in_bytes = MALI_MMU_PAGE_SIZE + 4;
404 info->page_table_dump_size += dump_size_in_bytes;
406 if (NULL != info->buffer) {
407 if (info->buffer_left < dump_size_in_bytes) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
409 *info->buffer = phys_addr;
412 _mali_osk_memcpy(info->buffer, page, page_size_in_bytes);
413 info->buffer += page_size_in_elements;
415 info->buffer_left -= dump_size_in_bytes;
422 static _mali_osk_errcode_t dump_mmu_page_table(struct mali_page_directory *pagedir, struct dump_info *info)
424 MALI_DEBUG_ASSERT_POINTER(pagedir);
425 MALI_DEBUG_ASSERT_POINTER(info);
427 if (NULL != pagedir->page_directory_mapped) {
431 mali_mmu_dump_page(pagedir->page_directory_mapped, pagedir->page_directory, info)
434 for (i = 0; i < 1024; i++) {
435 if (NULL != pagedir->page_entries_mapped[i]) {
437 mali_mmu_dump_page(pagedir->page_entries_mapped[i],
438 _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
439 i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info)
448 static _mali_osk_errcode_t dump_mmu_registers(struct mali_page_directory *pagedir, struct dump_info *info)
450 MALI_CHECK_NO_ERROR(writereg(0x00000000, pagedir->page_directory,
451 "set the page directory address", info));
452 MALI_CHECK_NO_ERROR(writereg(0x00000008, 4, "zap???", info));
453 MALI_CHECK_NO_ERROR(writereg(0x00000008, 0, "enable paging", info));
457 _mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size(_mali_uk_query_mmu_page_table_dump_size_s *args)
459 struct dump_info info = { 0, 0, 0, NULL };
460 struct mali_session_data *session_data;
462 session_data = (struct mali_session_data *)(uintptr_t)(args->ctx);
463 MALI_DEBUG_ASSERT_POINTER(session_data);
464 MALI_DEBUG_ASSERT_POINTER(args);
466 MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
467 MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
468 args->size = info.register_writes_size + info.page_table_dump_size;
472 _mali_osk_errcode_t _mali_ukk_dump_mmu_page_table(_mali_uk_dump_mmu_page_table_s *args)
474 struct dump_info info = { 0, 0, 0, NULL };
475 struct mali_session_data *session_data;
477 MALI_DEBUG_ASSERT_POINTER(args);
479 session_data = (struct mali_session_data *)(uintptr_t)(args->ctx);
480 MALI_DEBUG_ASSERT_POINTER(session_data);
482 info.buffer_left = args->size;
483 info.buffer = (u32 *)(uintptr_t)args->buffer;
485 args->register_writes = (uintptr_t)info.buffer;
486 MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
488 args->page_table_dump = (uintptr_t)info.buffer;
489 MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
491 args->register_writes_size = info.register_writes_size;
492 args->page_table_dump_size = info.page_table_dump_size;