/*
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

/**
 * @file mali_kbase_mem.h
 * Base kernel memory APIs
 */

#ifndef _KBASE_MEM_H_
#define _KBASE_MEM_H_

#ifndef _KBASE_H_
#error "Don't include this file directly, use mali_kbase.h instead"
#endif

#include <linux/kref.h>
#ifdef CONFIG_KDS
#include <linux/kds.h>
#endif /* CONFIG_KDS */
#ifdef CONFIG_UMP
#include <linux/ump.h>
#endif /* CONFIG_UMP */
#include "mali_base_kernel.h"
#include <mali_kbase_hw.h>
#include "mali_kbase_pm.h"
#include "mali_kbase_defs.h"
#if defined(CONFIG_MALI_GATOR_SUPPORT)
#include "mali_kbase_gator.h"
#endif
/* Required for kbase_mem_evictable_unmake */
#include "mali_kbase_mem_linux.h"
/* Part of the workaround for uTLB invalid pages is to ensure we grow/shrink tmem by 4 pages at a time */
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316 (2) /* round to 4 pages */

/* Part of the workaround for PRLAM-9630 requires us to grow/shrink memory by 8 pages.
 * The MMU reads in 8 page table entries from memory at a time. If more than one page fault occurs within the same
 * 8 pages and the page tables are updated accordingly, the MMU does not re-read the page table entries from memory
 * for the subsequent updates and so generates duplicate page faults, because the page table information it is using
 * is no longer valid.
 */
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630 (3) /* round to 8 pages */

#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2 (0) /* round to 1 page */

/* This must always be a power of 2 */
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2)
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_8316 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316)
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_9630 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630)
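
/*
 * Illustrative sketch (not part of the driver): because the block sizes above
 * are powers of two, a requested page count can be rounded up to the
 * workaround granularity with simple mask arithmetic. The helper name is
 * hypothetical and exists only for documentation.
 */
static inline size_t kbasep_example_round_to_blocksize(size_t nr_pages,
		size_t blocksize_pages)
{
	/* blocksize_pages must be a power of two, e.g.
	 * KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_8316 (4) or
	 * KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_9630 (8) */
	return (nr_pages + blocksize_pages - 1) & ~(blocksize_pages - 1);
}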
struct kbase_cpu_mapping {
	struct list_head mappings_list;
	struct kbase_mem_phy_alloc *alloc;
	struct kbase_context *kctx;
	struct kbase_va_region *region;
	unsigned long vm_start;
};
enum kbase_memory_type {
	KBASE_MEM_TYPE_NATIVE,
	KBASE_MEM_TYPE_IMPORTED_UMP,
	KBASE_MEM_TYPE_IMPORTED_UMM,
	KBASE_MEM_TYPE_IMPORTED_USER_BUF,
	KBASE_MEM_TYPE_ALIAS,
	KBASE_MEM_TYPE_TB
};
/* internal structure, mirroring base_mem_aliasing_info,
 * but with alloc instead of a gpu va (handle) */
struct kbase_aliased {
	struct kbase_mem_phy_alloc *alloc; /* NULL for special, non-NULL for native */
	u64 offset; /* in pages */
	u64 length; /* in pages */
};
/**
 * @brief Physical pages tracking object properties
 */
#define KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED (1ul << 0)
#define KBASE_MEM_PHY_ALLOC_LARGE           (1ul << 1)
/* physical pages tracking object.
 * Set up to track N pages.
 * N not stored here, the creator holds that info.
 * This object only tracks how many elements are actually valid (present).
 * Changing of nents or *pages should only happen if the kbase_mem_phy_alloc is not
 * shared with another region or client. CPU mappings are OK to exist when changing, as
 * long as the tracked mappings objects are updated as part of the change.
 */
struct kbase_mem_phy_alloc {
	struct kref kref; /* number of users of this alloc */
	atomic_t gpu_mappings;
	size_t nents; /* 0..N */
	phys_addr_t *pages; /* N elements, only 0..nents are valid */

	/* kbase_cpu_mappings */
	struct list_head mappings;

	/* Node used to store this allocation on the eviction list */
	struct list_head evict_node;
	/* Physical backing size when the pages were evicted */
	size_t evicted;
	/*
	 * Back reference to the region structure which created this
	 * allocation, or NULL if it has been freed.
	 */
	struct kbase_va_region *reg;

	enum kbase_memory_type type;

	unsigned long properties;

	struct list_head zone_cache;

	/* member in union valid based on @a type */
	union {
#ifdef CONFIG_UMP
		ump_dd_handle ump_handle;
#endif /* CONFIG_UMP */
#if defined(CONFIG_DMA_SHARED_BUFFER)
		struct {
			struct dma_buf *dma_buf;
			struct dma_buf_attachment *dma_attachment;
			unsigned int current_mapping_usage_count;
			struct sg_table *sgt;
		} umm;
#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
		struct kbase_aliased *aliased;
		/* Used by type = (KBASE_MEM_TYPE_NATIVE, KBASE_MEM_TYPE_TB) */
		struct kbase_context *kctx;
		struct {
			unsigned long address;
			unsigned long nr_pages;
			unsigned int current_mapping_usage_count;
			struct mm_struct *mm;
			dma_addr_t *dma_addrs;
		} user_buf;
	} imported;
};
static inline void kbase_mem_phy_alloc_gpu_mapped(struct kbase_mem_phy_alloc *alloc)
{
	KBASE_DEBUG_ASSERT(alloc);
	/* we only track mappings of NATIVE buffers */
	if (alloc->type == KBASE_MEM_TYPE_NATIVE)
		atomic_inc(&alloc->gpu_mappings);
}

static inline void kbase_mem_phy_alloc_gpu_unmapped(struct kbase_mem_phy_alloc *alloc)
{
	KBASE_DEBUG_ASSERT(alloc);
	/* we only track mappings of NATIVE buffers */
	if (alloc->type == KBASE_MEM_TYPE_NATIVE)
		if (0 > atomic_dec_return(&alloc->gpu_mappings)) {
			pr_err("Mismatched %s:\n", __func__);
			dump_stack();
		}
}
void kbase_mem_kref_free(struct kref *kref);

int kbase_mem_init(struct kbase_device *kbdev);
void kbase_mem_halt(struct kbase_device *kbdev);
void kbase_mem_term(struct kbase_device *kbdev);

static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_get(struct kbase_mem_phy_alloc *alloc)
{
	kref_get(&alloc->kref);
	return alloc;
}

static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_put(struct kbase_mem_phy_alloc *alloc)
{
	kref_put(&alloc->kref, kbase_mem_kref_free);
	return NULL;
}
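
/*
 * Illustrative sketch (not part of the driver): holding and dropping a
 * reference on a kbase_mem_phy_alloc with the helpers above. The function
 * name is hypothetical and exists only for documentation.
 */
static inline void kbasep_example_alloc_ref_usage(struct kbase_mem_phy_alloc *alloc)
{
	struct kbase_mem_phy_alloc *ref;

	/* Take a reference for the duration of the access */
	ref = kbase_mem_phy_alloc_get(alloc);

	/* ... safe to use ref->pages[0..ref->nents-1] here ... */

	/* kbase_mem_phy_alloc_put() returns NULL so the local can be cleared */
	ref = kbase_mem_phy_alloc_put(ref);
	KBASE_DEBUG_ASSERT(ref == NULL);
}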
/**
 * A GPU memory region, and attributes for CPU mappings.
 */
struct kbase_va_region {
	struct rb_node rblink;
	struct list_head link;

	struct kbase_context *kctx; /* Backlink to base context */

	u64 start_pfn; /* The PFN in GPU space */
	size_t nr_pages;

#define KBASE_REG_FREE              (1ul << 0)
/* CPU write access */
#define KBASE_REG_CPU_WR            (1ul << 1)
/* GPU write access */
#define KBASE_REG_GPU_WR            (1ul << 2)
/* No eXecute flag */
#define KBASE_REG_GPU_NX            (1ul << 3)
#define KBASE_REG_CPU_CACHED        (1ul << 4)
#define KBASE_REG_GPU_CACHED        (1ul << 5)

#define KBASE_REG_GROWABLE          (1ul << 6)
/* Can grow on pf? */
#define KBASE_REG_PF_GROW           (1ul << 7)

/* VA managed by us */
#define KBASE_REG_CUSTOM_VA         (1ul << 8)

/* inner shareable coherency */
#define KBASE_REG_SHARE_IN          (1ul << 9)
/* inner & outer shareable coherency */
#define KBASE_REG_SHARE_BOTH        (1ul << 10)

/* Space for 4 different zones */
#define KBASE_REG_ZONE_MASK         (3ul << 11)
#define KBASE_REG_ZONE(x)           (((x) & 3) << 11)

/* GPU read access */
#define KBASE_REG_GPU_RD            (1ul << 13)
/* CPU read access */
#define KBASE_REG_CPU_RD            (1ul << 14)

/* Aligned for GPU EX in SAME_VA */
#define KBASE_REG_ALIGNED           (1ul << 15)

/* Index of chosen MEMATTR for this region (0..7) */
#define KBASE_REG_MEMATTR_MASK      (7ul << 16)
#define KBASE_REG_MEMATTR_INDEX(x)  (((x) & 7) << 16)
#define KBASE_REG_MEMATTR_VALUE(x)  (((x) & KBASE_REG_MEMATTR_MASK) >> 16)

#define KBASE_REG_SECURE            (1ul << 19)

#define KBASE_REG_DONT_NEED         (1ul << 20)

#define KBASE_REG_ZONE_SAME_VA      KBASE_REG_ZONE(0)

/* only used with 32-bit clients */
/*
 * On a 32bit platform, custom VA should be wired from (4GB + shader region)
 * to the VA limit of the GPU. Unfortunately, the Linux mmap() interface
 * limits us to 2^32 pages (2^44 bytes, see mmap64 man page for reference).
 * So we put the default limit to the maximum possible on Linux and shrink
 * it down, if required by the GPU, during initialization.
 */

/*
 * Dedicated 16MB region for shader code:
 * VA range 0x101000000-0x102000000
 */
#define KBASE_REG_ZONE_EXEC         KBASE_REG_ZONE(1)
#define KBASE_REG_ZONE_EXEC_BASE    (0x101000000ULL >> PAGE_SHIFT)
#define KBASE_REG_ZONE_EXEC_SIZE    ((16ULL * 1024 * 1024) >> PAGE_SHIFT)

#define KBASE_REG_ZONE_CUSTOM_VA      KBASE_REG_ZONE(2)
#define KBASE_REG_ZONE_CUSTOM_VA_BASE (KBASE_REG_ZONE_EXEC_BASE + KBASE_REG_ZONE_EXEC_SIZE) /* Starting after KBASE_REG_ZONE_EXEC */
#define KBASE_REG_ZONE_CUSTOM_VA_SIZE (((1ULL << 44) >> PAGE_SHIFT) - KBASE_REG_ZONE_CUSTOM_VA_BASE)
/* end 32-bit clients only */

	unsigned long flags;

	size_t extent; /* nr of pages alloc'd on PF */

	struct kbase_mem_phy_alloc *cpu_alloc; /* the one alloc object we mmap to the CPU when mapping this region */
	struct kbase_mem_phy_alloc *gpu_alloc; /* the one alloc object we mmap to the GPU when mapping this region */

	/* non-NULL if this memory object is a kds_resource */
	struct kds_resource *kds_res;

	/* List head used to store the region in the JIT allocation pool */
	struct list_head jit_node;
};
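
/*
 * Illustrative sketch (not part of the driver): how the region flag macros
 * above combine, using the MEMATTR index encode/decode as the example. The
 * helper is hypothetical and only demonstrates the bit layout.
 */
static inline unsigned long kbasep_example_region_flags(void)
{
	unsigned long flags = 0;

	/* Readable and writable by both CPU and GPU, in the custom VA zone */
	flags |= KBASE_REG_CPU_RD | KBASE_REG_CPU_WR;
	flags |= KBASE_REG_GPU_RD | KBASE_REG_GPU_WR;
	flags |= KBASE_REG_ZONE_CUSTOM_VA;

	/* Select MEMATTR index 1; KBASE_REG_MEMATTR_VALUE() recovers it */
	flags |= KBASE_REG_MEMATTR_INDEX(1);
	KBASE_DEBUG_ASSERT(KBASE_REG_MEMATTR_VALUE(flags) == 1);

	return flags;
}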
/* Common functions */
static inline phys_addr_t *kbase_get_cpu_phy_pages(struct kbase_va_region *reg)
{
	KBASE_DEBUG_ASSERT(reg);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);

	return reg->cpu_alloc->pages;
}

static inline phys_addr_t *kbase_get_gpu_phy_pages(struct kbase_va_region *reg)
{
	KBASE_DEBUG_ASSERT(reg);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);

	return reg->gpu_alloc->pages;
}

static inline size_t kbase_reg_current_backed_size(struct kbase_va_region *reg)
{
	KBASE_DEBUG_ASSERT(reg);
	/* if no alloc object the backed size naturally is 0 */
	if (!reg->cpu_alloc)
		return 0;

	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);

	return reg->cpu_alloc->nents;
}
#define KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD ((size_t)(4*1024)) /* size above which vmalloc is used over kmalloc */
static inline struct kbase_mem_phy_alloc *kbase_alloc_create(size_t nr_pages, enum kbase_memory_type type)
{
	struct kbase_mem_phy_alloc *alloc;
	size_t alloc_size = sizeof(*alloc) + sizeof(*alloc->pages) * nr_pages;
	size_t per_page_size = sizeof(*alloc->pages);

	/* Imported pages may have page private data already in use */
	if (type == KBASE_MEM_TYPE_IMPORTED_USER_BUF) {
		alloc_size += nr_pages *
				sizeof(*alloc->imported.user_buf.dma_addrs);
		per_page_size += sizeof(*alloc->imported.user_buf.dma_addrs);
	}

	/*
	 * Prevent nr_pages*per_page_size + sizeof(*alloc) from
	 * wrapping around.
	 */
	if (nr_pages > ((((size_t) -1) - sizeof(*alloc))
			/ per_page_size))
		return ERR_PTR(-ENOMEM);

	/* Allocate based on the size to reduce internal fragmentation of vmem */
	if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
		alloc = vzalloc(alloc_size);
	else
		alloc = kzalloc(alloc_size, GFP_KERNEL);

	if (!alloc)
		return ERR_PTR(-ENOMEM);

	/* Store allocation method */
	if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
		alloc->properties |= KBASE_MEM_PHY_ALLOC_LARGE;

	kref_init(&alloc->kref);
	atomic_set(&alloc->gpu_mappings, 0);
	alloc->type = type;
	alloc->pages = (void *)(alloc + 1);
	INIT_LIST_HEAD(&alloc->mappings);
	INIT_LIST_HEAD(&alloc->zone_cache);

	if (type == KBASE_MEM_TYPE_IMPORTED_USER_BUF)
		alloc->imported.user_buf.dma_addrs =
				(void *) (alloc->pages + nr_pages);

	return alloc;
}
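
/*
 * Illustrative sketch (not part of the driver): creating a tracking object
 * with kbase_alloc_create() and releasing the creation reference again. The
 * helper name is hypothetical; error handling follows the IS_ERR() convention
 * used above.
 */
static inline int kbasep_example_create_alloc(struct kbase_context *kctx,
		size_t nr_pages)
{
	struct kbase_mem_phy_alloc *alloc;

	alloc = kbase_alloc_create(nr_pages, KBASE_MEM_TYPE_NATIVE);
	if (IS_ERR(alloc))
		return PTR_ERR(alloc);

	/* Native allocations record their owning context */
	alloc->imported.kctx = kctx;

	/* ... back the object with physical pages and map it ... */

	/* Drop the creation reference when no longer needed */
	kbase_mem_phy_alloc_put(alloc);
	return 0;
}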
static inline int kbase_reg_prepare_native(struct kbase_va_region *reg,
		struct kbase_context *kctx)
{
	KBASE_DEBUG_ASSERT(reg);
	KBASE_DEBUG_ASSERT(!reg->cpu_alloc);
	KBASE_DEBUG_ASSERT(!reg->gpu_alloc);
	KBASE_DEBUG_ASSERT(reg->flags & KBASE_REG_FREE);

	reg->cpu_alloc = kbase_alloc_create(reg->nr_pages,
			KBASE_MEM_TYPE_NATIVE);
	if (IS_ERR(reg->cpu_alloc))
		return PTR_ERR(reg->cpu_alloc);
	else if (!reg->cpu_alloc)
		return -ENOMEM;
	reg->cpu_alloc->imported.kctx = kctx;
	INIT_LIST_HEAD(&reg->cpu_alloc->evict_node);
	if (kctx->infinite_cache_active && (reg->flags & KBASE_REG_CPU_CACHED)) {
		reg->gpu_alloc = kbase_alloc_create(reg->nr_pages,
				KBASE_MEM_TYPE_NATIVE);
		reg->gpu_alloc->imported.kctx = kctx;
		INIT_LIST_HEAD(&reg->gpu_alloc->evict_node);
	} else {
		reg->gpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
	}

	INIT_LIST_HEAD(&reg->jit_node);
	reg->flags &= ~KBASE_REG_FREE;
	return 0;
}
static inline int kbase_atomic_add_pages(int num_pages, atomic_t *used_pages)
{
	int new_val = atomic_add_return(num_pages, used_pages);
#if defined(CONFIG_MALI_GATOR_SUPPORT)
	kbase_trace_mali_total_alloc_pages_change((long long int)new_val);
#endif
	return new_val;
}

static inline int kbase_atomic_sub_pages(int num_pages, atomic_t *used_pages)
{
	int new_val = atomic_sub_return(num_pages, used_pages);
#if defined(CONFIG_MALI_GATOR_SUPPORT)
	kbase_trace_mali_total_alloc_pages_change((long long int)new_val);
#endif
	return new_val;
}
/* Max size for kbdev memory pool (in pages) */
#define KBASE_MEM_POOL_MAX_SIZE_KBDEV (SZ_64M >> PAGE_SHIFT)

/* Max size for kctx memory pool (in pages) */
#define KBASE_MEM_POOL_MAX_SIZE_KCTX  (SZ_64M >> PAGE_SHIFT)
/**
 * kbase_mem_pool_init - Create a memory pool for a kbase device
 * @pool:      Memory pool to initialize
 * @max_size:  Maximum number of free pages the pool can hold
 * @kbdev:     Kbase device where memory is used
 * @next_pool: Pointer to the next pool or NULL.
 *
 * Allocations from @pool are in whole pages. Each @pool has a free list where
 * pages can be quickly allocated from. The free list is initially empty and
 * filled whenever pages are freed back to the pool. The number of free pages
 * in the pool will in general not exceed @max_size, but the pool may in
 * certain corner cases grow above @max_size.
 *
 * If @next_pool is not NULL, we will allocate from @next_pool before going to
 * the kernel allocator. Similarly pages can spill over to @next_pool when
 * @pool is full. Pages are zeroed before they spill over to another pool, to
 * prevent leaking information between applications.
 *
 * A shrinker is registered so that Linux mm can reclaim pages from the pool as
 * needed.
 *
 * Return: 0 on success, negative -errno on error
 */
int kbase_mem_pool_init(struct kbase_mem_pool *pool,
		size_t max_size,
		struct kbase_device *kbdev,
		struct kbase_mem_pool *next_pool);
/**
 * kbase_mem_pool_term - Destroy a memory pool
 * @pool: Memory pool to destroy
 *
 * Pages in the pool will spill over to @next_pool (if available) or be freed
 * to the kernel.
 */
void kbase_mem_pool_term(struct kbase_mem_pool *pool);
/**
 * kbase_mem_pool_alloc - Allocate a page from memory pool
 * @pool: Memory pool to allocate from
 *
 * Allocations from the pool are made as follows:
 * 1. If there are free pages in the pool, allocate a page from @pool.
 * 2. Otherwise, if @next_pool is not NULL and has free pages, allocate a page
 *    from @next_pool.
 * 3. Finally, allocate a page from the kernel.
 *
 * Return: Pointer to allocated page, or NULL if allocation failed.
 */
struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool);

/**
 * kbase_mem_pool_free - Free a page to memory pool
 * @pool:  Memory pool where page should be freed
 * @page:  Page to free to the pool
 * @dirty: Whether some of the page may be dirty in the cache.
 *
 * Pages are freed to the pool as follows:
 * 1. If @pool is not full, add @page to @pool.
 * 2. Otherwise, if @next_pool is not NULL and not full, add @page to
 *    @next_pool.
 * 3. Finally, free @page to the kernel.
 */
void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *page,
		bool dirty);
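
/*
 * Illustrative sketch (not part of the driver): a minimal pool lifecycle using
 * the API declared above. The max_size of 512 pages and the NULL next_pool are
 * arbitrary choices for the example; the helper itself is hypothetical.
 */
static inline int kbasep_example_pool_usage(struct kbase_mem_pool *pool,
		struct kbase_device *kbdev)
{
	struct page *p;
	int err;

	/* Stand-alone pool holding at most 512 free pages, no next_pool */
	err = kbase_mem_pool_init(pool, 512, kbdev, NULL);
	if (err)
		return err;

	/* Takes a page from the free list, a next_pool, or the kernel */
	p = kbase_mem_pool_alloc(pool);
	if (p)
		kbase_mem_pool_free(pool, p, false);

	/* Remaining free pages spill to next_pool or go back to the kernel */
	kbase_mem_pool_term(pool);
	return 0;
}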
/**
 * kbase_mem_pool_alloc_pages - Allocate pages from memory pool
 * @pool:     Memory pool to allocate from
 * @nr_pages: Number of pages to allocate
 * @pages:    Pointer to array where the physical address of the allocated
 *            pages will be stored.
 *
 * Like kbase_mem_pool_alloc() but optimized for allocating many pages.
 *
 * Return: 0 on success, negative -errno on error
 */
int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
		phys_addr_t *pages);

/**
 * kbase_mem_pool_free_pages - Free pages to memory pool
 * @pool:      Memory pool where pages should be freed
 * @nr_pages:  Number of pages to free
 * @pages:     Pointer to array holding the physical addresses of the pages to
 *             free.
 * @dirty:     Whether any pages may be dirty in the cache.
 * @reclaimed: Whether the pages were reclaimable and thus should bypass
 *             the pool and go straight to the kernel.
 *
 * Like kbase_mem_pool_free() but optimized for freeing many pages.
 */
void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
		phys_addr_t *pages, bool dirty, bool reclaimed);
/**
 * kbase_mem_pool_size - Get number of free pages in memory pool
 * @pool: Memory pool to inspect
 *
 * Note: the size of the pool may in certain corner cases exceed @max_size!
 *
 * Return: Number of free pages in the pool
 */
static inline size_t kbase_mem_pool_size(struct kbase_mem_pool *pool)
{
	return ACCESS_ONCE(pool->cur_size);
}

/**
 * kbase_mem_pool_max_size - Get maximum number of free pages in memory pool
 * @pool: Memory pool to inspect
 *
 * Return: Maximum number of free pages in the pool
 */
static inline size_t kbase_mem_pool_max_size(struct kbase_mem_pool *pool)
{
	return pool->max_size;
}
/**
 * kbase_mem_pool_set_max_size - Set maximum number of free pages in memory pool
 * @pool:     Memory pool to inspect
 * @max_size: Maximum number of free pages the pool can hold
 *
 * If @max_size is reduced, the pool will be shrunk to adhere to the new limit.
 * For details see kbase_mem_pool_shrink().
 */
void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size);
/**
 * kbase_mem_pool_trim - Grow or shrink the pool to a new size
 * @pool:     Memory pool to trim
 * @new_size: New number of pages in the pool
 *
 * If @new_size > @cur_size, fill the pool with new pages from the kernel, but
 * not above @max_size.
 * If @new_size < @cur_size, shrink the pool by freeing pages to the kernel.
 *
 * Return: The new size of the pool
 */
size_t kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size);
int kbase_region_tracker_init(struct kbase_context *kctx);
int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages);
void kbase_region_tracker_term(struct kbase_context *kctx);

struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(struct kbase_context *kctx, u64 gpu_addr);

/**
 * @brief Check that a pointer is actually a valid region.
 *
 * Must be called with context lock held.
 */
struct kbase_va_region *kbase_region_tracker_find_region_base_address(struct kbase_context *kctx, u64 gpu_addr);

struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone);
void kbase_free_alloced_region(struct kbase_va_region *reg);
int kbase_add_va_region(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align);
bool kbase_check_alloc_flags(unsigned long flags);
bool kbase_check_import_flags(unsigned long flags);
void kbase_update_region_flags(struct kbase_context *kctx,
		struct kbase_va_region *reg, unsigned long flags);

void kbase_gpu_vm_lock(struct kbase_context *kctx);
void kbase_gpu_vm_unlock(struct kbase_context *kctx);
int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size);

int kbase_mmu_init(struct kbase_context *kctx);
void kbase_mmu_term(struct kbase_context *kctx);

phys_addr_t kbase_mmu_alloc_pgd(struct kbase_context *kctx);
void kbase_mmu_free_pgd(struct kbase_context *kctx);
int kbase_mmu_insert_pages(struct kbase_context *kctx, u64 vpfn,
		phys_addr_t *phys, size_t nr,
		unsigned long flags);
int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
		phys_addr_t phys, size_t nr,
		unsigned long flags);

int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr);
int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, phys_addr_t *phys, size_t nr, unsigned long flags);
/**
 * @brief Register region and map it on the GPU.
 *
 * Call kbase_add_va_region() and map the region on the GPU.
 */
int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align);

/**
 * @brief Remove the region from the GPU and unregister it.
 *
 * Must be called with context lock held.
 */
int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg);

/**
 * The caller has the following locking conditions:
 * - It must hold kbase_as::transaction_mutex on kctx's address space
 * - It must hold the kbasep_js_device_data::runpool_irq::lock
 */
void kbase_mmu_update(struct kbase_context *kctx);

/**
 * The caller has the following locking conditions:
 * - It must hold kbase_as::transaction_mutex on kctx's address space
 * - It must hold the kbasep_js_device_data::runpool_irq::lock
 */
void kbase_mmu_disable(struct kbase_context *kctx);
/**
 * kbase_mmu_disable_as() - Set the MMU in unmapped mode for an address space.
 *
 * @kbdev: Kbase device
 * @as_nr: Number of the address space for which the MMU
 *         should be set in unmapped mode.
 *
 * The caller must hold kbdev->as[as_nr].transaction_mutex.
 */
void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr);

void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
/** Dump the MMU tables to a buffer
 *
 * This function allocates a buffer (of @c nr_pages pages) to hold a dump of the MMU tables and fills it. If the
 * buffer is too small then the return value will be NULL.
 *
 * The GPU vm lock must be held when calling this function.
 *
 * The buffer returned should be freed with @ref vfree when it is no longer required.
 *
 * @param[in] kctx     The kbase context to dump
 * @param[in] nr_pages The number of pages to allocate for the buffer.
 *
 * @return The address of the buffer containing the MMU dump or NULL on error (including if the @c nr_pages is too
 * small).
 */
void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages);
int kbase_sync_now(struct kbase_context *kctx, struct base_syncset *syncset);
void kbase_sync_single(struct kbase_context *kctx, phys_addr_t cpu_pa,
		phys_addr_t gpu_pa, off_t offset, size_t size,
		enum kbase_sync_type sync_fn);
void kbase_pre_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr);
void kbase_post_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr);
/* OS specific functions */
int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr);
int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *reg);
void kbase_os_mem_map_lock(struct kbase_context *kctx);
void kbase_os_mem_map_unlock(struct kbase_context *kctx);
/**
 * @brief Update the memory allocation counters for the current process
 *
 * OS specific call to update the current memory allocation counters for the current process with
 * the supplied delta.
 *
 * @param[in] kctx  The kbase context
 * @param[in] pages The desired delta to apply to the memory usage counters.
 */
void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages);
/**
 * @brief Add to the memory allocation counters for the current process
 *
 * OS specific call to add to the current memory allocation counters for the current process by
 * the supplied amount.
 *
 * @param[in] kctx  The kernel base context used for the allocation.
 * @param[in] pages The desired delta to apply to the memory usage counters.
 */
static inline void kbase_process_page_usage_inc(struct kbase_context *kctx, int pages)
{
	kbasep_os_process_page_usage_update(kctx, pages);
}
/**
 * @brief Subtract from the memory allocation counters for the current process
 *
 * OS specific call to subtract from the current memory allocation counters for the current process by
 * the supplied amount.
 *
 * @param[in] kctx  The kernel base context used for the allocation.
 * @param[in] pages The desired delta to apply to the memory usage counters.
 */
static inline void kbase_process_page_usage_dec(struct kbase_context *kctx, int pages)
{
	kbasep_os_process_page_usage_update(kctx, 0 - pages);
}
/**
 * @brief Find the offset of the CPU mapping of a memory allocation containing
 *        a given address range
 *
 * Searches for a CPU mapping of any part of the region starting at @p gpu_addr
 * that fully encloses the CPU virtual address range specified by @p uaddr and
 * @p size. Returns a failure indication if only part of the address range lies
 * within a CPU mapping, or the address range lies within a CPU mapping of a
 * different region.
 *
 * @param[in,out] kctx     The kernel base context used for the allocation.
 * @param[in]     gpu_addr GPU address of the start of the allocated region
 *                         within which to search.
 * @param[in]     uaddr    Start of the CPU virtual address range.
 * @param[in]     size     Size of the CPU virtual address range (in bytes).
 * @param[out]    offset   The offset from the start of the allocation to the
 *                         specified CPU virtual address.
 *
 * @return 0 if offset was obtained successfully. Error code otherwise.
 */
int kbasep_find_enclosing_cpu_mapping_offset(struct kbase_context *kctx,
		u64 gpu_addr, unsigned long uaddr, size_t size, u64 *offset);
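
/*
 * Illustrative sketch (not part of the driver): translating a CPU virtual
 * address inside a mapping of @p gpu_addr into an offset within the
 * allocation. The helper is hypothetical and only shows the call pattern.
 */
static inline int kbasep_example_find_offset(struct kbase_context *kctx,
		u64 gpu_addr, unsigned long uaddr, size_t size)
{
	u64 offset;
	int err;

	err = kbasep_find_enclosing_cpu_mapping_offset(kctx, gpu_addr,
			uaddr, size, &offset);
	if (err)
		return err;

	/* offset now holds the byte offset of uaddr within the allocation */
	return 0;
}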
enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer);
void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
/**
 * @brief Allocates physical pages.
 *
 * Allocates \a nr_pages_requested and updates the alloc object.
 *
 * @param[in] alloc              allocation object to add pages to
 * @param[in] nr_pages_requested number of physical pages to allocate
 *
 * @return 0 if all pages have been successfully allocated. Error code otherwise
 */
int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_requested);

/**
 * @brief Free physical pages.
 *
 * Frees \a nr_pages_to_free and updates the alloc object.
 *
 * @param[in] alloc            allocation object to free pages from
 * @param[in] nr_pages_to_free number of physical pages to free
 */
int kbase_free_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_to_free);
static inline void kbase_set_dma_addr(struct page *p, dma_addr_t dma_addr)
{
	SetPagePrivate(p);
	if (sizeof(dma_addr_t) > sizeof(p->private)) {
		/* on 32-bit ARM with LPAE dma_addr_t becomes larger, but the
		 * private field stays the same. So we have to be clever and
		 * use the fact that we only store DMA addresses of whole pages,
		 * so the low bits should be zero */
		KBASE_DEBUG_ASSERT(!(dma_addr & (PAGE_SIZE - 1)));
		set_page_private(p, dma_addr >> PAGE_SHIFT);
	} else {
		set_page_private(p, dma_addr);
	}
}

static inline dma_addr_t kbase_dma_addr(struct page *p)
{
	if (sizeof(dma_addr_t) > sizeof(p->private))
		return ((dma_addr_t)page_private(p)) << PAGE_SHIFT;

	return (dma_addr_t)page_private(p);
}

static inline void kbase_clear_dma_addr(struct page *p)
{
	ClearPagePrivate(p);
}
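
/*
 * Illustrative sketch (not part of the driver): storing and retrieving a DMA
 * address on a struct page with the helpers above. The helper is hypothetical.
 */
static inline void kbasep_example_dma_addr_usage(struct page *p,
		dma_addr_t dma_addr)
{
	/* Stash the (page-aligned) DMA address in page->private */
	kbase_set_dma_addr(p, dma_addr);

	/* ... later, recover it, e.g. for a cache maintenance call ... */
	WARN_ON(kbase_dma_addr(p) != dma_addr);

	kbase_clear_dma_addr(p);
}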
/**
 * @brief Process a bus or page fault.
 *
 * This function will process a fault on a specific address space.
 *
 * @param[in] kbdev The @ref kbase_device the fault happened on
 * @param[in] kctx  The @ref kbase_context for the faulting address space if
 *                  one was found.
 * @param[in] as    The address space that has the fault
 */
void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
		struct kbase_context *kctx, struct kbase_as *as);
/**
 * @brief Process a page fault.
 * @param[in] data work_struct passed by queue_work()
 */
void page_fault_worker(struct work_struct *data);

/**
 * @brief Process a bus fault.
 * @param[in] data work_struct passed by queue_work()
 */
void bus_fault_worker(struct work_struct *data);
/**
 * @brief Flush MMU workqueues.
 *
 * This function will cause any outstanding page or bus faults to be processed.
 * It should be called prior to powering off the GPU.
 *
 * @param[in] kbdev Device pointer
 */
void kbase_flush_mmu_wqs(struct kbase_device *kbdev);
/**
 * kbase_sync_single_for_device - update physical memory and give GPU ownership
 * @kbdev:  Device pointer
 * @handle: DMA address of region
 * @size:   Size of region to sync
 * @dir:    DMA data direction
 */
void kbase_sync_single_for_device(struct kbase_device *kbdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir);

/**
 * kbase_sync_single_for_cpu - update physical memory and give CPU ownership
 * @kbdev:  Device pointer
 * @handle: DMA address of region
 * @size:   Size of region to sync
 * @dir:    DMA data direction
 */
void kbase_sync_single_for_cpu(struct kbase_device *kbdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir);
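
/*
 * Illustrative sketch (not part of the driver): the usual pairing of the two
 * sync helpers above around a CPU access to a page the GPU also uses. The
 * helper, the byte count, and the choice of DMA_BIDIRECTIONAL are
 * hypothetical example values.
 */
static inline void kbasep_example_cpu_access(struct kbase_device *kbdev,
		struct page *p, size_t bytes)
{
	dma_addr_t dma_addr = kbase_dma_addr(p);

	/* Give the CPU ownership before reading data written by the GPU */
	kbase_sync_single_for_cpu(kbdev, dma_addr, bytes, DMA_BIDIRECTIONAL);

	/* ... CPU reads/writes the page contents here ... */

	/* Hand ownership back to the GPU once the CPU is done */
	kbase_sync_single_for_device(kbdev, dma_addr, bytes, DMA_BIDIRECTIONAL);
}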
/**
 * kbase_jit_debugfs_add - Add per context debugfs entry for JIT.
 * @kctx: kbase context
 */
void kbase_jit_debugfs_add(struct kbase_context *kctx);

/**
 * kbase_jit_init - Initialize the JIT memory pool management
 * @kctx: kbase context
 *
 * Returns zero on success or negative error number on failure.
 */
int kbase_jit_init(struct kbase_context *kctx);
/**
 * kbase_jit_allocate - Allocate JIT memory
 * @kctx: kbase context
 * @info: JIT allocation information
 *
 * Return: JIT allocation on success or NULL on failure.
 */
struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
		struct base_jit_alloc_info *info);

/**
 * kbase_jit_free - Free a JIT allocation
 * @kctx: kbase context
 * @reg:  JIT allocation
 *
 * Frees a JIT allocation and places it into the free pool for later reuse.
 */
void kbase_jit_free(struct kbase_context *kctx, struct kbase_va_region *reg);

/**
 * kbase_jit_backing_lost - Inform JIT that an allocation has lost backing
 * @reg: JIT allocation
 */
void kbase_jit_backing_lost(struct kbase_va_region *reg);

/**
 * kbase_jit_evict - Evict a JIT allocation from the pool
 * @kctx: kbase context
 *
 * Evict the least recently used JIT allocation from the pool. This can be
 * required if normal VA allocations are failing due to VA exhaustion.
 *
 * Return: True if a JIT allocation was freed, false otherwise.
 */
bool kbase_jit_evict(struct kbase_context *kctx);

/**
 * kbase_jit_term - Terminate the JIT memory pool management
 * @kctx: kbase context
 */
void kbase_jit_term(struct kbase_context *kctx);
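
/*
 * Illustrative sketch (not part of the driver): a just-in-time allocation
 * round trip using the API above. Filling in base_jit_alloc_info is elided
 * because it is defined elsewhere; the helper is hypothetical.
 */
static inline void kbasep_example_jit_usage(struct kbase_context *kctx,
		struct base_jit_alloc_info *info)
{
	struct kbase_va_region *reg;

	reg = kbase_jit_allocate(kctx, info);
	if (!reg)
		return;

	/* ... the region is available for GPU use here ... */

	/* Return the region to the JIT pool for later reuse */
	kbase_jit_free(kctx, reg);
}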
/**
 * kbase_map_external_resource - Map an external resource to the GPU.
 * @kctx:              kbase context.
 * @reg:               The region to map.
 * @locked_mm:         The mm_struct which has been locked for this operation.
 * @kds_res_count:     The number of KDS resources.
 * @kds_resources:     Array of KDS resources.
 * @kds_access_bitmap: Access bitmap for KDS.
 * @exclusive:         If the KDS resource requires exclusive access.
 *
 * Return: The physical allocation which backs the region on success or NULL
 * on failure.
 */
struct kbase_mem_phy_alloc *kbase_map_external_resource(
		struct kbase_context *kctx, struct kbase_va_region *reg,
		struct mm_struct *locked_mm
#ifdef CONFIG_KDS
		, u32 *kds_res_count, struct kds_resource **kds_resources,
		unsigned long *kds_access_bitmap, bool exclusive
#endif
		);

/**
 * kbase_unmap_external_resource - Unmap an external resource from the GPU.
 * @kctx:  kbase context.
 * @reg:   The region to unmap or NULL if it has already been released.
 * @alloc: The physical allocation being unmapped.
 */
void kbase_unmap_external_resource(struct kbase_context *kctx,
		struct kbase_va_region *reg, struct kbase_mem_phy_alloc *alloc);
/**
 * kbase_sticky_resource_init - Initialize sticky resource management.
 * @kctx: kbase context
 *
 * Returns zero on success or negative error number on failure.
 */
int kbase_sticky_resource_init(struct kbase_context *kctx);

/**
 * kbase_sticky_resource_acquire - Acquire a reference on a sticky resource.
 * @kctx:     kbase context.
 * @gpu_addr: The GPU address of the external resource.
 *
 * Return: The metadata object which represents the binding between the
 * external resource and the kbase context on success or NULL on failure.
 */
struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire(
		struct kbase_context *kctx, u64 gpu_addr);

/**
 * kbase_sticky_resource_release - Release a reference on a sticky resource.
 * @kctx:     kbase context.
 * @meta:     Binding metadata.
 * @gpu_addr: GPU address of the external resource.
 *
 * If meta is NULL then gpu_addr will be used to scan the metadata list and
 * find the matching metadata (if any), otherwise the provided meta will be
 * used and gpu_addr will be ignored.
 *
 * Return: True if the release found the metadata and the reference was dropped.
 */
bool kbase_sticky_resource_release(struct kbase_context *kctx,
		struct kbase_ctx_ext_res_meta *meta, u64 gpu_addr);
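
/*
 * Illustrative sketch (not part of the driver): pinning an external resource
 * for a context and dropping the reference again. Locking requirements are
 * not shown; the helper and its gpu_addr parameter are hypothetical.
 */
static inline bool kbasep_example_sticky_usage(struct kbase_context *kctx,
		u64 gpu_addr)
{
	struct kbase_ctx_ext_res_meta *meta;

	meta = kbase_sticky_resource_acquire(kctx, gpu_addr);
	if (!meta)
		return false;

	/* ... the external resource stays bound while the metadata exists ... */

	return kbase_sticky_resource_release(kctx, meta, gpu_addr);
}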
/**
 * kbase_sticky_resource_term - Terminate sticky resource management.
 * @kctx: kbase context
 */
void kbase_sticky_resource_term(struct kbase_context *kctx);

/**
 * kbase_zone_cache_update - Update the memory zone cache after new pages have
 *                           been added.
 * @alloc:        The physical memory allocation to build the cache for.
 * @start_offset: Offset to where the new pages start.
 *
 * Updates an existing memory zone cache, updating the counters for the
 * new pages.
 * If the memory allocation doesn't already have a zone cache assume that
 * one isn't created and thus don't do anything.
 *
 * Return: Zero if the cache was updated, negative error code on error.
 */
int kbase_zone_cache_update(struct kbase_mem_phy_alloc *alloc,
		size_t start_offset);

/**
 * kbase_zone_cache_build - Build the memory zone cache.
 * @alloc: The physical memory allocation to build the cache for.
 *
 * Create a new zone cache for the provided physical memory allocation if
 * one doesn't already exist, if one does exist then just return.
 *
 * Return: Zero if the zone cache was created, negative error code on error.
 */
int kbase_zone_cache_build(struct kbase_mem_phy_alloc *alloc);

/**
 * kbase_zone_cache_clear - Clear the memory zone cache.
 * @alloc: The physical memory allocation to clear the cache on.
 */
void kbase_zone_cache_clear(struct kbase_mem_phy_alloc *alloc);

#endif /* _KBASE_MEM_H_ */