/*
 *
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

/**
 * @file mali_kbase_mem.h
 * Base kernel memory APIs
 */
#ifndef _KBASE_MEM_H_
#define _KBASE_MEM_H_

#ifndef _KBASE_H_
#error "Don't include this file directly, use mali_kbase.h instead"
#endif

#include <linux/kref.h>
#ifdef CONFIG_KDS
#include <linux/kds.h>
#endif /* CONFIG_KDS */
#ifdef CONFIG_UMP
#include <linux/ump.h>
#endif /* CONFIG_UMP */
#include "mali_base_kernel.h"
#include <mali_kbase_hw.h>
#include "mali_kbase_pm.h"
#include "mali_kbase_defs.h"
#if defined(CONFIG_MALI_GATOR_SUPPORT)
#include "mali_kbase_gator.h"
#endif
/* Required for kbase_mem_evictable_unmake */
#include "mali_kbase_mem_linux.h"
/* Part of the workaround for uTLB invalid pages is to ensure we grow/shrink
 * tmem by 4 pages at a time */
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316 (2)	/* round to 4 pages */

/* Part of the workaround for PRLAM-9630 requires us to grow/shrink memory by
 * 8 pages at a time. The MMU reads in 8 page table entries from memory at a
 * time; if we take more than one page fault within the same 8 pages and the
 * page tables are updated accordingly, the MMU does not re-read the page
 * table entries from memory for the subsequent page table updates, and
 * generates duplicate page faults because the page table information it is
 * using is no longer valid. */
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630 (3)	/* round to 8 pages */

#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2 (0)	/* round to 1 page */

/* This must always be a power of 2 */
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2)
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_8316 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316)
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_9630 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630)
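
/*
 * Illustrative sketch only (not part of the driver): how a growth request
 * would be rounded up to one of the workaround block sizes above. The helper
 * name is hypothetical; it relies only on the block size being a power of
 * two, as required by the comment above.
 */
static inline size_t kbasep_example_round_growable(size_t nr_pages,
		size_t blocksize_pages)
{
	/* round up to the next multiple of blocksize_pages */
	return (nr_pages + blocksize_pages - 1) & ~(blocksize_pages - 1);
}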
struct kbase_cpu_mapping {
	struct list_head mappings_list;
	struct kbase_mem_phy_alloc *alloc;
	struct kbase_context *kctx;
	struct kbase_va_region *region;
	unsigned long vm_start;
};

enum kbase_memory_type {
	KBASE_MEM_TYPE_NATIVE,
	KBASE_MEM_TYPE_IMPORTED_UMP,
	KBASE_MEM_TYPE_IMPORTED_UMM,
	KBASE_MEM_TYPE_IMPORTED_USER_BUF,
	KBASE_MEM_TYPE_ALIAS,
	KBASE_MEM_TYPE_TB,
	KBASE_MEM_TYPE_RAW
};

/* internal structure, mirroring base_mem_aliasing_info,
 * but with alloc instead of a gpu va (handle) */
struct kbase_aliased {
	struct kbase_mem_phy_alloc *alloc; /* NULL for special, non-NULL for native */
	u64 offset; /* in pages */
	u64 length; /* in pages */
};

/**
 * @brief Physical pages tracking object properties
 */
#define KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED (1ul << 0)
#define KBASE_MEM_PHY_ALLOC_LARGE           (1ul << 1)
/* physical pages tracking object.
 * Set up to track N pages.
 * N not stored here, the creator holds that info.
 * This object only tracks how many elements are actually valid (present).
 * Changing of nents or *pages should only happen if the kbase_mem_phy_alloc
 * is not shared with another region or client. CPU mappings are OK to exist
 * when changing, as long as the tracked mappings objects are updated as part
 * of the change.
 */
struct kbase_mem_phy_alloc {
	struct kref kref; /* number of users of this alloc */
	atomic_t gpu_mappings;
	size_t nents; /* 0..N */
	phys_addr_t *pages; /* N elements, only 0..nents are valid */

	/* kbase_cpu_mappings */
	struct list_head mappings;

	/* Node used to store this allocation on the eviction list */
	struct list_head evict_node;
	/* Physical backing size when the pages were evicted */
	size_t evicted;
	/*
	 * Back reference to the region structure which created this
	 * allocation, or NULL if it has been freed.
	 */
	struct kbase_va_region *reg;

	enum kbase_memory_type type;

	unsigned long properties;

	/* member in union valid based on @a type */
	union {
#ifdef CONFIG_UMP
		ump_dd_handle ump_handle;
#endif /* CONFIG_UMP */
#if defined(CONFIG_DMA_SHARED_BUFFER)
		struct {
			struct dma_buf *dma_buf;
			struct dma_buf_attachment *dma_attachment;
			unsigned int current_mapping_usage_count;
			struct sg_table *sgt;
		} umm;
#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
		struct kbase_aliased *aliased;
		/* Used by type = (KBASE_MEM_TYPE_NATIVE, KBASE_MEM_TYPE_TB) */
		struct kbase_context *kctx;
		struct {
			unsigned long address;
			unsigned long nr_pages;
			unsigned int current_mapping_usage_count;
			struct task_struct *owner;
			dma_addr_t *dma_addrs;
		} user_buf;
	} imported;
};
static inline void kbase_mem_phy_alloc_gpu_mapped(struct kbase_mem_phy_alloc *alloc)
{
	KBASE_DEBUG_ASSERT(alloc);
	/* we only track mappings of NATIVE buffers */
	if (alloc->type == KBASE_MEM_TYPE_NATIVE)
		atomic_inc(&alloc->gpu_mappings);
}

static inline void kbase_mem_phy_alloc_gpu_unmapped(struct kbase_mem_phy_alloc *alloc)
{
	KBASE_DEBUG_ASSERT(alloc);
	/* we only track mappings of NATIVE buffers */
	if (alloc->type == KBASE_MEM_TYPE_NATIVE)
		if (0 > atomic_dec_return(&alloc->gpu_mappings)) {
			pr_err("Mismatched %s:\n", __func__);
			dump_stack();
		}
}

void kbase_mem_kref_free(struct kref *kref);

int kbase_mem_init(struct kbase_device *kbdev);
void kbase_mem_halt(struct kbase_device *kbdev);
void kbase_mem_term(struct kbase_device *kbdev);

static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_get(struct kbase_mem_phy_alloc *alloc)
{
	kref_get(&alloc->kref);
	return alloc;
}

static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_put(struct kbase_mem_phy_alloc *alloc)
{
	kref_put(&alloc->kref, kbase_mem_kref_free);
	return NULL;
}
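
/*
 * Minimal usage sketch (not part of the driver API): taking and dropping a
 * reference on a physical allocation with the helpers above. The function
 * name is hypothetical.
 */
static inline void kbasep_example_alloc_ref_cycle(struct kbase_mem_phy_alloc *alloc)
{
	struct kbase_mem_phy_alloc *ref = kbase_mem_phy_alloc_get(alloc);

	/* ... the backing pages cannot be freed while ref is held ... */
	kbase_mem_phy_alloc_put(ref);
}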
/**
 * A GPU memory region, and attributes for CPU mappings.
 */
struct kbase_va_region {
	struct rb_node rblink;
	struct list_head link;

	struct kbase_context *kctx;	/* Backlink to base context */

	u64 start_pfn;		/* The PFN in GPU space */
	size_t nr_pages;

#define KBASE_REG_FREE (1ul << 0)
/* CPU write access */
#define KBASE_REG_CPU_WR (1ul << 1)
/* GPU write access */
#define KBASE_REG_GPU_WR (1ul << 2)
/* No eXecute flag */
#define KBASE_REG_GPU_NX (1ul << 3)

#define KBASE_REG_CPU_CACHED (1ul << 4)

#define KBASE_REG_GPU_CACHED (1ul << 5)

#define KBASE_REG_GROWABLE (1ul << 6)
/* Can grow on pf? */
#define KBASE_REG_PF_GROW (1ul << 7)

/* VA managed by us */
#define KBASE_REG_CUSTOM_VA (1ul << 8)

/* inner shareable coherency */
#define KBASE_REG_SHARE_IN (1ul << 9)
/* inner & outer shareable coherency */
#define KBASE_REG_SHARE_BOTH (1ul << 10)

/* Space for 4 different zones */
#define KBASE_REG_ZONE_MASK (3ul << 11)
#define KBASE_REG_ZONE(x) (((x) & 3) << 11)

/* GPU read access */
#define KBASE_REG_GPU_RD (1ul << 13)
/* CPU read access */
#define KBASE_REG_CPU_RD (1ul << 14)

/* Aligned for GPU EX in SAME_VA */
#define KBASE_REG_ALIGNED (1ul << 15)

/* Index of chosen MEMATTR for this region (0..7) */
#define KBASE_REG_MEMATTR_MASK (7ul << 16)
#define KBASE_REG_MEMATTR_INDEX(x) (((x) & 7) << 16)
#define KBASE_REG_MEMATTR_VALUE(x) (((x) & KBASE_REG_MEMATTR_MASK) >> 16)

#define KBASE_REG_SECURE (1ul << 19)

#define KBASE_REG_DONT_NEED (1ul << 20)

#define KBASE_REG_ZONE_SAME_VA KBASE_REG_ZONE(0)

/* only used with 32-bit clients */
/*
 * On a 32bit platform, custom VA should be wired from (4GB + shader region)
 * to the VA limit of the GPU. Unfortunately, the Linux mmap() interface
 * limits us to 2^32 pages (2^44 bytes, see mmap64 man page for reference).
 * So we put the default limit to the maximum possible on Linux and shrink
 * it down, if required by the GPU, during initialization.
 */

/*
 * Dedicated 16MB region for shader code:
 * VA range 0x101000000-0x102000000
 */
#define KBASE_REG_ZONE_EXEC KBASE_REG_ZONE(1)
#define KBASE_REG_ZONE_EXEC_BASE (0x101000000ULL >> PAGE_SHIFT)
#define KBASE_REG_ZONE_EXEC_SIZE ((16ULL * 1024 * 1024) >> PAGE_SHIFT)

#define KBASE_REG_ZONE_CUSTOM_VA KBASE_REG_ZONE(2)
#define KBASE_REG_ZONE_CUSTOM_VA_BASE (KBASE_REG_ZONE_EXEC_BASE + KBASE_REG_ZONE_EXEC_SIZE) /* Starting after KBASE_REG_ZONE_EXEC */
#define KBASE_REG_ZONE_CUSTOM_VA_SIZE (((1ULL << 44) >> PAGE_SHIFT) - KBASE_REG_ZONE_CUSTOM_VA_BASE)
/* end 32-bit clients only */

	unsigned long flags;

	size_t extent; /* nr of pages alloc'd on PF */

	struct kbase_mem_phy_alloc *cpu_alloc; /* the one alloc object we mmap to the CPU when mapping this region */
	struct kbase_mem_phy_alloc *gpu_alloc; /* the one alloc object we mmap to the GPU when mapping this region */

	/* non-NULL if this memory object is a kds_resource */
	struct kds_resource *kds_res;

	/* List head used to store the region in the JIT allocation pool */
	struct list_head jit_node;
};
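
/*
 * Illustrative sketch only: testing which zone a region's flags place it in,
 * using the zone macros defined above. The helper name is hypothetical.
 */
static inline bool kbasep_example_is_custom_va(unsigned long reg_flags)
{
	return (reg_flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_CUSTOM_VA;
}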
/* Common functions */
static inline phys_addr_t *kbase_get_cpu_phy_pages(struct kbase_va_region *reg)
{
	KBASE_DEBUG_ASSERT(reg);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);

	return reg->cpu_alloc->pages;
}

static inline phys_addr_t *kbase_get_gpu_phy_pages(struct kbase_va_region *reg)
{
	KBASE_DEBUG_ASSERT(reg);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);

	return reg->gpu_alloc->pages;
}

static inline size_t kbase_reg_current_backed_size(struct kbase_va_region *reg)
{
	KBASE_DEBUG_ASSERT(reg);
	/* if no alloc object the backed size naturally is 0 */
	if (!reg->cpu_alloc)
		return 0;

	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);

	return reg->cpu_alloc->nents;
}
#define KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD ((size_t)(4*1024)) /* size above which vmalloc is used over kmalloc */

static inline struct kbase_mem_phy_alloc *kbase_alloc_create(size_t nr_pages, enum kbase_memory_type type)
{
	struct kbase_mem_phy_alloc *alloc;
	size_t alloc_size = sizeof(*alloc) + sizeof(*alloc->pages) * nr_pages;
	size_t per_page_size = sizeof(*alloc->pages);

	/* Imported pages may have page private data already in use */
	if (type == KBASE_MEM_TYPE_IMPORTED_USER_BUF) {
		alloc_size += nr_pages *
				sizeof(*alloc->imported.user_buf.dma_addrs);
		per_page_size += sizeof(*alloc->imported.user_buf.dma_addrs);
	}

	/*
	 * Prevent nr_pages*per_page_size + sizeof(*alloc) from
	 * wrapping around.
	 */
	if (nr_pages > ((((size_t) -1) - sizeof(*alloc))
			/ per_page_size))
		return ERR_PTR(-ENOMEM);

	/* Allocate based on the size to reduce internal fragmentation of vmem */
	if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
		alloc = vzalloc(alloc_size);
	else
		alloc = kzalloc(alloc_size, GFP_KERNEL);

	if (!alloc)
		return ERR_PTR(-ENOMEM);

	/* Store allocation method */
	if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
		alloc->properties |= KBASE_MEM_PHY_ALLOC_LARGE;

	kref_init(&alloc->kref);
	atomic_set(&alloc->gpu_mappings, 0);
	alloc->type = type;
	alloc->pages = (void *)(alloc + 1);
	INIT_LIST_HEAD(&alloc->mappings);

	if (type == KBASE_MEM_TYPE_IMPORTED_USER_BUF)
		alloc->imported.user_buf.dma_addrs =
				(void *) (alloc->pages + nr_pages);

	return alloc;
}
static inline int kbase_reg_prepare_native(struct kbase_va_region *reg,
		struct kbase_context *kctx)
{
	KBASE_DEBUG_ASSERT(reg);
	KBASE_DEBUG_ASSERT(!reg->cpu_alloc);
	KBASE_DEBUG_ASSERT(!reg->gpu_alloc);
	KBASE_DEBUG_ASSERT(reg->flags & KBASE_REG_FREE);

	reg->cpu_alloc = kbase_alloc_create(reg->nr_pages,
			KBASE_MEM_TYPE_NATIVE);
	if (IS_ERR(reg->cpu_alloc))
		return PTR_ERR(reg->cpu_alloc);
	else if (!reg->cpu_alloc)
		return -ENOMEM;

	reg->cpu_alloc->imported.kctx = kctx;
	INIT_LIST_HEAD(&reg->cpu_alloc->evict_node);
	if (kctx->infinite_cache_active && (reg->flags & KBASE_REG_CPU_CACHED)) {
		reg->gpu_alloc = kbase_alloc_create(reg->nr_pages,
				KBASE_MEM_TYPE_NATIVE);
		reg->gpu_alloc->imported.kctx = kctx;
		INIT_LIST_HEAD(&reg->gpu_alloc->evict_node);
	} else {
		reg->gpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
	}

	INIT_LIST_HEAD(&reg->jit_node);
	reg->flags &= ~KBASE_REG_FREE;
	return 0;
}
static inline int kbase_atomic_add_pages(int num_pages, atomic_t *used_pages)
{
	int new_val = atomic_add_return(num_pages, used_pages);
#if defined(CONFIG_MALI_GATOR_SUPPORT)
	kbase_trace_mali_total_alloc_pages_change((long long int)new_val);
#endif
	return new_val;
}

static inline int kbase_atomic_sub_pages(int num_pages, atomic_t *used_pages)
{
	int new_val = atomic_sub_return(num_pages, used_pages);
#if defined(CONFIG_MALI_GATOR_SUPPORT)
	kbase_trace_mali_total_alloc_pages_change((long long int)new_val);
#endif
	return new_val;
}
/*
 * Max size for kbdev memory pool (in pages)
 */
#define KBASE_MEM_POOL_MAX_SIZE_KBDEV (SZ_64M >> PAGE_SHIFT)

/*
 * Max size for kctx memory pool (in pages)
 */
#define KBASE_MEM_POOL_MAX_SIZE_KCTX (SZ_64M >> PAGE_SHIFT)
/**
 * kbase_mem_pool_init - Create a memory pool for a kbase device
 * @pool:      Memory pool to initialize
 * @max_size:  Maximum number of free pages the pool can hold
 * @kbdev:     Kbase device where memory is used
 * @next_pool: Pointer to the next pool or NULL.
 *
 * Allocations from @pool are in whole pages. Each @pool has a free list where
 * pages can be quickly allocated from. The free list is initially empty and
 * filled whenever pages are freed back to the pool. The number of free pages
 * in the pool will in general not exceed @max_size, but the pool may in
 * certain corner cases grow above @max_size.
 *
 * If @next_pool is not NULL, we will allocate from @next_pool before going to
 * the kernel allocator. Similarly pages can spill over to @next_pool when
 * @pool is full. Pages are zeroed before they spill over to another pool, to
 * prevent leaking information between applications.
 *
 * A shrinker is registered so that Linux mm can reclaim pages from the pool
 * as needed.
 *
 * Return: 0 on success, negative -errno on error
 */
int kbase_mem_pool_init(struct kbase_mem_pool *pool,
		size_t max_size,
		struct kbase_device *kbdev,
		struct kbase_mem_pool *next_pool);
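
/*
 * Illustrative sketch only: chaining a per-context pool to the device-wide
 * pool, so that pages freed by the context spill over to the device pool
 * rather than going straight back to the kernel. The mem_pool field names
 * used here are assumptions made for this example.
 */
static inline int kbasep_example_init_ctx_pool(struct kbase_context *kctx,
		struct kbase_device *kbdev)
{
	return kbase_mem_pool_init(&kctx->mem_pool,
			KBASE_MEM_POOL_MAX_SIZE_KCTX,
			kbdev, &kbdev->mem_pool);
}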
/**
 * kbase_mem_pool_term - Destroy a memory pool
 * @pool: Memory pool to destroy
 *
 * Pages in the pool will spill over to @next_pool (if available) or freed to
 * the kernel.
 */
void kbase_mem_pool_term(struct kbase_mem_pool *pool);

/**
 * kbase_mem_pool_alloc - Allocate a page from memory pool
 * @pool: Memory pool to allocate from
 *
 * Allocations from the pool are made as follows:
 * 1. If there are free pages in the pool, allocate a page from @pool.
 * 2. Otherwise, if @next_pool is not NULL and has free pages, allocate a page
 *    from @next_pool.
 * 3. Finally, allocate a page from the kernel.
 *
 * Return: Pointer to allocated page, or NULL if allocation failed.
 */
struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool);

/**
 * kbase_mem_pool_free - Free a page to memory pool
 * @pool:  Memory pool where page should be freed
 * @page:  Page to free to the pool
 * @dirty: Whether some of the page may be dirty in the cache.
 *
 * Pages are freed to the pool as follows:
 * 1. If @pool is not full, add @page to @pool.
 * 2. Otherwise, if @next_pool is not NULL and not full, add @page to
 *    @next_pool.
 * 3. Finally, free @page to the kernel.
 */
void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *page,
		bool dirty);
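
/*
 * Minimal usage sketch of the alloc/free pairing documented above; purely
 * illustrative and not part of the driver. The page is returned as clean
 * because nothing was written to it through a cached CPU mapping.
 */
static inline void kbasep_example_pool_roundtrip(struct kbase_mem_pool *pool)
{
	struct page *p = kbase_mem_pool_alloc(pool);

	if (!p)
		return;		/* both the pool chain and the kernel failed */
	/* ... use the page ... */
	kbase_mem_pool_free(pool, p, false);
}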
/**
 * kbase_mem_pool_alloc_pages - Allocate pages from memory pool
 * @pool:     Memory pool to allocate from
 * @nr_pages: Number of pages to allocate
 * @pages:    Pointer to array where the physical address of the allocated
 *            pages will be stored.
 *
 * Like kbase_mem_pool_alloc() but optimized for allocating many pages.
 *
 * Return: 0 on success, negative -errno on error
 */
int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
		phys_addr_t *pages);

/**
 * kbase_mem_pool_free_pages - Free pages to memory pool
 * @pool:      Memory pool where pages should be freed
 * @nr_pages:  Number of pages to free
 * @pages:     Pointer to array holding the physical addresses of the pages to
 *             free.
 * @dirty:     Whether any pages may be dirty in the cache.
 * @reclaimed: Whether the pages were reclaimable and thus should bypass
 *             the pool and go straight to the kernel.
 *
 * Like kbase_mem_pool_free() but optimized for freeing many pages.
 */
void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
		phys_addr_t *pages, bool dirty, bool reclaimed);
/**
 * kbase_mem_pool_size - Get number of free pages in memory pool
 * @pool: Memory pool to inspect
 *
 * Note: the size of the pool may in certain corner cases exceed @max_size!
 *
 * Return: Number of free pages in the pool
 */
static inline size_t kbase_mem_pool_size(struct kbase_mem_pool *pool)
{
	return ACCESS_ONCE(pool->cur_size);
}

/**
 * kbase_mem_pool_max_size - Get maximum number of free pages in memory pool
 * @pool: Memory pool to inspect
 *
 * Return: Maximum number of free pages in the pool
 */
static inline size_t kbase_mem_pool_max_size(struct kbase_mem_pool *pool)
{
	return pool->max_size;
}

/**
 * kbase_mem_pool_set_max_size - Set maximum number of free pages in memory pool
 * @pool:     Memory pool to update
 * @max_size: Maximum number of free pages the pool can hold
 *
 * If @max_size is reduced, the pool will be shrunk to adhere to the new limit.
 * For details see kbase_mem_pool_shrink().
 */
void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size);
/**
 * kbase_mem_pool_trim - Grow or shrink the pool to a new size
 * @pool:     Memory pool to trim
 * @new_size: New number of pages in the pool
 *
 * If @new_size > @cur_size, fill the pool with new pages from the kernel, but
 * not above @max_size.
 * If @new_size < @cur_size, shrink the pool by freeing pages to the kernel.
 *
 * Return: The new size of the pool
 */
size_t kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size);

int kbase_region_tracker_init(struct kbase_context *kctx);
int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages);
void kbase_region_tracker_term(struct kbase_context *kctx);

struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(struct kbase_context *kctx, u64 gpu_addr);
/**
 * @brief Check that a pointer is actually a valid region.
 *
 * Must be called with context lock held.
 */
struct kbase_va_region *kbase_region_tracker_find_region_base_address(struct kbase_context *kctx, u64 gpu_addr);

struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone);
void kbase_free_alloced_region(struct kbase_va_region *reg);
int kbase_add_va_region(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align);

bool kbase_check_alloc_flags(unsigned long flags);
bool kbase_check_import_flags(unsigned long flags);
void kbase_update_region_flags(struct kbase_context *kctx,
		struct kbase_va_region *reg, unsigned long flags);

void kbase_gpu_vm_lock(struct kbase_context *kctx);
void kbase_gpu_vm_unlock(struct kbase_context *kctx);

int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size);
int kbase_mmu_init(struct kbase_context *kctx);
void kbase_mmu_term(struct kbase_context *kctx);

phys_addr_t kbase_mmu_alloc_pgd(struct kbase_context *kctx);
void kbase_mmu_free_pgd(struct kbase_context *kctx);
int kbase_mmu_insert_pages(struct kbase_context *kctx, u64 vpfn,
		phys_addr_t *phys, size_t nr,
		unsigned long flags);
int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
		phys_addr_t phys, size_t nr,
		unsigned long flags);

int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr);
int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, phys_addr_t *phys, size_t nr, unsigned long flags);
/**
 * @brief Register region and map it on the GPU.
 *
 * Call kbase_add_va_region() and map the region on the GPU.
 */
int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align);

/**
 * @brief Remove the region from the GPU and unregister it.
 *
 * Must be called with context lock held.
 */
int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg);

/**
 * The caller has the following locking conditions:
 * - It must hold kbase_as::transaction_mutex on kctx's address space
 * - It must hold the kbasep_js_device_data::runpool_irq::lock
 */
void kbase_mmu_update(struct kbase_context *kctx);

/**
 * The caller has the following locking conditions:
 * - It must hold kbase_as::transaction_mutex on kctx's address space
 * - It must hold the kbasep_js_device_data::runpool_irq::lock
 */
void kbase_mmu_disable(struct kbase_context *kctx);

/**
 * kbase_mmu_disable_as() - set the MMU in unmapped mode for an address space.
 *
 * @kbdev: Kbase device
 * @as_nr: Number of the address space for which the MMU
 *         should be set in unmapped mode.
 *
 * The caller must hold kbdev->as[as_nr].transaction_mutex.
 */
void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr);

void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
/** Dump the MMU tables to a buffer
 *
 * This function allocates a buffer (of @c nr_pages pages) to hold a dump of
 * the MMU tables and fills it. If the buffer is too small then the return
 * value will be NULL.
 *
 * The GPU vm lock must be held when calling this function.
 *
 * The buffer returned should be freed with @ref vfree when it is no longer
 * required.
 *
 * @param[in] kctx     The kbase context to dump
 * @param[in] nr_pages The number of pages to allocate for the buffer.
 *
 * @return The address of the buffer containing the MMU dump or NULL on error
 * (including if the @c nr_pages is too small).
 */
void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages);
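
/*
 * Illustrative sketch only: dumping the MMU tables while holding the GPU VM
 * lock and releasing the buffer with vfree(), as prescribed above. The buffer
 * size of 16 pages is an arbitrary example value.
 */
static inline void kbasep_example_dump_mmu(struct kbase_context *kctx)
{
	void *buf;

	kbase_gpu_vm_lock(kctx);
	buf = kbase_mmu_dump(kctx, 16);
	kbase_gpu_vm_unlock(kctx);

	if (buf)
		vfree(buf);
}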
int kbase_sync_now(struct kbase_context *kctx, struct base_syncset *syncset);
void kbase_sync_single(struct kbase_context *kctx, phys_addr_t cpu_pa,
		phys_addr_t gpu_pa, off_t offset, size_t size,
		enum kbase_sync_type sync_fn);
void kbase_pre_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr);
void kbase_post_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr);

/* OS specific functions */
int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr);
int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *reg);
void kbase_os_mem_map_lock(struct kbase_context *kctx);
void kbase_os_mem_map_unlock(struct kbase_context *kctx);
/**
 * @brief Update the memory allocation counters for the current process
 *
 * OS specific call to update the current memory allocation counters for the
 * current process with the supplied delta.
 *
 * @param[in] kctx  The kbase context
 * @param[in] pages The desired delta to apply to the memory usage counters.
 */
void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages);

/**
 * @brief Add to the memory allocation counters for the current process
 *
 * OS specific call to add to the current memory allocation counters for the
 * current process by the supplied amount.
 *
 * @param[in] kctx  The kernel base context used for the allocation.
 * @param[in] pages The desired delta to apply to the memory usage counters.
 */
static inline void kbase_process_page_usage_inc(struct kbase_context *kctx, int pages)
{
	kbasep_os_process_page_usage_update(kctx, pages);
}

/**
 * @brief Subtract from the memory allocation counters for the current process
 *
 * OS specific call to subtract from the current memory allocation counters
 * for the current process by the supplied amount.
 *
 * @param[in] kctx  The kernel base context used for the allocation.
 * @param[in] pages The desired delta to apply to the memory usage counters.
 */
static inline void kbase_process_page_usage_dec(struct kbase_context *kctx, int pages)
{
	kbasep_os_process_page_usage_update(kctx, 0 - pages);
}
/**
 * @brief Find the offset of the CPU mapping of a memory allocation containing
 *        a given address range
 *
 * Searches for a CPU mapping of any part of the region starting at @p gpu_addr
 * that fully encloses the CPU virtual address range specified by @p uaddr and
 * @p size. Returns a failure indication if only part of the address range lies
 * within a CPU mapping, or the address range lies within a CPU mapping of a
 * different region.
 *
 * @param[in,out] kctx     The kernel base context used for the allocation.
 * @param[in]     gpu_addr GPU address of the start of the allocated region
 *                         within which to search.
 * @param[in]     uaddr    Start of the CPU virtual address range.
 * @param[in]     size     Size of the CPU virtual address range (in bytes).
 * @param[out]    offset   The offset from the start of the allocation to the
 *                         specified CPU virtual address.
 *
 * @return 0 if offset was obtained successfully. Error code otherwise.
 */
int kbasep_find_enclosing_cpu_mapping_offset(struct kbase_context *kctx,
		u64 gpu_addr, unsigned long uaddr, size_t size, u64 *offset);
enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer);
void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);

/**
 * @brief Allocate physical pages.
 *
 * Allocates \a nr_pages_requested pages and updates the alloc object.
 *
 * @param[in] alloc              allocation object to add pages to
 * @param[in] nr_pages_requested number of physical pages to allocate
 *
 * @return 0 if all pages have been successfully allocated. Error code otherwise
 */
int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_requested);

/**
 * @brief Free physical pages.
 *
 * Frees \a nr_pages_to_free pages and updates the alloc object.
 *
 * @param[in] alloc            allocation object to free pages from
 * @param[in] nr_pages_to_free number of physical pages to free
 */
int kbase_free_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_to_free);
static inline void kbase_set_dma_addr(struct page *p, dma_addr_t dma_addr)
{
	SetPagePrivate(p);
	if (sizeof(dma_addr_t) > sizeof(p->private)) {
		/* on 32-bit ARM with LPAE dma_addr_t becomes larger, but the
		 * private field stays the same. So we have to be clever and
		 * use the fact that we only store DMA addresses of whole
		 * pages, so the low bits should be zero */
		KBASE_DEBUG_ASSERT(!(dma_addr & (PAGE_SIZE - 1)));
		set_page_private(p, dma_addr >> PAGE_SHIFT);
	} else {
		set_page_private(p, dma_addr);
	}
}

static inline dma_addr_t kbase_dma_addr(struct page *p)
{
	if (sizeof(dma_addr_t) > sizeof(p->private))
		return ((dma_addr_t)page_private(p)) << PAGE_SHIFT;

	return (dma_addr_t)page_private(p);
}

static inline void kbase_clear_dma_addr(struct page *p)
{
	ClearPagePrivate(p);
}
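
/*
 * Illustrative sketch only: mapping a page for DMA and recording the handle
 * with the helpers above. dma_map_page()/dma_mapping_error() are standard
 * kernel DMA-mapping calls used here purely as an example source of a
 * page-aligned DMA address; <linux/dma-mapping.h> is assumed to be available.
 */
static inline int kbasep_example_map_and_tag(struct device *dev, struct page *p)
{
	dma_addr_t dma_addr = dma_map_page(dev, p, 0, PAGE_SIZE,
			DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	/* whole-page mappings are page aligned, as kbase_set_dma_addr needs */
	kbase_set_dma_addr(p, dma_addr);
	WARN_ON(kbase_dma_addr(p) != dma_addr);
	return 0;
}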
/**
 * @brief Process a bus or page fault.
 *
 * This function will process a fault on a specific address space.
 *
 * @param[in] kbdev The @ref kbase_device the fault happened on
 * @param[in] kctx  The @ref kbase_context for the faulting address space if
 *                  one was found.
 * @param[in] as    The address space that has the fault
 */
void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
		struct kbase_context *kctx, struct kbase_as *as);

/**
 * @brief Process a page fault.
 *
 * @param[in] data work_struct passed by queue_work()
 */
void page_fault_worker(struct work_struct *data);

/**
 * @brief Process a bus fault.
 *
 * @param[in] data work_struct passed by queue_work()
 */
void bus_fault_worker(struct work_struct *data);

/**
 * @brief Flush MMU workqueues.
 *
 * This function will cause any outstanding page or bus faults to be processed.
 * It should be called prior to powering off the GPU.
 *
 * @param[in] kbdev Device pointer
 */
void kbase_flush_mmu_wqs(struct kbase_device *kbdev);
/**
 * kbase_sync_single_for_device - update physical memory and give GPU ownership
 * @kbdev:  Device pointer
 * @handle: DMA address of region
 * @size:   Size of region to sync
 * @dir:    DMA data direction
 */
void kbase_sync_single_for_device(struct kbase_device *kbdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir);

/**
 * kbase_sync_single_for_cpu - update physical memory and give CPU ownership
 * @kbdev:  Device pointer
 * @handle: DMA address of region
 * @size:   Size of region to sync
 * @dir:    DMA data direction
 */
void kbase_sync_single_for_cpu(struct kbase_device *kbdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir);
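
/*
 * Illustrative sketch only: taking CPU ownership of a DMA region around a
 * CPU-side write and then handing ownership back to the GPU. memset() stands
 * in for whatever the CPU actually does with the memory; cpu_va must be the
 * kernel mapping of the same region that the DMA handle describes.
 */
static inline void kbasep_example_cpu_touch(struct kbase_device *kbdev,
		dma_addr_t handle, void *cpu_va, size_t size)
{
	kbase_sync_single_for_cpu(kbdev, handle, size, DMA_BIDIRECTIONAL);
	memset(cpu_va, 0, size);	/* CPU owns the memory here */
	kbase_sync_single_for_device(kbdev, handle, size, DMA_BIDIRECTIONAL);
}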
/**
 * kbase_jit_debugfs_add - Add per context debugfs entry for JIT.
 * @kctx: kbase context
 */
void kbase_jit_debugfs_add(struct kbase_context *kctx);

/**
 * kbase_jit_init - Initialize the JIT memory pool management
 * @kctx: kbase context
 *
 * Returns zero on success or negative error number on failure.
 */
int kbase_jit_init(struct kbase_context *kctx);

/**
 * kbase_jit_allocate - Allocate JIT memory
 * @kctx: kbase context
 * @info: JIT allocation information
 *
 * Return: JIT allocation on success or NULL on failure.
 */
struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
		struct base_jit_alloc_info *info);

/**
 * kbase_jit_free - Free a JIT allocation
 * @kctx: kbase context
 * @reg:  JIT allocation
 *
 * Frees a JIT allocation and places it into the free pool for later reuse.
 */
void kbase_jit_free(struct kbase_context *kctx, struct kbase_va_region *reg);

/**
 * kbase_jit_backing_lost - Inform JIT that an allocation has lost backing
 * @reg: JIT allocation
 */
void kbase_jit_backing_lost(struct kbase_va_region *reg);

/**
 * kbase_jit_evict - Evict a JIT allocation from the pool
 * @kctx: kbase context
 *
 * Evict the least recently used JIT allocation from the pool. This can be
 * required if normal VA allocations are failing due to VA exhaustion.
 *
 * Return: True if a JIT allocation was freed, false otherwise.
 */
bool kbase_jit_evict(struct kbase_context *kctx);

/**
 * kbase_jit_term - Terminate the JIT memory pool management
 * @kctx: kbase context
 */
void kbase_jit_term(struct kbase_context *kctx);
/**
 * kbase_map_external_resource - Map an external resource to the GPU.
 * @kctx:              kbase context.
 * @reg:               The region to map.
 * @locked_mm:         The mm_struct which has been locked for this operation.
 * @kds_res_count:     The number of KDS resources.
 * @kds_resources:     Array of KDS resources.
 * @kds_access_bitmap: Access bitmap for KDS.
 * @exclusive:         If the KDS resource requires exclusive access.
 *
 * Return: The physical allocation which backs the region on success or NULL
 * on failure.
 */
struct kbase_mem_phy_alloc *kbase_map_external_resource(
		struct kbase_context *kctx, struct kbase_va_region *reg,
		struct mm_struct *locked_mm
#ifdef CONFIG_KDS
		, u32 *kds_res_count, struct kds_resource **kds_resources,
		unsigned long *kds_access_bitmap, bool exclusive
#endif
		);

/**
 * kbase_unmap_external_resource - Unmap an external resource from the GPU.
 * @kctx:  kbase context.
 * @reg:   The region to unmap or NULL if it has already been released.
 * @alloc: The physical allocation being unmapped.
 */
void kbase_unmap_external_resource(struct kbase_context *kctx,
		struct kbase_va_region *reg, struct kbase_mem_phy_alloc *alloc);
/**
 * kbase_sticky_resource_init - Initialize sticky resource management.
 * @kctx: kbase context
 *
 * Returns zero on success or negative error number on failure.
 */
int kbase_sticky_resource_init(struct kbase_context *kctx);

/**
 * kbase_sticky_resource_acquire - Acquire a reference on a sticky resource.
 * @kctx:     kbase context.
 * @gpu_addr: The GPU address of the external resource.
 *
 * Return: The metadata object which represents the binding between the
 * external resource and the kbase context on success or NULL on failure.
 */
struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire(
		struct kbase_context *kctx, u64 gpu_addr);

/**
 * kbase_sticky_resource_release - Release a reference on a sticky resource.
 * @kctx:     kbase context.
 * @meta:     Binding metadata.
 * @gpu_addr: GPU address of the external resource.
 * @force:    If the release is being forced.
 *
 * If meta is NULL then gpu_addr will be used to scan the metadata list and
 * find the matching metadata (if any), otherwise the provided meta will be
 * used and gpu_addr will be ignored.
 *
 * If force is true then the refcount in the metadata is ignored and the
 * resource is freed regardless.
 *
 * Return: True if the release found the metadata and the reference was dropped.
 */
bool kbase_sticky_resource_release(struct kbase_context *kctx,
		struct kbase_ctx_ext_res_meta *meta, u64 gpu_addr, bool force);
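
/*
 * Illustrative sketch only: binding an external resource to a context for
 * the duration of some operation and then dropping the reference. Error
 * handling and any locking the caller may need are reduced to the minimum
 * required for the example.
 */
static inline int kbasep_example_use_sticky(struct kbase_context *kctx,
		u64 gpu_addr)
{
	struct kbase_ctx_ext_res_meta *meta;

	meta = kbase_sticky_resource_acquire(kctx, gpu_addr);
	if (!meta)
		return -EINVAL;

	/* ... the external resource stays mapped while the binding is held ... */

	kbase_sticky_resource_release(kctx, meta, gpu_addr, false);
	return 0;
}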
/**
 * kbase_sticky_resource_term - Terminate sticky resource management.
 * @kctx: kbase context
 */
void kbase_sticky_resource_term(struct kbase_context *kctx);

#endif				/* _KBASE_MEM_H_ */