/*
 *
 * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */
/**
 * @file mali_kbase_mem.h
 * Base kernel memory APIs
 */
#ifndef _KBASE_MEM_H_
#define _KBASE_MEM_H_

#ifndef _KBASE_H_
#error "Don't include this file directly, use mali_kbase.h instead"
#endif
#include <linux/kref.h>

#ifdef CONFIG_UMP
#include <linux/ump.h>
#endif				/* CONFIG_UMP */
#include "mali_base_kernel.h"
#include <mali_kbase_hw.h>
#include "mali_kbase_pm.h"
#include "mali_kbase_defs.h"
#if defined(CONFIG_MALI_GATOR_SUPPORT)
#include "mali_kbase_gator.h"
#endif
/* Part of the workaround for uTLB invalid pages is to ensure we grow/shrink
 * tmem by 4 pages at a time */
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316 (2)	/* round to 4 pages */

/* Part of the workaround for PRLAM-9630 requires us to grow/shrink memory by
 * 8 pages. The MMU reads in 8 page table entries from memory at a time; if we
 * have more than one page fault within the same 8 pages and the page tables
 * are updated accordingly, the MMU does not re-read the page table entries
 * from memory for the subsequent page table updates, and generates duplicate
 * page faults because the page table information it is using is no longer
 * valid. */
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630 (3)	/* round to 8 pages */

#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2 (0)	/* round to 1 page */

/* This must always be a power of 2 */
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2)
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_8316 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316)
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_9630 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630)
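
/*
 * Illustrative sketch (not part of the original header): when the PRLAM-9630
 * workaround is active, a requested tmem growth in pages would be rounded up
 * to the 8-page blocksize. The blocksizes above are powers of 2, so the mask
 * form below is exact. The function name is hypothetical.
 */
static inline size_t kbasep_example_round_to_blocksize_9630(size_t requested)
{
	const size_t block = KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_9630;

	/* e.g. 5 pages round up to 8, 9 pages round up to 16 */
	return (requested + block - 1) & ~(block - 1);
}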
struct kbase_cpu_mapping {
	struct list_head mappings_list;
	struct kbase_mem_phy_alloc *alloc;
	struct kbase_context *kctx;
	struct kbase_va_region *region;

	unsigned long vm_start;
};
enum kbase_memory_type {
	KBASE_MEM_TYPE_NATIVE,
	KBASE_MEM_TYPE_IMPORTED_UMP,
	KBASE_MEM_TYPE_IMPORTED_UMM,
	KBASE_MEM_TYPE_IMPORTED_USER_BUF,
	KBASE_MEM_TYPE_ALIAS,
	KBASE_MEM_TYPE_TB
};
/* internal structure, mirroring base_mem_aliasing_info,
 * but with alloc instead of a gpu va (handle) */
struct kbase_aliased {
	struct kbase_mem_phy_alloc *alloc; /* NULL for special, non-NULL for native */
	u64 offset; /* in pages */
	u64 length; /* in pages */
};
/**
 * @brief Physical pages tracking object properties
 */
#define KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED  (1ul << 0)
#define KBASE_MEM_PHY_ALLOC_LARGE            (1ul << 1)
/* physical pages tracking object.
 * Set up to track N pages.
 * N is not stored here; the creator holds that information.
 * This object only tracks how many elements are actually valid (present).
 * Changes to nents or *pages should only be made if the kbase_mem_phy_alloc
 * is not shared with another region or client. CPU mappings may exist while
 * a change is made, as long as the tracked mapping objects are updated as
 * part of the change.
 */
struct kbase_mem_phy_alloc {
	struct kref kref; /* number of users of this alloc */
	atomic_t gpu_mappings;
	size_t nents; /* 0..N */
	phys_addr_t *pages; /* N elements, only 0..nents are valid */

	/* kbase_cpu_mappings */
	struct list_head mappings;

	/* type of buffer */
	enum kbase_memory_type type;

	unsigned long properties;

	/* member in union valid based on @a type */
	union {
#ifdef CONFIG_UMP
		ump_dd_handle ump_handle;
#endif /* CONFIG_UMP */
#if defined(CONFIG_DMA_SHARED_BUFFER)
		struct {
			struct dma_buf *dma_buf;
			struct dma_buf_attachment *dma_attachment;
			unsigned int current_mapping_usage_count;
			struct sg_table *sgt;
		} umm;
#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
		struct {
			struct kbase_aliased *aliased;
		} alias;
		/* Used by type = (KBASE_MEM_TYPE_NATIVE, KBASE_MEM_TYPE_TB) */
		struct kbase_context *kctx;
		struct {
			unsigned long address;
			unsigned long nr_pages;
			unsigned int current_mapping_usage_count;
			struct task_struct *owner;
		} user_buf;
	} imported;
};
static inline void kbase_mem_phy_alloc_gpu_mapped(struct kbase_mem_phy_alloc *alloc)
{
	KBASE_DEBUG_ASSERT(alloc);
	/* we only track mappings of NATIVE buffers */
	if (alloc->type == KBASE_MEM_TYPE_NATIVE)
		atomic_inc(&alloc->gpu_mappings);
}
static inline void kbase_mem_phy_alloc_gpu_unmapped(struct kbase_mem_phy_alloc *alloc)
{
	KBASE_DEBUG_ASSERT(alloc);
	/* we only track mappings of NATIVE buffers */
	if (alloc->type == KBASE_MEM_TYPE_NATIVE)
		if (0 > atomic_dec_return(&alloc->gpu_mappings)) {
			pr_err("Mismatched %s:\n", __func__);
			dump_stack();
		}
}
void kbase_mem_kref_free(struct kref *kref);

int kbase_mem_init(struct kbase_device *kbdev);
void kbase_mem_halt(struct kbase_device *kbdev);
void kbase_mem_term(struct kbase_device *kbdev);
static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_get(struct kbase_mem_phy_alloc *alloc)
{
	kref_get(&alloc->kref);
	return alloc;
}

static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_put(struct kbase_mem_phy_alloc *alloc)
{
	kref_put(&alloc->kref, kbase_mem_kref_free);
	return NULL;
}
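
/*
 * Illustrative sketch (not part of the original header): the put helper
 * returns NULL so a caller can drop its reference and clear its pointer in a
 * single statement. The function name is hypothetical.
 */
static inline void kbasep_example_ref_round_trip(struct kbase_mem_phy_alloc *alloc)
{
	struct kbase_mem_phy_alloc *extra;

	extra = kbase_mem_phy_alloc_get(alloc);	/* refcount +1, extra == alloc */
	/* ... use extra ... */
	extra = kbase_mem_phy_alloc_put(extra);	/* refcount -1, extra == NULL */
}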
/**
 * A GPU memory region, and attributes for CPU mappings.
 */
struct kbase_va_region {
	struct rb_node rblink;
	struct list_head link;

	struct kbase_context *kctx;	/* Backlink to base context */

	u64 start_pfn;		/* The PFN in GPU space */
	size_t nr_pages;

/* Free region */
#define KBASE_REG_FREE              (1ul << 0)
/* CPU write access */
#define KBASE_REG_CPU_WR            (1ul << 1)
/* GPU write access */
#define KBASE_REG_GPU_WR            (1ul << 2)
/* No eXecute flag */
#define KBASE_REG_GPU_NX            (1ul << 3)
/* Is CPU cached? */
#define KBASE_REG_CPU_CACHED        (1ul << 4)
/* Is GPU cached? */
#define KBASE_REG_GPU_CACHED        (1ul << 5)

#define KBASE_REG_GROWABLE          (1ul << 6)
/* Can grow on pf? */
#define KBASE_REG_PF_GROW           (1ul << 7)

/* VA managed by us */
#define KBASE_REG_CUSTOM_VA         (1ul << 8)

/* inner shareable coherency */
#define KBASE_REG_SHARE_IN          (1ul << 9)
/* inner & outer shareable coherency */
#define KBASE_REG_SHARE_BOTH        (1ul << 10)

/* Space for 4 different zones */
#define KBASE_REG_ZONE_MASK         (3ul << 11)
#define KBASE_REG_ZONE(x)           (((x) & 3) << 11)

/* GPU read access */
#define KBASE_REG_GPU_RD            (1ul << 13)
/* CPU read access */
#define KBASE_REG_CPU_RD            (1ul << 14)

/* Aligned for GPU EX in SAME_VA */
#define KBASE_REG_ALIGNED           (1ul << 15)

/* Index of chosen MEMATTR for this region (0..7) */
#define KBASE_REG_MEMATTR_MASK      (7ul << 16)
#define KBASE_REG_MEMATTR_INDEX(x)  (((x) & 7) << 16)
#define KBASE_REG_MEMATTR_VALUE(x)  (((x) & KBASE_REG_MEMATTR_MASK) >> 16)

#define KBASE_REG_SECURE            (1ul << 19)
#define KBASE_REG_ZONE_SAME_VA      KBASE_REG_ZONE(0)

/* only used with 32-bit clients */
/*
 * On a 32bit platform, custom VA should be wired from (4GB + shader region)
 * to the VA limit of the GPU. Unfortunately, the Linux mmap() interface
 * limits us to 2^32 pages (2^44 bytes, see mmap64 man page for reference).
 * So we put the default limit to the maximum possible on Linux and shrink
 * it down, if required by the GPU, during initialization.
 */

/*
 * Dedicated 16MB region for shader code:
 * VA range 0x101000000-0x102000000
 */
#define KBASE_REG_ZONE_EXEC         KBASE_REG_ZONE(1)
#define KBASE_REG_ZONE_EXEC_BASE    (0x101000000ULL >> PAGE_SHIFT)
#define KBASE_REG_ZONE_EXEC_SIZE    ((16ULL * 1024 * 1024) >> PAGE_SHIFT)

#define KBASE_REG_ZONE_CUSTOM_VA         KBASE_REG_ZONE(2)
#define KBASE_REG_ZONE_CUSTOM_VA_BASE    (KBASE_REG_ZONE_EXEC_BASE + KBASE_REG_ZONE_EXEC_SIZE)	/* Starting after KBASE_REG_ZONE_EXEC */
#define KBASE_REG_ZONE_CUSTOM_VA_SIZE    (((1ULL << 44) >> PAGE_SHIFT) - KBASE_REG_ZONE_CUSTOM_VA_BASE)
/* end 32-bit clients only */
	unsigned long flags;

	size_t extent; /* nr of pages alloc'd on PF */

	struct kbase_mem_phy_alloc *cpu_alloc; /* the one alloc object we mmap to the CPU when mapping this region */
	struct kbase_mem_phy_alloc *gpu_alloc; /* the one alloc object we mmap to the GPU when mapping this region */

	/* non-NULL if this memory object is a kds_resource */
	struct kds_resource *kds_res;
};
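
/*
 * Illustrative sketch (not part of the original header): packing a MEMATTR
 * index into a region flags word with the KBASE_REG_MEMATTR_* macros above,
 * such that KBASE_REG_MEMATTR_VALUE() recovers it. The function name is
 * hypothetical.
 */
static inline unsigned long kbasep_example_set_memattr(unsigned long flags,
		unsigned int idx)
{
	/* clear any previous index, then insert the new one (masked to 0..7) */
	return (flags & ~KBASE_REG_MEMATTR_MASK) | KBASE_REG_MEMATTR_INDEX(idx);
}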
/* Common functions */
static inline phys_addr_t *kbase_get_cpu_phy_pages(struct kbase_va_region *reg)
{
	KBASE_DEBUG_ASSERT(reg);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);

	return reg->cpu_alloc->pages;
}

static inline phys_addr_t *kbase_get_gpu_phy_pages(struct kbase_va_region *reg)
{
	KBASE_DEBUG_ASSERT(reg);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);

	return reg->gpu_alloc->pages;
}
static inline size_t kbase_reg_current_backed_size(struct kbase_va_region *reg)
{
	KBASE_DEBUG_ASSERT(reg);
	/* if no alloc object the backed size naturally is 0 */
	if (!reg->cpu_alloc)
		return 0;

	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);

	return reg->cpu_alloc->nents;
}
#define KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD ((size_t)(4*1024)) /* size above which vmalloc is used over kmalloc */
static inline struct kbase_mem_phy_alloc *kbase_alloc_create(size_t nr_pages, enum kbase_memory_type type)
{
	struct kbase_mem_phy_alloc *alloc;
	const size_t alloc_size =
		sizeof(*alloc) + sizeof(*alloc->pages) * nr_pages;

	/* Prevent nr_pages*sizeof + sizeof(*alloc) from wrapping around. */
	if (nr_pages > ((((size_t) -1) - sizeof(*alloc))
			/ sizeof(*alloc->pages)))
		return ERR_PTR(-ENOMEM);

	/* Allocate based on the size to reduce internal fragmentation of vmem */
	if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
		alloc = vzalloc(alloc_size);
	else
		alloc = kzalloc(alloc_size, GFP_KERNEL);

	if (!alloc)
		return ERR_PTR(-ENOMEM);

	/* Store allocation method */
	if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
		alloc->properties |= KBASE_MEM_PHY_ALLOC_LARGE;

	kref_init(&alloc->kref);
	atomic_set(&alloc->gpu_mappings, 0);
	alloc->type = type;
	alloc->pages = (void *)(alloc + 1);
	INIT_LIST_HEAD(&alloc->mappings);

	return alloc;
}
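
/*
 * Illustrative sketch (not part of the original header): callers must treat
 * the return value of kbase_alloc_create() as an ERR_PTR, as
 * kbase_reg_prepare_native() below does. The function name is hypothetical.
 */
static inline int kbasep_example_create_alloc(size_t nr_pages)
{
	struct kbase_mem_phy_alloc *alloc =
		kbase_alloc_create(nr_pages, KBASE_MEM_TYPE_NATIVE);

	if (IS_ERR(alloc))
		return PTR_ERR(alloc);

	/* drop the initial reference again */
	kbase_mem_phy_alloc_put(alloc);
	return 0;
}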
static inline int kbase_reg_prepare_native(struct kbase_va_region *reg,
		struct kbase_context *kctx)
{
	KBASE_DEBUG_ASSERT(reg);
	KBASE_DEBUG_ASSERT(!reg->cpu_alloc);
	KBASE_DEBUG_ASSERT(!reg->gpu_alloc);
	KBASE_DEBUG_ASSERT(reg->flags & KBASE_REG_FREE);

	reg->cpu_alloc = kbase_alloc_create(reg->nr_pages,
			KBASE_MEM_TYPE_NATIVE);
	if (IS_ERR(reg->cpu_alloc))
		return PTR_ERR(reg->cpu_alloc);
	else if (!reg->cpu_alloc)
		return -ENOMEM;
	reg->cpu_alloc->imported.kctx = kctx;
	if (kctx->infinite_cache_active && (reg->flags & KBASE_REG_CPU_CACHED)) {
		reg->gpu_alloc = kbase_alloc_create(reg->nr_pages,
				KBASE_MEM_TYPE_NATIVE);
		reg->gpu_alloc->imported.kctx = kctx;
	} else {
		reg->gpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
	}

	reg->flags &= ~KBASE_REG_FREE;
	return 0;
}
static inline int kbase_atomic_add_pages(int num_pages, atomic_t *used_pages)
{
	int new_val = atomic_add_return(num_pages, used_pages);
#if defined(CONFIG_MALI_GATOR_SUPPORT)
	kbase_trace_mali_total_alloc_pages_change((long long int)new_val);
#endif
	return new_val;
}

static inline int kbase_atomic_sub_pages(int num_pages, atomic_t *used_pages)
{
	int new_val = atomic_sub_return(num_pages, used_pages);
#if defined(CONFIG_MALI_GATOR_SUPPORT)
	kbase_trace_mali_total_alloc_pages_change((long long int)new_val);
#endif
	return new_val;
}
/*
 * Max size for kbdev memory pool (in pages)
 */
#define KBASE_MEM_POOL_MAX_SIZE_KBDEV (SZ_64M >> PAGE_SHIFT)

/*
 * Max size for kctx memory pool (in pages)
 */
#define KBASE_MEM_POOL_MAX_SIZE_KCTX  (SZ_64M >> PAGE_SHIFT)
/**
 * kbase_mem_pool_init - Create a memory pool for a kbase device
 * @pool:      Memory pool to initialize
 * @max_size:  Maximum number of free pages the pool can hold
 * @kbdev:     Kbase device where memory is used
 * @next_pool: Pointer to the next pool or NULL.
 *
 * Allocations from @pool are in whole pages. Each @pool has a free list where
 * pages can be quickly allocated from. The free list is initially empty and
 * filled whenever pages are freed back to the pool. The number of free pages
 * in the pool will in general not exceed @max_size, but the pool may in
 * certain corner cases grow above @max_size.
 *
 * If @next_pool is not NULL, we will allocate from @next_pool before going to
 * the kernel allocator. Similarly, pages can spill over to @next_pool when
 * @pool is full. Pages are zeroed before they spill over to another pool, to
 * prevent leaking information between applications.
 *
 * A shrinker is registered so that Linux mm can reclaim pages from the pool
 * as needed.
 *
 * Return: 0 on success, negative -errno on error
 */
int kbase_mem_pool_init(struct kbase_mem_pool *pool,
		size_t max_size,
		struct kbase_device *kbdev,
		struct kbase_mem_pool *next_pool);

/**
 * kbase_mem_pool_term - Destroy a memory pool
 * @pool: Memory pool to destroy
 *
 * Pages in the pool will spill over to @next_pool (if available) or be freed
 * to the kernel.
 */
void kbase_mem_pool_term(struct kbase_mem_pool *pool);

/**
 * kbase_mem_pool_alloc - Allocate a page from memory pool
 * @pool: Memory pool to allocate from
 *
 * Allocations from the pool are made as follows:
 * 1. If there are free pages in the pool, allocate a page from @pool.
 * 2. Otherwise, if @next_pool is not NULL and has free pages, allocate a page
 *    from @next_pool.
 * 3. Finally, allocate a page from the kernel.
 *
 * Return: Pointer to allocated page, or NULL if allocation failed.
 */
struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool);
/**
 * kbase_mem_pool_free - Free a page to memory pool
 * @pool:  Memory pool where page should be freed
 * @page:  Page to free to the pool
 * @dirty: Whether some of the page may be dirty in the cache.
 *
 * Pages are freed to the pool as follows:
 * 1. If @pool is not full, add @page to @pool.
 * 2. Otherwise, if @next_pool is not NULL and not full, add @page to
 *    @next_pool.
 * 3. Finally, free @page to the kernel.
 */
void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *page,
		bool dirty);
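
/*
 * Illustrative sketch (not part of the original header): a minimal
 * init/alloc/free/term round trip with no chained pool. The function name is
 * hypothetical.
 */
static inline int kbasep_example_pool_round_trip(struct kbase_device *kbdev)
{
	struct kbase_mem_pool pool;
	struct page *p;
	int err;

	err = kbase_mem_pool_init(&pool, KBASE_MEM_POOL_MAX_SIZE_KBDEV,
			kbdev, NULL);
	if (err)
		return err;

	p = kbase_mem_pool_alloc(&pool);	/* may fall back to the kernel */
	if (p)
		kbase_mem_pool_free(&pool, p, false);	/* page is not dirty */

	kbase_mem_pool_term(&pool);
	return 0;
}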
/**
 * kbase_mem_pool_alloc_pages - Allocate pages from memory pool
 * @pool:     Memory pool to allocate from
 * @nr_pages: Number of pages to allocate
 * @pages:    Pointer to array where the physical address of the allocated
 *            pages will be stored.
 *
 * Like kbase_mem_pool_alloc() but optimized for allocating many pages.
 *
 * Return: 0 on success, negative -errno on error
 */
int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
		phys_addr_t *pages);

/**
 * kbase_mem_pool_free_pages - Free pages to memory pool
 * @pool:     Memory pool where pages should be freed
 * @nr_pages: Number of pages to free
 * @pages:    Pointer to array holding the physical addresses of the pages to
 *            free.
 * @dirty:    Whether any pages may be dirty in the cache.
 *
 * Like kbase_mem_pool_free() but optimized for freeing many pages.
 */
void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
		phys_addr_t *pages, bool dirty);
/**
 * kbase_mem_pool_size - Get number of free pages in memory pool
 * @pool: Memory pool to inspect
 *
 * Note: the size of the pool may in certain corner cases exceed @max_size!
 *
 * Return: Number of free pages in the pool
 */
static inline size_t kbase_mem_pool_size(struct kbase_mem_pool *pool)
{
	return ACCESS_ONCE(pool->cur_size);
}

/**
 * kbase_mem_pool_max_size - Get maximum number of free pages in memory pool
 * @pool: Memory pool to inspect
 *
 * Return: Maximum number of free pages in the pool
 */
static inline size_t kbase_mem_pool_max_size(struct kbase_mem_pool *pool)
{
	return pool->max_size;
}
/**
 * kbase_mem_pool_set_max_size - Set maximum number of free pages in memory pool
 * @pool:     Memory pool to update
 * @max_size: Maximum number of free pages the pool can hold
 *
 * If @max_size is reduced, the pool will be shrunk to adhere to the new limit.
 * For details see kbase_mem_pool_shrink().
 */
void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size);
/**
 * kbase_mem_pool_trim - Grow or shrink the pool to a new size
 * @pool:     Memory pool to trim
 * @new_size: New number of pages in the pool
 *
 * If @new_size > @cur_size, fill the pool with new pages from the kernel, but
 * not above @max_size.
 * If @new_size < @cur_size, shrink the pool by freeing pages to the kernel.
 *
 * Return: The new size of the pool
 */
size_t kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size);
int kbase_region_tracker_init(struct kbase_context *kctx);
void kbase_region_tracker_term(struct kbase_context *kctx);

struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(struct kbase_context *kctx, u64 gpu_addr);

/**
 * @brief Check that a pointer is actually a valid region.
 *
 * Must be called with context lock held.
 */
struct kbase_va_region *kbase_region_tracker_find_region_base_address(struct kbase_context *kctx, u64 gpu_addr);

struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone);
void kbase_free_alloced_region(struct kbase_va_region *reg);
int kbase_add_va_region(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align);

bool kbase_check_alloc_flags(unsigned long flags);
bool kbase_check_import_flags(unsigned long flags);
void kbase_update_region_flags(struct kbase_context *kctx,
		struct kbase_va_region *reg, unsigned long flags);

void kbase_gpu_vm_lock(struct kbase_context *kctx);
void kbase_gpu_vm_unlock(struct kbase_context *kctx);

int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size);

int kbase_mmu_init(struct kbase_context *kctx);
void kbase_mmu_term(struct kbase_context *kctx);

phys_addr_t kbase_mmu_alloc_pgd(struct kbase_context *kctx);
void kbase_mmu_free_pgd(struct kbase_context *kctx);
int kbase_mmu_insert_pages(struct kbase_context *kctx, u64 vpfn,
		phys_addr_t *phys, size_t nr,
		unsigned long flags);
int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
		phys_addr_t phys, size_t nr,
		unsigned long flags);

int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr);
int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, phys_addr_t *phys, size_t nr, unsigned long flags);

/**
 * @brief Register region and map it on the GPU.
 *
 * Call kbase_add_va_region() and map the region on the GPU.
 */
int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align);

/**
 * @brief Remove the region from the GPU and unregister it.
 *
 * Must be called with context lock held.
 */
int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg);
/**
 * The caller has the following locking conditions:
 * - It must hold kbase_as::transaction_mutex on kctx's address space
 * - It must hold the kbasep_js_device_data::runpool_irq::lock
 */
void kbase_mmu_update(struct kbase_context *kctx);

/**
 * The caller has the following locking conditions:
 * - It must hold kbase_as::transaction_mutex on kctx's address space
 * - It must hold the kbasep_js_device_data::runpool_irq::lock
 */
void kbase_mmu_disable(struct kbase_context *kctx);

/**
 * kbase_mmu_disable_as() - set the MMU in unmapped mode for an address space.
 *
 * @kbdev: Kbase device
 * @as_nr: Number of the address space for which the MMU
 *         should be set in unmapped mode.
 *
 * The caller must hold kbdev->as[as_nr].transaction_mutex.
 */
void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr);

void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
/** Dump the MMU tables to a buffer
 *
 * This function allocates a buffer (of @c nr_pages pages) to hold a dump of the MMU tables and fills it. If the
 * buffer is too small then the return value will be NULL.
 *
 * The GPU vm lock must be held when calling this function.
 *
 * The buffer returned should be freed with @ref vfree when it is no longer required.
 *
 * @param[in]   kctx        The kbase context to dump
 * @param[in]   nr_pages    The number of pages to allocate for the buffer.
 *
 * @return The address of the buffer containing the MMU dump or NULL on error (including if the @c nr_pages is too
 * small)
 */
void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages);
int kbase_sync_now(struct kbase_context *kctx, struct base_syncset *syncset);
void kbase_sync_single(struct kbase_context *kctx, phys_addr_t cpu_pa,
		phys_addr_t gpu_pa, off_t offset, size_t size,
		enum kbase_sync_type sync_fn);
void kbase_pre_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr);
void kbase_post_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr);

/* OS specific functions */
int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr);
int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *reg);
void kbase_os_mem_map_lock(struct kbase_context *kctx);
void kbase_os_mem_map_unlock(struct kbase_context *kctx);
/**
 * @brief Update the memory allocation counters for the current process
 *
 * OS specific call to update the current memory allocation counters for the current process with
 * the supplied delta.
 *
 * @param[in] kctx  The kbase context
 * @param[in] pages The desired delta to apply to the memory usage counters.
 */
void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages);
/**
 * @brief Add to the memory allocation counters for the current process
 *
 * OS specific call to add to the current memory allocation counters for the current process by
 * the supplied amount.
 *
 * @param[in] kctx  The kernel base context used for the allocation.
 * @param[in] pages The desired delta to apply to the memory usage counters.
 */
static inline void kbase_process_page_usage_inc(struct kbase_context *kctx, int pages)
{
	kbasep_os_process_page_usage_update(kctx, pages);
}

/**
 * @brief Subtract from the memory allocation counters for the current process
 *
 * OS specific call to subtract from the current memory allocation counters for the current process by
 * the supplied amount.
 *
 * @param[in] kctx  The kernel base context used for the allocation.
 * @param[in] pages The desired delta to apply to the memory usage counters.
 */
static inline void kbase_process_page_usage_dec(struct kbase_context *kctx, int pages)
{
	kbasep_os_process_page_usage_update(kctx, 0 - pages);
}
/**
 * @brief Find the offset of the CPU mapping of a memory allocation containing
 *        a given address range
 *
 * Searches for a CPU mapping of any part of the region starting at @p gpu_addr
 * that fully encloses the CPU virtual address range specified by @p uaddr and
 * @p size. Returns a failure indication if only part of the address range lies
 * within a CPU mapping, or the address range lies within a CPU mapping of a
 * different region.
 *
 * @param[in,out] kctx     The kernel base context used for the allocation.
 * @param[in]     gpu_addr GPU address of the start of the allocated region
 *                         within which to search.
 * @param[in]     uaddr    Start of the CPU virtual address range.
 * @param[in]     size     Size of the CPU virtual address range (in bytes).
 * @param[out]    offset   The offset from the start of the allocation to the
 *                         specified CPU virtual address.
 *
 * @return 0 if offset was obtained successfully. Error code otherwise.
 */
int kbasep_find_enclosing_cpu_mapping_offset(struct kbase_context *kctx,
		u64 gpu_addr, unsigned long uaddr, size_t size, u64 *offset);
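
/*
 * Illustrative sketch (not part of the original header): a typical caller
 * resolves a user pointer back to an offset within the allocation before
 * operating on the backing pages; the signature above is restored from the
 * parameter documentation, and the function name here is hypothetical.
 */
static inline int kbasep_example_resolve_user_range(struct kbase_context *kctx,
		u64 gpu_addr, unsigned long uaddr, size_t size)
{
	u64 offset;
	int err;

	err = kbasep_find_enclosing_cpu_mapping_offset(kctx, gpu_addr,
			uaddr, size, &offset);
	if (err)
		return err;

	/* [offset, offset + size) now locates the range within the alloc */
	return 0;
}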
enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer);

void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);

/**
 * @brief Allocates physical pages.
 *
 * Allocates \a nr_pages_requested and updates the alloc object.
 *
 * @param[in] alloc              allocation object to add pages to
 * @param[in] nr_pages_requested number of physical pages to allocate
 *
 * @return 0 if all pages have been successfully allocated. Error code otherwise
 */
int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_requested);
/**
 * @brief Free physical pages.
 *
 * Frees \a nr_pages_to_free and updates the alloc object.
 *
 * @param[in] alloc            allocation object to free pages from
 * @param[in] nr_pages_to_free number of physical pages to free
 */
int kbase_free_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_to_free);
static inline void kbase_set_dma_addr(struct page *p, dma_addr_t dma_addr)
{
	SetPagePrivate(p);
	if (sizeof(dma_addr_t) > sizeof(p->private)) {
		/* on 32-bit ARM with LPAE dma_addr_t becomes larger, but the
		 * private field stays the same. So we have to be clever and
		 * use the fact that we only store DMA addresses of whole
		 * pages, so the low bits should be zero */
		KBASE_DEBUG_ASSERT(!(dma_addr & (PAGE_SIZE - 1)));
		set_page_private(p, dma_addr >> PAGE_SHIFT);
	} else {
		set_page_private(p, dma_addr);
	}
}

static inline dma_addr_t kbase_dma_addr(struct page *p)
{
	if (sizeof(dma_addr_t) > sizeof(p->private))
		return ((dma_addr_t)page_private(p)) << PAGE_SHIFT;

	return (dma_addr_t)page_private(p);
}
static inline void kbase_clear_dma_addr(struct page *p)
{
	ClearPagePrivate(p);
}
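
/*
 * Illustrative sketch (not part of the original header): storing and reading
 * back a page's DMA address round-trips through page->private whether or not
 * the shifted representation is in use. The function name is hypothetical.
 */
static inline void kbasep_example_dma_addr_round_trip(struct page *p,
		dma_addr_t handle)
{
	/* handle must be page aligned, as asserted by kbase_set_dma_addr() */
	kbase_set_dma_addr(p, handle);
	KBASE_DEBUG_ASSERT(kbase_dma_addr(p) == handle);
	kbase_clear_dma_addr(p);
}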
/**
 * @brief Process a bus or page fault.
 *
 * This function will process a fault on a specific address space.
 *
 * @param[in] kbdev The @ref kbase_device the fault happened on
 * @param[in] kctx  The @ref kbase_context for the faulting address space if
 *                  one was found.
 * @param[in] as    The address space that has the fault
 */
void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
		struct kbase_context *kctx, struct kbase_as *as);
/**
 * @brief Process a page fault.
 *
 * @param[in] data work_struct passed by queue_work()
 */
void page_fault_worker(struct work_struct *data);

/**
 * @brief Process a bus fault.
 *
 * @param[in] data work_struct passed by queue_work()
 */
void bus_fault_worker(struct work_struct *data);

/**
 * @brief Flush MMU workqueues.
 *
 * This function will cause any outstanding page or bus faults to be processed.
 * It should be called prior to powering off the GPU.
 *
 * @param[in] kbdev Device pointer
 */
void kbase_flush_mmu_wqs(struct kbase_device *kbdev);

/**
 * kbase_sync_single_for_device - update physical memory and give GPU ownership
 * @kbdev:  Device pointer
 * @handle: DMA address of region
 * @size:   Size of region to sync
 * @dir:    DMA data direction
 */
void kbase_sync_single_for_device(struct kbase_device *kbdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir);

/**
 * kbase_sync_single_for_cpu - update physical memory and give CPU ownership
 * @kbdev:  Device pointer
 * @handle: DMA address of region
 * @size:   Size of region to sync
 * @dir:    DMA data direction
 */
void kbase_sync_single_for_cpu(struct kbase_device *kbdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir);
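
/*
 * Illustrative sketch (not part of the original header): the usual pattern
 * brackets CPU accesses to a DMA buffer between a for_cpu sync (take CPU
 * ownership) and a for_device sync (hand ownership back to the GPU). The
 * function name is hypothetical.
 */
static inline void kbasep_example_cpu_access_window(struct kbase_device *kbdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	kbase_sync_single_for_cpu(kbdev, handle, size, dir);
	/* ... CPU reads/writes the buffer here ... */
	kbase_sync_single_for_device(kbdev, handle, size, dir);
}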
#endif				/* _KBASE_MEM_H_ */