3 * (C) COPYRIGHT ARM Limited. All rights reserved.
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
10 * A copy of the licence is included with the program, and can also be obtained
11 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12 * Boston, MA 02110-1301, USA.
21 * @file mali_kbase_mem_linux.c
22 * Base kernel memory APIs, Linux implementation.
25 #include <linux/compat.h>
26 #include <linux/kernel.h>
27 #include <linux/bug.h>
30 #include <linux/version.h>
31 #include <linux/dma-mapping.h>
32 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
33 #include <linux/dma-attrs.h>
35 #ifdef CONFIG_DMA_SHARED_BUFFER
36 #include <linux/dma-buf.h>
37 #endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
39 #include <mali_kbase.h>
40 #include <mali_kbase_mem_linux.h>
41 #include <mali_kbase_config_defaults.h>
43 static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma);
44 static const struct vm_operations_struct kbase_vm_ops;
46 struct kbase_va_region *kbase_mem_alloc(kbase_context *kctx, u64 va_pages, u64 commit_pages, u64 extent, u64 *flags, u64 *gpu_va, u16 *va_alignment)
51 struct kbase_va_region *reg;
53 KBASE_DEBUG_ASSERT(kctx);
54 KBASE_DEBUG_ASSERT(flags);
55 KBASE_DEBUG_ASSERT(gpu_va);
56 KBASE_DEBUG_ASSERT(va_alignment);
58 dev = kctx->kbdev->dev;
59 *va_alignment = 0; /* no alignment by default */
60 *gpu_va = 0; /* return 0 on failure */
62 gpu_pc_bits = kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
63 cpu_va_bits = BITS_PER_LONG;
66 dev_warn(dev, "kbase_mem_alloc called with 0 va_pages!");
70 #if defined(CONFIG_64BIT)
74 /* force SAME_VA if a 64-bit client */
75 *flags |= BASE_MEM_SAME_VA;
78 if (!kbase_check_alloc_flags(*flags)) {
80 "kbase_mem_alloc called with bad flags (%llx)",
81 (unsigned long long)*flags);
85 /* Limit GPU executable allocs to GPU PC size */
86 if ((*flags & BASE_MEM_PROT_GPU_EX) &&
87 (va_pages > (1ULL << gpu_pc_bits >> PAGE_SHIFT)))
90 /* find out which VA zone to use */
91 if (*flags & BASE_MEM_SAME_VA)
92 zone = KBASE_REG_ZONE_SAME_VA;
93 else if (*flags & BASE_MEM_PROT_GPU_EX)
94 zone = KBASE_REG_ZONE_EXEC;
96 zone = KBASE_REG_ZONE_CUSTOM_VA;
98 reg = kbase_alloc_free_region(kctx, 0, va_pages, zone);
100 dev_err(dev, "Failed to allocate free region");
104 if (MALI_ERROR_NONE != kbase_reg_prepare_native(reg, kctx)) {
105 dev_err(dev, "Failed to prepare region");
109 kbase_update_region_flags(reg, *flags);
111 if (*flags & BASE_MEM_GROW_ON_GPF)
112 reg->extent = extent;
116 if (kbase_alloc_phy_pages(reg, va_pages, commit_pages)) {
117 dev_warn(dev, "Failed to allocate %lld pages (va_pages=%lld)",
118 (unsigned long long)commit_pages, (unsigned long long)va_pages);
122 kbase_gpu_vm_lock(kctx);
124 /* mmap needed to setup VA? */
125 if (*flags & BASE_MEM_SAME_VA) {
126 /* Bind to a cookie */
127 if (!kctx->cookies) {
128 dev_err(dev, "No cookies available for allocation!");
131 /* return a cookie */
132 *gpu_va = __ffs(kctx->cookies);
133 kctx->cookies &= ~(1UL << *gpu_va);
134 BUG_ON(kctx->pending_regions[*gpu_va]);
135 kctx->pending_regions[*gpu_va] = reg;
137 /* relocate to correct base */
138 *gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE);
139 *gpu_va <<= PAGE_SHIFT;
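/*
 * At this point no GPU mapping exists: the value handed back is a cookie
 * (an offset inside the BASE_MEM_COOKIE_BASE range) rather than a real
 * GPU VA. The region is parked in kctx->pending_regions until user space
 * mmap()s this offset, at which point kbase_mmap() picks the region up,
 * maps it on the GPU and sets up the CPU mapping at the same address,
 * then releases the cookie.
 */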
141 /* See if we must align memory due to GPU PC bits vs CPU VA */
142 if ((*flags & BASE_MEM_PROT_GPU_EX) &&
143 (cpu_va_bits > gpu_pc_bits)) {
144 *va_alignment = gpu_pc_bits;
145 reg->flags |= KBASE_REG_ALIGNED;
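/*
 * Executable regions are restricted to the range the GPU program counter
 * can address (1 << gpu_pc_bits bytes, checked earlier in this function).
 * When the CPU VA space is wider than that, the region is marked
 * KBASE_REG_ALIGNED so the mmap path over-allocates VA and aligns the
 * mapping start to a 1 << gpu_pc_bits boundary (see the cookie case in
 * kbase_mmap()).
 */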
147 } else /* we control the VA */ {
148 if (MALI_ERROR_NONE != kbase_gpu_mmap(kctx, reg, 0, va_pages, 1)) {
149 dev_warn(dev, "Failed to map memory on GPU");
152 /* return real GPU VA */
153 *gpu_va = reg->start_pfn << PAGE_SHIFT;
156 kbase_gpu_vm_unlock(kctx);
161 kbase_gpu_vm_unlock(kctx);
163 kbase_mem_phy_alloc_put(reg->alloc);
173 mali_error kbase_mem_query(kbase_context *kctx, mali_addr64 gpu_addr, int query, u64 * const out)
175 kbase_va_region *reg;
176 mali_error ret = MALI_ERROR_FUNCTION_FAILED;
178 KBASE_DEBUG_ASSERT(kctx);
179 KBASE_DEBUG_ASSERT(out);
181 kbase_gpu_vm_lock(kctx);
183 /* Validate the region */
184 reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
185 if (!reg || (reg->flags & KBASE_REG_FREE))
189 case KBASE_MEM_QUERY_COMMIT_SIZE:
190 if (reg->alloc->type != KBASE_MEM_TYPE_ALIAS) {
191 *out = kbase_reg_current_backed_size(reg);
194 struct kbase_aliased *aliased;
196 aliased = reg->alloc->imported.alias.aliased;
197 for (i = 0; i < reg->alloc->imported.alias.nents; i++)
198 *out += aliased[i].length;
201 case KBASE_MEM_QUERY_VA_SIZE:
202 *out = reg->nr_pages;
204 case KBASE_MEM_QUERY_FLAGS:
207 if (KBASE_REG_GPU_WR & reg->flags)
208 *out |= BASE_MEM_PROT_GPU_WR;
209 if (KBASE_REG_GPU_RD & reg->flags)
210 *out |= BASE_MEM_PROT_GPU_RD;
211 if (!(KBASE_REG_GPU_NX & reg->flags))
212 *out |= BASE_MEM_PROT_GPU_EX;
213 if (KBASE_REG_SHARE_BOTH & reg->flags)
214 *out |= BASE_MEM_COHERENT_SYSTEM;
215 if (KBASE_REG_SHARE_IN & reg->flags)
216 *out |= BASE_MEM_COHERENT_LOCAL;
224 ret = MALI_ERROR_NONE;
227 kbase_gpu_vm_unlock(kctx);
231 mali_error kbase_mem_flags_change(kbase_context *kctx, mali_addr64 gpu_addr, unsigned int flags, unsigned int mask)
233 kbase_va_region *reg;
234 mali_error ret = MALI_ERROR_FUNCTION_FAILED;
235 unsigned int real_flags = 0;
236 unsigned int prev_flags = 0;
238 KBASE_DEBUG_ASSERT(kctx);
241 return MALI_ERROR_FUNCTION_FAILED;
243 /* nuke other bits */
246 /* check for only supported flags */
247 if (flags & ~(BASE_MEM_COHERENT_SYSTEM | BASE_MEM_COHERENT_LOCAL))
250 /* mask covers bits we don't support? */
251 if (mask & ~(BASE_MEM_COHERENT_SYSTEM | BASE_MEM_COHERENT_LOCAL))
255 if (BASE_MEM_COHERENT_SYSTEM & flags)
256 real_flags |= KBASE_REG_SHARE_BOTH;
257 else if (BASE_MEM_COHERENT_LOCAL & flags)
258 real_flags |= KBASE_REG_SHARE_IN;
260 /* now we can lock down the context, and find the region */
261 kbase_gpu_vm_lock(kctx);
263 /* Validate the region */
264 reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
265 if (!reg || (reg->flags & KBASE_REG_FREE))
268 /* limit to imported memory */
269 if ((reg->alloc->type != KBASE_MEM_TYPE_IMPORTED_UMP) &&
270 (reg->alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM))
274 if (real_flags == (reg->flags & (KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH)))
276 ret = MALI_ERROR_NONE;
280 /* save for roll back */
281 prev_flags = reg->flags;
282 reg->flags &= ~(KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH);
283 reg->flags |= real_flags;
285 /* Currently supporting only imported memory */
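/*
 * Apply the change: for UMP imports the existing GPU page table entries
 * are rewritten with the new flags; for dma-buf (UMM) imports only the
 * stored flags change, to be picked up by future mappings, and a warning
 * is raised if the buffer is still mapped. On failure the saved flags
 * are restored below.
 */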
286 switch (reg->alloc->type)
289 case KBASE_MEM_TYPE_IMPORTED_UMP:
290 ret = kbase_mmu_update_pages(kctx, reg->start_pfn, kbase_get_phy_pages(reg), reg->alloc->nents, reg->flags);
293 #ifdef CONFIG_DMA_SHARED_BUFFER
294 case KBASE_MEM_TYPE_IMPORTED_UMM:
295 /* Future use will use the new flags, existing mapping will NOT be updated
296 * as memory should not be in use by the GPU when updating the flags.
298 ret = MALI_ERROR_NONE;
299 WARN_ON(reg->alloc->imported.umm.current_mapping_usage_count);
306 /* roll back on error, i.e. not UMP */
307 if (ret != MALI_ERROR_NONE)
308 reg->flags = prev_flags;
311 kbase_gpu_vm_unlock(kctx);
317 static struct kbase_va_region *kbase_mem_from_ump(kbase_context *kctx, ump_secure_id id, u64 *va_pages, u64 *flags)
319 struct kbase_va_region *reg;
322 const ump_dd_physical_block_64 *block_array;
325 ump_alloc_flags ump_flags;
326 ump_alloc_flags cpu_flags;
327 ump_alloc_flags gpu_flags;
329 KBASE_DEBUG_ASSERT(kctx);
330 KBASE_DEBUG_ASSERT(va_pages);
331 KBASE_DEBUG_ASSERT(flags);
333 umph = ump_dd_from_secure_id(id);
334 if (UMP_DD_INVALID_MEMORY_HANDLE == umph)
337 ump_flags = ump_dd_allocation_flags_get(umph);
338 cpu_flags = (ump_flags >> UMP_DEVICE_CPU_SHIFT) & UMP_DEVICE_MASK;
339 gpu_flags = (ump_flags >> DEFAULT_UMP_GPU_DEVICE_SHIFT) &
342 *va_pages = ump_dd_size_get_64(umph);
343 *va_pages >>= PAGE_SHIFT;
348 if (*flags & BASE_MEM_SAME_VA)
349 reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_SAME_VA);
351 reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_CUSTOM_VA);
356 /* we've got pages to map now, and support SAME_VA */
357 *flags |= KBASE_MEM_IMPORT_HAVE_PAGES;
359 reg->alloc = kbase_alloc_create(*va_pages, KBASE_MEM_TYPE_IMPORTED_UMP);
360 if (IS_ERR_OR_NULL(reg->alloc))
363 reg->alloc->imported.ump_handle = umph;
365 reg->flags &= ~KBASE_REG_FREE;
366 reg->flags |= KBASE_REG_GPU_NX; /* UMP is always No eXecute */
367 reg->flags &= ~KBASE_REG_GROWABLE; /* UMP cannot be grown */
369 if ((cpu_flags & (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) ==
370 (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) {
371 reg->flags |= KBASE_REG_CPU_CACHED;
372 *flags |= BASE_MEM_CACHED_CPU;
375 if (cpu_flags & UMP_PROT_DEVICE_WR) {
376 reg->flags |= KBASE_REG_CPU_WR;
377 *flags |= BASE_MEM_PROT_CPU_WR;
380 if (cpu_flags & UMP_PROT_DEVICE_RD) {
381 reg->flags |= KBASE_REG_CPU_RD;
382 *flags |= BASE_MEM_PROT_CPU_RD;
385 if ((gpu_flags & (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) ==
386 (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR))
387 reg->flags |= KBASE_REG_GPU_CACHED;
389 if (gpu_flags & UMP_PROT_DEVICE_WR) {
390 reg->flags |= KBASE_REG_GPU_WR;
391 *flags |= BASE_MEM_PROT_GPU_WR;
394 if (gpu_flags & UMP_PROT_DEVICE_RD) {
395 reg->flags |= KBASE_REG_GPU_RD;
396 *flags |= BASE_MEM_PROT_GPU_RD;
399 /* ump phys block query */
400 ump_dd_phys_blocks_get_64(umph, &block_count, &block_array);
402 for (i = 0; i < block_count; i++) {
403 for (j = 0; j < (block_array[i].size >> PAGE_SHIFT); j++) {
404 reg->alloc->pages[page] = block_array[i].addr + (j << PAGE_SHIFT);
408 reg->alloc->nents = *va_pages;
417 ump_dd_release(umph);
422 #endif /* CONFIG_UMP */
424 #ifdef CONFIG_DMA_SHARED_BUFFER
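/*
 * Import a dma-buf. Only dma_buf_get()/dma_buf_attach() happen here; the
 * attachment is not mapped yet, so the region starts with nents == 0
 * ("no pages to map yet") and KBASE_MEM_IMPORT_HAVE_PAGES is deliberately
 * not set, unlike the UMP path above. The CPU side of such a region is
 * later mapped with dma_buf_mmap() (see kbase_mmap()).
 */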
425 static struct kbase_va_region *kbase_mem_from_umm(kbase_context *kctx, int fd, u64 *va_pages, u64 *flags)
427 struct kbase_va_region *reg;
428 struct dma_buf *dma_buf;
429 struct dma_buf_attachment *dma_attachment;
431 dma_buf = dma_buf_get(fd);
432 if (IS_ERR_OR_NULL(dma_buf))
435 dma_attachment = dma_buf_attach(dma_buf, kctx->kbdev->dev);
439 *va_pages = PAGE_ALIGN(dma_buf->size) >> PAGE_SHIFT;
444 *flags &= ~BASE_MEM_SAME_VA;
447 if (!is_compat_task()) {
448 /* 64-bit tasks must MMAP anyway, but we don't expose this address to clients */
449 *flags |= KBASE_MEM_NEED_MMAP;
450 reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_SAME_VA);
455 reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_CUSTOM_VA);
461 reg->alloc = kbase_alloc_create(*va_pages, KBASE_MEM_TYPE_IMPORTED_UMM);
462 if (IS_ERR_OR_NULL(reg->alloc))
465 /* No pages to map yet */
466 reg->alloc->nents = 0;
468 reg->flags &= ~KBASE_REG_FREE;
469 reg->flags |= KBASE_REG_GPU_NX; /* UMM is always No eXecute */
470 reg->flags &= ~KBASE_REG_GROWABLE; /* UMM cannot be grown */
471 reg->flags |= KBASE_REG_GPU_CACHED;
473 if (*flags & BASE_MEM_PROT_CPU_WR)
474 reg->flags |= KBASE_REG_CPU_WR;
476 if (*flags & BASE_MEM_PROT_CPU_RD)
477 reg->flags |= KBASE_REG_CPU_RD;
479 if (*flags & BASE_MEM_PROT_GPU_WR)
480 reg->flags |= KBASE_REG_GPU_WR;
482 if (*flags & BASE_MEM_PROT_GPU_RD)
483 reg->flags |= KBASE_REG_GPU_RD;
485 /* no read or write permission given on import; the right permissions are only granted at run time */
487 reg->alloc->type = BASE_TMEM_IMPORT_TYPE_UMM;
488 reg->alloc->imported.umm.sgt = NULL;
489 reg->alloc->imported.umm.dma_buf = dma_buf;
490 reg->alloc->imported.umm.dma_attachment = dma_attachment;
491 reg->alloc->imported.umm.current_mapping_usage_count = 0;
500 dma_buf_detach(dma_buf, dma_attachment);
502 dma_buf_put(dma_buf);
506 #endif /* CONFIG_DMA_SHARED_BUFFER */
508 u64 kbase_mem_alias(kbase_context *kctx, u64 *flags, u64 stride,
509 u64 nents, struct base_mem_aliasing_info *ai,
512 kbase_va_region *reg;
516 KBASE_DEBUG_ASSERT(kctx);
517 KBASE_DEBUG_ASSERT(flags);
518 KBASE_DEBUG_ASSERT(ai);
519 KBASE_DEBUG_ASSERT(num_pages);
521 /* mask to only allowed flags */
522 *flags &= (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR |
523 BASE_MEM_HINT_GPU_RD | BASE_MEM_HINT_GPU_WR |
524 BASE_MEM_COHERENT_SYSTEM | BASE_MEM_COHERENT_LOCAL);
526 if (!(*flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR))) {
527 dev_warn(kctx->kbdev->dev,
528 "kbase_mem_alias called with bad flags (%llx)",
529 (unsigned long long)*flags);
539 /* calculate the number of pages this alias will cover */
540 *num_pages = nents * stride;
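/*
 * Layout of an alias: the GPU VA range covers nents * stride pages, with
 * each source occupying a stride-sized slot. A source either references
 * the special write-alloc handle or a sub-range [offset, offset + length)
 * of an existing native allocation; length may be smaller than stride,
 * leaving a gap before the next slot.
 */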
543 if (!is_compat_task()) {
544 /* 64-bit tasks must MMAP anyway, but not expose this address to
546 *flags |= KBASE_MEM_NEED_MMAP;
547 reg = kbase_alloc_free_region(kctx, 0, *num_pages,
548 KBASE_REG_ZONE_SAME_VA);
553 reg = kbase_alloc_free_region(kctx, 0, *num_pages,
554 KBASE_REG_ZONE_CUSTOM_VA);
560 /* zero-sized page array, as we don't need one/can support one */
561 reg->alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_ALIAS);
562 if (IS_ERR_OR_NULL(reg->alloc))
565 kbase_update_region_flags(reg, *flags);
567 reg->alloc->imported.alias.nents = nents;
568 reg->alloc->imported.alias.stride = stride;
569 reg->alloc->imported.alias.aliased = vzalloc(sizeof(*reg->alloc->imported.alias.aliased) * nents);
570 if (!reg->alloc->imported.alias.aliased)
571 goto no_aliased_array;
573 kbase_gpu_vm_lock(kctx);
575 /* validate and add src handles */
576 for (i = 0; i < nents; i++) {
577 if (ai[i].handle < BASE_MEM_FIRST_FREE_ADDRESS) {
578 if (ai[i].handle != BASE_MEM_WRITE_ALLOC_PAGES_HANDLE)
579 goto bad_handle; /* unsupported magic handle */
581 goto bad_handle; /* must be > 0 */
582 if (ai[i].length > stride)
583 goto bad_handle; /* can't be larger than the
585 reg->alloc->imported.alias.aliased[i].length = ai[i].length;
587 struct kbase_va_region *aliasing_reg;
588 struct kbase_mem_phy_alloc *alloc;
589 aliasing_reg = kbase_region_tracker_find_region_base_address(kctx, (ai[i].handle >> PAGE_SHIFT) << PAGE_SHIFT);
591 /* validate found region */
593 goto bad_handle; /* Not found */
594 if (aliasing_reg->flags & KBASE_REG_FREE)
595 goto bad_handle; /* Free region */
596 if (!aliasing_reg->alloc)
597 goto bad_handle; /* No alloc */
598 if (aliasing_reg->alloc->type != KBASE_MEM_TYPE_NATIVE)
599 goto bad_handle; /* Not a native alloc */
601 /* check size against stride */
603 goto bad_handle; /* must be > 0 */
604 if (ai[i].length > stride)
605 goto bad_handle; /* can't be larger than the
608 alloc = aliasing_reg->alloc;
610 /* check against the alloc's size */
611 if (ai[i].offset > alloc->nents)
612 goto bad_handle; /* beyond end */
613 if (ai[i].offset + ai[i].length > alloc->nents)
614 goto bad_handle; /* beyond end */
616 reg->alloc->imported.alias.aliased[i].alloc = kbase_mem_phy_alloc_get(alloc);
617 reg->alloc->imported.alias.aliased[i].length = ai[i].length;
618 reg->alloc->imported.alias.aliased[i].offset = ai[i].offset;
623 if (!is_compat_task()) {
624 /* Bind to a cookie */
625 if (!kctx->cookies) {
626 dev_err(kctx->kbdev->dev, "No cookies "
627 "available for allocation!");
630 /* return a cookie */
631 gpu_va = __ffs(kctx->cookies);
632 kctx->cookies &= ~(1UL << gpu_va);
633 BUG_ON(kctx->pending_regions[gpu_va]);
634 kctx->pending_regions[gpu_va] = reg;
636 /* relocate to correct base */
637 gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE);
638 gpu_va <<= PAGE_SHIFT;
639 } else /* we control the VA */ {
643 if (MALI_ERROR_NONE != kbase_gpu_mmap(kctx, reg, 0,
645 dev_warn(kctx->kbdev->dev,
646 "Failed to map memory on GPU");
649 /* return real GPU VA */
650 gpu_va = reg->start_pfn << PAGE_SHIFT;
653 reg->flags &= ~KBASE_REG_FREE;
654 reg->flags &= ~KBASE_REG_GROWABLE;
656 kbase_gpu_vm_unlock(kctx);
665 kbase_gpu_vm_unlock(kctx);
667 kbase_mem_phy_alloc_put(reg->alloc);
677 int kbase_mem_import(kbase_context *kctx, base_mem_import_type type, int handle, mali_addr64 *gpu_va, u64 *va_pages, u64 *flags)
679 kbase_va_region *reg;
681 KBASE_DEBUG_ASSERT(kctx);
682 KBASE_DEBUG_ASSERT(gpu_va);
683 KBASE_DEBUG_ASSERT(va_pages);
684 KBASE_DEBUG_ASSERT(flags);
687 if (!is_compat_task())
688 *flags |= BASE_MEM_SAME_VA;
693 case BASE_MEM_IMPORT_TYPE_UMP:
694 reg = kbase_mem_from_ump(kctx, (ump_secure_id)handle, va_pages, flags);
696 #endif /* CONFIG_UMP */
697 #ifdef CONFIG_DMA_SHARED_BUFFER
698 case BASE_MEM_IMPORT_TYPE_UMM:
699 reg = kbase_mem_from_umm(kctx, handle, va_pages, flags);
701 #endif /* CONFIG_DMA_SHARED_BUFFER */
710 kbase_gpu_vm_lock(kctx);
712 /* mmap needed to setup VA? */
713 if (*flags & (BASE_MEM_SAME_VA | KBASE_MEM_NEED_MMAP)) {
714 /* Bind to a cookie */
717 /* return a cookie */
718 *gpu_va = __ffs(kctx->cookies);
719 kctx->cookies &= ~(1UL << *gpu_va);
720 BUG_ON(kctx->pending_regions[*gpu_va]);
721 kctx->pending_regions[*gpu_va] = reg;
723 /* relocate to correct base */
724 *gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE);
725 *gpu_va <<= PAGE_SHIFT;
727 } else if (*flags & KBASE_MEM_IMPORT_HAVE_PAGES) {
728 /* we control the VA, mmap now to the GPU */
729 if (MALI_ERROR_NONE != kbase_gpu_mmap(kctx, reg, 0, *va_pages, 1))
731 /* return real GPU VA */
732 *gpu_va = reg->start_pfn << PAGE_SHIFT;
734 /* we control the VA, but nothing to mmap yet */
735 if (MALI_ERROR_NONE != kbase_add_va_region(kctx, reg, 0, *va_pages, 1))
737 /* return real GPU VA */
738 *gpu_va = reg->start_pfn << PAGE_SHIFT;
741 kbase_gpu_vm_unlock(kctx);
747 kbase_gpu_vm_unlock(kctx);
748 kbase_mem_phy_alloc_put(reg->alloc);
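/*
 * zap_range_nolock() - remove the CPU PTEs backing [start, end) without
 * taking mmap_sem itself; the caller (kbase_mem_commit()) already holds
 * it for reading. It walks every VMA intersecting the range, checks that
 * the VMA belongs to the given vm_ops, and unmaps the overlapping portion
 * with zap_vma_ptes(). Used when shrinking a region so user space can no
 * longer reach pages that are about to be freed.
 */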
759 static int zap_range_nolock(struct mm_struct *mm,
760 const struct vm_operations_struct *vm_ops,
761 unsigned long start, unsigned long end)
763 struct vm_area_struct *vma;
764 int err = -EINVAL; /* in case end < start */
766 while (start < end) {
767 unsigned long local_end;
769 vma = find_vma_intersection(mm, start, end);
774 if (vma->vm_ops != vm_ops)
777 local_end = vma->vm_end;
782 err = zap_vma_ptes(vma, start, local_end - start);
787 /* go to next vma, if any */
794 int kbase_mem_commit(kbase_context *kctx, mali_addr64 gpu_addr, u64 new_pages, base_backing_threshold_status *failure_reason)
799 kbase_va_region *reg;
800 phys_addr_t *phy_pages;
802 KBASE_DEBUG_ASSERT(kctx);
803 KBASE_DEBUG_ASSERT(failure_reason);
804 KBASE_DEBUG_ASSERT(gpu_addr != 0);
806 down_read(&current->mm->mmap_sem);
807 kbase_gpu_vm_lock(kctx);
809 /* Validate the region */
810 reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
811 if (!reg || (reg->flags & KBASE_REG_FREE)) {
812 *failure_reason = BASE_BACKING_THRESHOLD_ERROR_INVALID_ARGUMENTS;
816 KBASE_DEBUG_ASSERT(reg->alloc);
818 if (reg->alloc->type != KBASE_MEM_TYPE_NATIVE) {
819 *failure_reason = BASE_BACKING_THRESHOLD_ERROR_NOT_GROWABLE;
823 if (0 == (reg->flags & KBASE_REG_GROWABLE)) {
824 *failure_reason = BASE_BACKING_THRESHOLD_ERROR_NOT_GROWABLE;
828 if (new_pages > reg->nr_pages) {
829 /* Would overflow the VA region */
830 *failure_reason = BASE_BACKING_THRESHOLD_ERROR_INVALID_ARGUMENTS;
834 /* can't be mapped more than once on the GPU */
835 if (atomic_read(&reg->alloc->gpu_mappings) > 1) {
836 *failure_reason = BASE_BACKING_THRESHOLD_ERROR_NOT_GROWABLE;
840 if (new_pages == reg->alloc->nents) {
846 phy_pages = kbase_get_phy_pages(reg);
847 old_pages = kbase_reg_current_backed_size(reg);
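/*
 * Grow: allocate 'delta' extra physical pages and insert them into the
 * GPU page table after the currently backed pages; if the GPU mapping
 * fails the new pages are freed again. Shrink: first zap the affected
 * CPU mappings so user space can no longer touch the pages, then tear
 * down the GPU mappings past new_pages and free the physical pages,
 * waiting for the GPU write buffer to drain first on GPUs with hardware
 * issue 6367.
 */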
849 if (new_pages > old_pages) {
852 delta = new_pages - old_pages;
853 /* Allocate some more pages */
854 if (MALI_ERROR_NONE != kbase_alloc_phy_pages_helper(reg->alloc, delta)) {
855 *failure_reason = BASE_BACKING_THRESHOLD_ERROR_OOM;
858 err = kbase_mmu_insert_pages(kctx, reg->start_pfn + old_pages, phy_pages + old_pages, delta, reg->flags);
859 if (MALI_ERROR_NONE != err) {
860 kbase_free_phy_pages_helper(reg->alloc, delta);
861 *failure_reason = BASE_BACKING_THRESHOLD_ERROR_OOM;
866 struct kbase_cpu_mapping *mapping;
869 /* first, unmap from any mappings affected */
870 list_for_each_entry(mapping, &reg->alloc->mappings, mappings_list) {
871 unsigned long mapping_size = (mapping->vm_end - mapping->vm_start) >> PAGE_SHIFT;
873 /* is this mapping affected? */
874 if ((mapping->page_off + mapping_size) > new_pages) {
875 unsigned long first_bad = 0;
878 if (new_pages > mapping->page_off)
879 first_bad = new_pages - mapping->page_off;
881 zap_res = zap_range_nolock(current->mm,
884 (first_bad << PAGE_SHIFT),
887 "Failed to zap VA range (0x%lx -0x%lx);\n",
889 (first_bad << PAGE_SHIFT),
895 /* Free some pages */
896 delta = old_pages - new_pages;
897 err = kbase_mmu_teardown_pages(kctx, reg->start_pfn + new_pages, delta);
898 if (MALI_ERROR_NONE != err) {
899 *failure_reason = BASE_BACKING_THRESHOLD_ERROR_OOM;
903 if (kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_6367)) {
904 /* Wait for GPU to flush write buffer before freeing physical pages */
905 kbase_wait_write_flush(kctx);
908 kbase_free_phy_pages_helper(reg->alloc, delta);
914 kbase_gpu_vm_unlock(kctx);
915 up_read(&current->mm->mmap_sem);
921 STATIC void kbase_cpu_vm_open(struct vm_area_struct *vma)
923 struct kbase_cpu_mapping *map = vma->vm_private_data;
924 KBASE_DEBUG_ASSERT(map);
925 KBASE_DEBUG_ASSERT(map->count > 0);
926 /* non-atomic as we're under Linux' mm lock */
930 STATIC void kbase_cpu_vm_close(struct vm_area_struct *vma)
932 struct kbase_cpu_mapping *map = vma->vm_private_data;
933 KBASE_DEBUG_ASSERT(map);
934 KBASE_DEBUG_ASSERT(map->count > 0);
936 /* non-atomic as we're under Linux' mm lock */
940 KBASE_DEBUG_ASSERT(map->kctx);
941 KBASE_DEBUG_ASSERT(map->alloc);
943 kbase_gpu_vm_lock(map->kctx);
946 KBASE_DEBUG_ASSERT((map->region->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_SAME_VA);
947 kbase_mem_free_region(map->kctx, map->region);
950 list_del(&map->mappings_list);
952 kbase_gpu_vm_unlock(map->kctx);
954 kbase_mem_phy_alloc_put(map->alloc);
958 KBASE_EXPORT_TEST_API(kbase_cpu_vm_close)
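/*
 * CPU page fault handler for kbase regions. The faulting offset is
 * recomputed from the VMA because vm_pgoff holds a GPU VA or a cookie,
 * so vmf->pgoff cannot be used directly. Instead of inserting only the
 * faulting page, every currently backed page from the fault location
 * onwards is inserted with vm_insert_pfn(), and VM_FAULT_NOPAGE is
 * returned since the PTEs are already installed. Faults beyond the
 * backed size (alloc->nents) get SIGSEGV.
 */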
961 STATIC int kbase_cpu_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
963 struct kbase_cpu_mapping *map = vma->vm_private_data;
967 KBASE_DEBUG_ASSERT(map);
968 KBASE_DEBUG_ASSERT(map->count > 0);
969 KBASE_DEBUG_ASSERT(map->kctx);
970 KBASE_DEBUG_ASSERT(map->alloc);
972 /* we don't use vmf->pgoff as it's affected by our mmap with offset being a GPU VA or a cookie */
973 rel_pgoff = ((unsigned long)vmf->virtual_address - map->vm_start) >> PAGE_SHIFT;
975 kbase_gpu_vm_lock(map->kctx);
976 if (map->page_off + rel_pgoff >= map->alloc->nents)
977 goto locked_bad_fault;
979 /* insert all valid pages from the fault location */
981 i < MIN((vma->vm_end - vma->vm_start) >> PAGE_SHIFT,
982 map->alloc->nents - map->page_off); i++) {
983 int ret = vm_insert_pfn(vma, map->vm_start + (i << PAGE_SHIFT),
984 PFN_DOWN(map->alloc->pages[map->page_off + i]));
985 if (ret < 0 && ret != -EBUSY)
986 goto locked_bad_fault;
989 kbase_gpu_vm_unlock(map->kctx);
990 /* we resolved it, nothing for VM to do */
991 return VM_FAULT_NOPAGE;
994 kbase_gpu_vm_unlock(map->kctx);
995 send_sig(SIGSEGV, current, 1);
996 return VM_FAULT_NOPAGE;
999 static const struct vm_operations_struct kbase_vm_ops = {
1000 .open = kbase_cpu_vm_open,
1001 .close = kbase_cpu_vm_close,
1002 .fault = kbase_cpu_vm_fault
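/*
 * Back a user VMA with a kbase region. A struct kbase_cpu_mapping is
 * attached as vm_private_data and added to the allocation's mapping list,
 * and the VMA is marked DONTCOPY/DONTEXPAND/IO (plus DONTDUMP on 3.7+).
 * Uncached regions are mapped write-combined. Ordinary allocations are
 * mapped by inserting PFNs directly (VM_PFNMAP); when a kernel vmalloc
 * buffer is supplied in kaddr (the trace buffer and MMU dump mappings),
 * it is remapped with remap_vmalloc_range() and the VMA is marked
 * VM_MIXEDMAP instead.
 */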
1005 static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vma, void *kaddr, size_t nr_pages, int free_on_close)
1007 struct kbase_cpu_mapping *map;
1008 u64 start_off = vma->vm_pgoff - reg->start_pfn;
1009 phys_addr_t *page_array;
1013 map = kzalloc(sizeof(*map), GFP_KERNEL);
1022 * VM_DONTCOPY - don't make this mapping available in fork'ed processes
1023 * VM_DONTEXPAND - disable mremap on this region
1024 * VM_IO - disables paging
1025 * VM_DONTDUMP - Don't include in core dumps (3.7 only)
1026 * VM_MIXEDMAP - Support mixing struct page*s and raw pfns.
1027 * This is needed to support using the dedicated and
1028 * the OS based memory backends together.
1031 * This will need updating to propagate coherency flags
1035 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
1036 vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP | VM_DONTEXPAND | VM_IO;
1038 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO;
1040 vma->vm_ops = &kbase_vm_ops;
1041 vma->vm_private_data = map;
1043 page_array = kbase_get_phy_pages(reg);
1045 if (!(reg->flags & KBASE_REG_CPU_CACHED) &&
1046 (reg->flags & (KBASE_REG_CPU_WR|KBASE_REG_CPU_RD))) {
1047 /* We can't map vmalloc'd memory uncached.
1048 * Other memory will have been returned from
1049 * kbase_mem_allocator_alloc which would be
1050 * suitable for mapping uncached.
1053 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1057 vma->vm_flags |= VM_PFNMAP;
1058 for (i = 0; i < nr_pages; i++) {
1059 err = vm_insert_pfn(vma, vma->vm_start + (i << PAGE_SHIFT), page_array[i + start_off] >> PAGE_SHIFT);
1064 /* MIXEDMAP so we can vfree the kaddr early and not track it after map time */
1065 vma->vm_flags |= VM_MIXEDMAP;
1066 /* vmalloc remapping is easy... */
1067 err = remap_vmalloc_range(vma, kaddr, 0);
1077 map->page_off = start_off;
1078 map->region = free_on_close ? reg : NULL;
1079 map->kctx = reg->kctx;
1080 map->vm_start = vma->vm_start;
1081 map->vm_end = vma->vm_end;
1082 map->alloc = kbase_mem_phy_alloc_get(reg->alloc);
1083 map->count = 1; /* start with one ref */
1085 if (reg->flags & KBASE_REG_CPU_CACHED)
1086 map->alloc->accessed_cached = 1;
1088 list_add(&map->mappings_list, &map->alloc->mappings);
1094 static int kbase_trace_buffer_mmap(kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **const reg, void **const kaddr)
1096 struct kbase_va_region *new_reg;
1103 KBASE_LOG(1, kctx->kbdev->dev, "in %s\n", __func__);
1104 size = (vma->vm_end - vma->vm_start);
1105 nr_pages = size >> PAGE_SHIFT;
1107 if (!kctx->jctx.tb) {
1108 KBASE_DEBUG_ASSERT(0 != size);
1109 tb = vmalloc_user(size);
1116 kbase_device_trace_buffer_install(kctx, tb, size);
1122 *kaddr = kctx->jctx.tb;
1124 new_reg = kbase_alloc_free_region(kctx, 0, nr_pages, KBASE_REG_ZONE_SAME_VA);
1131 new_reg->alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_TB);
1132 if (IS_ERR_OR_NULL(new_reg->alloc)) {
1134 new_reg->alloc = NULL;
1139 new_reg->alloc->imported.kctx = kctx;
1140 new_reg->flags &= ~KBASE_REG_FREE;
1141 new_reg->flags |= KBASE_REG_CPU_CACHED;
1143 /* alloc now owns the tb */
1146 if (MALI_ERROR_NONE != kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1)) {
1149 goto out_no_va_region;
1154 /* map read only, noexec */
1155 vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC);
1156 /* the rest of the flags is added by the cpu_mmap handler */
1158 KBASE_LOG(1, kctx->kbdev->dev, "%s done\n", __func__);
1163 kbase_free_alloced_region(new_reg);
1166 kbase_device_trace_buffer_uninstall(kctx);
1174 static int kbase_mmu_dump_mmap(kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **const reg, void **const kmap_addr)
1176 struct kbase_va_region *new_reg;
1182 KBASE_LOG(1, kctx->kbdev->dev, "in kbase_mmu_dump_mmap\n");
1183 size = (vma->vm_end - vma->vm_start);
1184 nr_pages = size >> PAGE_SHIFT;
1186 kaddr = kbase_mmu_dump(kctx, nr_pages);
1193 new_reg = kbase_alloc_free_region(kctx, 0, nr_pages, KBASE_REG_ZONE_SAME_VA);
1200 new_reg->alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_RAW);
1201 if (IS_ERR_OR_NULL(new_reg->alloc)) {
1203 new_reg->alloc = NULL;
1208 new_reg->flags &= ~KBASE_REG_FREE;
1209 new_reg->flags |= KBASE_REG_CPU_CACHED;
1210 if (MALI_ERROR_NONE != kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1)) {
1219 KBASE_LOG(1, kctx->kbdev->dev, "kbase_mmu_dump_mmap done\n");
1224 kbase_free_alloced_region(new_reg);
1230 void kbase_os_mem_map_lock(kbase_context *kctx)
1232 struct mm_struct *mm = current->mm;
1234 down_read(&mm->mmap_sem);
1237 void kbase_os_mem_map_unlock(kbase_context *kctx)
1239 struct mm_struct *mm = current->mm;
1241 up_read(&mm->mmap_sem);
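/*
 * mmap() entry point. vma->vm_pgoff encodes what is being mapped: the
 * tracking-page handle sets up the special accounting VMA, the magic
 * trace-buffer and MMU-dump handles map kernel buffers, offsets in the
 * cookie range complete a pending SAME_VA allocation (mapping it on the
 * GPU at the address the CPU just picked), and any other offset is
 * treated as the GPU VA of an existing region to be mapped directly.
 */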
1244 int kbase_mmap(struct file *file, struct vm_area_struct *vma)
1246 kbase_context *kctx = file->private_data;
1247 struct kbase_va_region *reg;
1251 int free_on_close = 0;
1252 struct device *dev = kctx->kbdev->dev;
1254 KBASE_LOG(1, dev, "kbase_mmap\n");
1255 nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
1257 /* strip away the VM_MAY* flags corresponding to the VM_* flags requested */
1258 vma->vm_flags &= ~((vma->vm_flags & (VM_READ | VM_WRITE)) << 4);
1260 if (0 == nr_pages) {
1265 if (!(vma->vm_flags & VM_SHARED)) {
1270 kbase_gpu_vm_lock(kctx);
1272 if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MAP_TRACKING_HANDLE)) {
1273 /* The non-mapped tracking helper page */
1274 err = kbase_tracking_page_setup(kctx, vma);
1278 /* if not the MTP, verify that the MTP has been mapped */
1280 /* catches both the case where the special page isn't present
1281 * and the case where we've forked */
1282 if (rcu_dereference(kctx->process_mm) != current->mm) {
1289 switch (vma->vm_pgoff) {
1290 case PFN_DOWN(BASE_MEM_INVALID_HANDLE):
1291 case PFN_DOWN(BASE_MEM_WRITE_ALLOC_PAGES_HANDLE):
1292 /* Illegal handle for direct map */
1295 case PFN_DOWN(BASE_MEM_TRACE_BUFFER_HANDLE):
1296 err = kbase_trace_buffer_mmap(kctx, vma, &reg, &kaddr);
1299 KBASE_LOG(1, dev, "kbase_trace_buffer_mmap ok\n");
1300 /* free the region on munmap */
1303 case PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE):
1305 err = kbase_mmu_dump_mmap(kctx, vma, &reg, &kaddr);
1308 /* free the region on munmap */
1311 case PFN_DOWN(BASE_MEM_COOKIE_BASE) ...
1312 PFN_DOWN(BASE_MEM_FIRST_FREE_ADDRESS) - 1: {
1313 /* SAME_VA stuff, fetch the right region */
1315 int cookie = vma->vm_pgoff - PFN_DOWN(BASE_MEM_COOKIE_BASE);
1316 gpu_pc_bits = kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
1317 reg = kctx->pending_regions[cookie];
1319 size_t aligned_offset = 0;
1321 if (reg->flags & KBASE_REG_ALIGNED) {
1322 /* nr_pages must be able to hold alignment pages
1323 * plus actual pages */
1324 if (nr_pages != ((1UL << gpu_pc_bits >>
1327 /* incorrect mmap size */
1328 /* leave the cookie for a potential
1329 * later mapping, or to be reclaimed
1330 * later when the context is freed */
1335 aligned_offset = (vma->vm_start +
1336 (1UL << gpu_pc_bits) - 1) &
1337 ~((1UL << gpu_pc_bits) - 1);
1338 aligned_offset -= vma->vm_start;
1339 } else if (reg->nr_pages != nr_pages) {
1340 /* incorrect mmap size */
1341 /* leave the cookie for a potential later
1342 * mapping, or to be reclaimed later when the
1343 * context is freed */
1348 if ((vma->vm_flags & VM_READ &&
1349 !(reg->flags & KBASE_REG_CPU_RD)) ||
1350 (vma->vm_flags & VM_WRITE &&
1351 !(reg->flags & KBASE_REG_CPU_WR))) {
1352 /* VM flags inconsistent with region flags */
1354 dev_err(dev, "%s:%d inconsistent VM flags\n",
1355 __FILE__, __LINE__);
1359 /* adjust down nr_pages to what we have physically */
1360 nr_pages = kbase_reg_current_backed_size(reg);
1362 if (MALI_ERROR_NONE != kbase_gpu_mmap(kctx, reg,
1367 dev_err(dev, "%s:%d\n", __FILE__, __LINE__);
1368 /* Unable to map in GPU space. */
1374 /* no need for the cookie anymore */
1375 kctx->pending_regions[cookie] = NULL;
1376 kctx->cookies |= (1UL << cookie);
1379 * Overwrite the offset with the
1380 * region start_pfn, so we effectively
1381 * map from offset 0 in the region.
1383 vma->vm_pgoff = reg->start_pfn;
1385 /* free the region on munmap */
1394 reg = kbase_region_tracker_find_region_enclosing_address(kctx, (u64)vma->vm_pgoff << PAGE_SHIFT);
1396 if (reg && !(reg->flags & KBASE_REG_FREE)) {
1397 /* will this mapping overflow the size of the region? */
1398 if (nr_pages > (reg->nr_pages - (vma->vm_pgoff - reg->start_pfn)))
1401 if ((vma->vm_flags & VM_READ &&
1402 !(reg->flags & KBASE_REG_CPU_RD)) ||
1403 (vma->vm_flags & VM_WRITE &&
1404 !(reg->flags & KBASE_REG_CPU_WR))) {
1405 /* VM flags inconsistent with region flags */
1407 printk(KERN_ERR "%s:%d inconsistent VM flags\n",
1408 __FILE__, __LINE__);
1412 #ifdef CONFIG_DMA_SHARED_BUFFER
1413 if (reg->alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM)
1415 #endif /* CONFIG_DMA_SHARED_BUFFER */
1417 /* limit what we map to the amount currently backed */
1418 if (reg->alloc->nents < (vma->vm_pgoff - reg->start_pfn + nr_pages)) {
1419 if ((vma->vm_pgoff - reg->start_pfn) >= reg->alloc->nents)
1422 nr_pages = reg->alloc->nents - (vma->vm_pgoff - reg->start_pfn);
1434 err = kbase_cpu_mmap(reg, vma, kaddr, nr_pages, free_on_close);
1436 if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE)) {
1437 /* MMU dump - userspace should now have a reference on
1438 * the pages, so we can now free the kernel mapping */
1443 #ifdef CONFIG_DMA_SHARED_BUFFER
1445 err = dma_buf_mmap(reg->alloc->imported.umm.dma_buf, vma, vma->vm_pgoff - reg->start_pfn);
1446 #endif /* CONFIG_DMA_SHARED_BUFFER */
1448 kbase_gpu_vm_unlock(kctx);
1451 dev_err(dev, "mmap failed %d\n", err);
1456 KBASE_EXPORT_TEST_API(kbase_mmap)
1458 void kbasep_os_process_page_usage_update(kbase_context *kctx, int pages)
1460 struct mm_struct *mm;
1463 mm = rcu_dereference(kctx->process_mm);
1466 atomic_add(pages, &kctx->nonmapped_pages);
1467 #ifdef SPLIT_RSS_COUNTING
1468 add_mm_counter(mm, MM_FILEPAGES, pages);
1470 spin_lock(&mm->page_table_lock);
1471 add_mm_counter(mm, MM_FILEPAGES, pages);
1472 spin_unlock(&mm->page_table_lock);
1478 static void kbasep_os_process_page_usage_drain(kbase_context *kctx)
1481 struct mm_struct *mm;
1483 spin_lock(&kctx->mm_update_lock);
1484 mm = rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock));
1487 spin_unlock(&kctx->mm_update_lock);
1491 rcu_assign_pointer(kctx->process_mm, NULL);
1492 spin_unlock(&kctx->mm_update_lock);
1495 pages = atomic_xchg(&kctx->nonmapped_pages, 0);
1496 #ifdef SPLIT_RSS_COUNTING
1497 add_mm_counter(mm, MM_FILEPAGES, -pages);
1499 spin_lock(&mm->page_table_lock);
1500 add_mm_counter(mm, MM_FILEPAGES, -pages);
1501 spin_unlock(&mm->page_table_lock);
1505 static void kbase_special_vm_close(struct vm_area_struct *vma)
1507 kbase_context *kctx;
1508 kctx = vma->vm_private_data;
1509 kbasep_os_process_page_usage_drain(kctx);
1512 static const struct vm_operations_struct kbase_vm_special_ops = {
1513 .close = kbase_special_vm_close,
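/*
 * The tracking page is a special, access-less mapping of
 * BASE_MEM_MAP_TRACKING_HANDLE that each process must create before any
 * other kbase mapping (kbase_mmap() checks for it). Setting it up records
 * current->mm in kctx->process_mm, which is what the per-process page
 * accounting above charges against, and also lets kbase_mmap() reject
 * mappings made after a fork. Its close handler drains the accounting
 * when the mapping goes away.
 */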
1516 static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma)
1518 /* check that this is the only tracking page */
1519 spin_lock(&kctx->mm_update_lock);
1520 if (rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock)))
1522 spin_unlock(&kctx->mm_update_lock);
1526 rcu_assign_pointer(kctx->process_mm, current->mm);
1528 spin_unlock(&kctx->mm_update_lock);
1530 /* no real access */
1531 vma->vm_flags &= ~(VM_READ | VM_MAYREAD | VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC);
1532 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
1533 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
1535 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO;
1537 vma->vm_ops = &kbase_vm_special_ops;
1538 vma->vm_private_data = kctx;
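/*
 * Allocate a CPU-visible, write-combined DMA buffer and map it on the GPU
 * at the same address as the returned CPU pointer, so both sides can use
 * a single address. Judging by the kbase_hwc_dma_mapping handle type this
 * appears to be intended for hardware-counter dump buffers. kbase_va_free()
 * below undoes the mapping and releases the buffer.
 */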
1542 void *kbase_va_alloc(kbase_context *kctx, u32 size, kbase_hwc_dma_mapping *handle)
1548 struct kbase_va_region *reg;
1549 phys_addr_t *page_array;
1550 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
1551 DEFINE_DMA_ATTRS(attrs);
1554 u32 pages = ((size - 1) >> PAGE_SHIFT) + 1;
1555 u32 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR |
1556 BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR;
1558 KBASE_DEBUG_ASSERT(kctx != NULL);
1559 KBASE_DEBUG_ASSERT(0 != size);
1560 KBASE_DEBUG_ASSERT(0 != pages);
1565 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
1566 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
1567 va = dma_alloc_attrs(kctx->kbdev->dev, size, &dma_pa, GFP_KERNEL, &attrs);
1569 va = dma_alloc_writecombine(kctx->kbdev->dev, size, &dma_pa, GFP_KERNEL);
1574 memset(va, 0x0, size);
1576 /* Store the state so we can free it later. */
1577 handle->cpu_va = va;
1578 handle->dma_pa = dma_pa;
1579 handle->size = size;
1582 reg = kbase_alloc_free_region(kctx, 0, pages, KBASE_REG_ZONE_SAME_VA);
1586 reg->flags &= ~KBASE_REG_FREE;
1587 kbase_update_region_flags(reg, flags);
1589 reg->alloc = kbase_alloc_create(pages, KBASE_MEM_TYPE_RAW);
1590 if (IS_ERR_OR_NULL(reg->alloc))
1593 page_array = kbase_get_phy_pages(reg);
1595 for (i = 0; i < pages; i++) {
1596 page_array[i] = dma_pa + (i << PAGE_SHIFT);
1599 reg->alloc->nents = pages;
1601 kbase_gpu_vm_lock(kctx);
1602 res = kbase_gpu_mmap(kctx, reg, (uintptr_t) va, pages, 1);
1603 kbase_gpu_vm_unlock(kctx);
1610 kbase_mem_phy_alloc_put(reg->alloc);
1614 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
1615 dma_free_attrs(kctx->kbdev->dev, size, va, dma_pa, &attrs);
1617 dma_free_writecombine(kctx->kbdev->dev, size, va, dma_pa);
1622 KBASE_EXPORT_SYMBOL(kbase_va_alloc);
1624 void kbase_va_free(kbase_context *kctx, kbase_hwc_dma_mapping *handle)
1626 struct kbase_va_region *reg;
1628 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
1629 DEFINE_DMA_ATTRS(attrs);
1632 KBASE_DEBUG_ASSERT(kctx != NULL);
1633 KBASE_DEBUG_ASSERT(handle->cpu_va != NULL);
1635 kbase_gpu_vm_lock(kctx);
1636 reg = kbase_region_tracker_find_region_base_address(kctx, (uintptr_t)handle->cpu_va);
1637 KBASE_DEBUG_ASSERT(reg);
1638 err = kbase_gpu_munmap(kctx, reg);
1639 kbase_gpu_vm_unlock(kctx);
1640 KBASE_DEBUG_ASSERT(err == MALI_ERROR_NONE);
1642 kbase_mem_phy_alloc_put(reg->alloc);
1645 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
1646 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
1647 dma_free_attrs(kctx->kbdev->dev, handle->size,
1648 handle->cpu_va, handle->dma_pa, &attrs);
1650 dma_free_writecombine(kctx->kbdev->dev, handle->size,
1651 handle->cpu_va, handle->dma_pa);
1654 KBASE_EXPORT_SYMBOL(kbase_va_free);