1 /*
2  *
3  * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
4  *
5  * This program is free software and is provided to you under the terms of the
6  * GNU General Public License version 2 as published by the Free Software
7  * Foundation, and any use by you of this program is subject to the terms
8  * of such GNU licence.
9  *
10  * A copy of the licence is included with the program, and can also be obtained
11  * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12  * Boston, MA  02110-1301, USA.
13  *
14  */
15
16
17
18
19
20 /**
21  * @file mali_kbase_mem_linux.c
22  * Base kernel memory APIs, Linux implementation.
23  */
24
25 #include <linux/compat.h>
26 #include <linux/kernel.h>
27 #include <linux/bug.h>
28 #include <linux/mm.h>
29 #include <linux/fs.h>
30 #include <linux/version.h>
31 #include <linux/dma-mapping.h>
32 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
33         #include <linux/dma-attrs.h>
34 #endif
35 #ifdef CONFIG_DMA_SHARED_BUFFER
36 #include <linux/dma-buf.h>
37 #endif                          /* defined(CONFIG_DMA_SHARED_BUFFER) */
38
39 #include <mali_kbase.h>
40 #include <mali_kbase_mem_linux.h>
41 #include <mali_kbase_config_defaults.h>
42 #include <mali_kbase_hwaccess_time.h>
43
44 static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma);
45 static const struct vm_operations_struct kbase_vm_ops;
46
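/*
 * kbase_mem_alloc - allocate a new GPU memory region for @kctx.
 *
 * Validates the requested size and flags, picks a VA zone (SAME_VA, EXEC or
 * CUSTOM_VA), allocates the physical backing, and then either maps the region
 * on the GPU immediately or, for SAME_VA allocations, binds it to a cookie
 * that user space must mmap() to establish the shared CPU/GPU address.
 */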
47 struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx, u64 va_pages, u64 commit_pages, u64 extent, u64 *flags, u64 *gpu_va, u16 *va_alignment)
48 {
49         int zone;
50         int gpu_pc_bits;
51         int cpu_va_bits;
52         struct kbase_va_region *reg;
53         struct device *dev;
54
55         KBASE_DEBUG_ASSERT(kctx);
56         KBASE_DEBUG_ASSERT(flags);
57         KBASE_DEBUG_ASSERT(gpu_va);
58         KBASE_DEBUG_ASSERT(va_alignment);
59
60         dev = kctx->kbdev->dev;
61         *va_alignment = 0; /* no alignment by default */
62         *gpu_va = 0; /* return 0 on failure */
63
64         gpu_pc_bits = kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
65         cpu_va_bits = BITS_PER_LONG;
66
67         if (0 == va_pages) {
68                 dev_warn(dev, "kbase_mem_alloc called with 0 va_pages!");
69                 goto bad_size;
70         }
71
72         if (va_pages > (U64_MAX / PAGE_SIZE))
73                 /* 64-bit address range is the max */
74                 goto bad_size;
75
76 #if defined(CONFIG_64BIT)
77         if (kctx->is_compat)
78                 cpu_va_bits = 32;
79         else
80                 /* force SAME_VA if a 64-bit client */
81                 *flags |= BASE_MEM_SAME_VA;
82 #endif
83
84         if (!kbase_check_alloc_flags(*flags)) {
85                 dev_warn(dev,
86                                 "kbase_mem_alloc called with bad flags (%llx)",
87                                 (unsigned long long)*flags);
88                 goto bad_flags;
89         }
90
91         if ((*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0 &&
92                         kctx->kbdev->system_coherency != COHERENCY_ACE) {
93                 dev_warn(dev, "kbase_mem_alloc requested coherent memory when it is unavailable");
94                 goto bad_flags;
95         }
96         if ((*flags & BASE_MEM_COHERENT_SYSTEM) != 0 &&
97                         kctx->kbdev->system_coherency != COHERENCY_ACE) {
98                 /* Remove COHERENT_SYSTEM flag if coherent mem is unavailable */
99                 *flags &= ~BASE_MEM_COHERENT_SYSTEM;
100         }
101
102         /* Limit GPU executable allocs to GPU PC size */
103         if ((*flags & BASE_MEM_PROT_GPU_EX) &&
104             (va_pages > (1ULL << gpu_pc_bits >> PAGE_SHIFT)))
105                 goto bad_ex_size;
106
107         /* find out which VA zone to use */
108         if (*flags & BASE_MEM_SAME_VA)
109                 zone = KBASE_REG_ZONE_SAME_VA;
110         else if (*flags & BASE_MEM_PROT_GPU_EX)
111                 zone = KBASE_REG_ZONE_EXEC;
112         else
113                 zone = KBASE_REG_ZONE_CUSTOM_VA;
114
115         reg = kbase_alloc_free_region(kctx, 0, va_pages, zone);
116         if (!reg) {
117                 dev_err(dev, "Failed to allocate free region");
118                 goto no_region;
119         }
120
121         kbase_update_region_flags(reg, *flags);
122
123         if (kbase_reg_prepare_native(reg, kctx) != 0) {
124                 dev_err(dev, "Failed to prepare region");
125                 goto prepare_failed;
126         }
127
128         if (*flags & BASE_MEM_GROW_ON_GPF)
129                 reg->extent = extent;
130         else
131                 reg->extent = 0;
132
133         if (kbase_alloc_phy_pages(reg, va_pages, commit_pages)) {
134                 dev_warn(dev, "Failed to allocate %lld pages (va_pages=%lld)",
135                                 (unsigned long long)commit_pages,
136                                 (unsigned long long)va_pages);
137                 goto no_mem;
138         }
139
140         kbase_gpu_vm_lock(kctx);
141
142         /* mmap needed to set up the VA? */
143         if (*flags & BASE_MEM_SAME_VA) {
144                 /* Bind to a cookie */
145                 if (!kctx->cookies) {
146                         dev_err(dev, "No cookies available for allocation!");
147                         goto no_cookie;
148                 }
149                 /* return a cookie */
150                 *gpu_va = __ffs(kctx->cookies);
151                 kctx->cookies &= ~(1UL << *gpu_va);
152                 BUG_ON(kctx->pending_regions[*gpu_va]);
153                 kctx->pending_regions[*gpu_va] = reg;
154
155                 /* relocate to correct base */
156                 *gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE);
157                 *gpu_va <<= PAGE_SHIFT;
158
159                 /* See if we must align memory due to GPU PC bits vs CPU VA */
160                 if ((*flags & BASE_MEM_PROT_GPU_EX) &&
161                     (cpu_va_bits > gpu_pc_bits)) {
162                         *va_alignment = gpu_pc_bits;
163                         reg->flags |= KBASE_REG_ALIGNED;
164                 }
165         } else /* we control the VA */ {
166                 if (kbase_gpu_mmap(kctx, reg, 0, va_pages, 1) != 0) {
167                         dev_warn(dev, "Failed to map memory on GPU");
168                         goto no_mmap;
169                 }
170                 /* return real GPU VA */
171                 *gpu_va = reg->start_pfn << PAGE_SHIFT;
172         }
173
174         kbase_gpu_vm_unlock(kctx);
175         return reg;
176
177 no_mmap:
178 no_cookie:
179         kbase_gpu_vm_unlock(kctx);
180 no_mem:
181         kbase_mem_phy_alloc_put(reg->cpu_alloc);
182         kbase_mem_phy_alloc_put(reg->gpu_alloc);
183 prepare_failed:
184         kfree(reg);
185 no_region:
186 bad_ex_size:
187 bad_flags:
188 bad_size:
189         return NULL;
190 }
191
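/*
 * kbase_mem_query - query a property of the region containing @gpu_addr.
 *
 * Depending on @query this returns the currently committed (backed) size,
 * the reserved VA size in pages, or the BASE_MEM_* flags of the region.
 */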
192 int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, int query, u64 * const out)
193 {
194         struct kbase_va_region *reg;
195         int ret = -EINVAL;
196
197         KBASE_DEBUG_ASSERT(kctx);
198         KBASE_DEBUG_ASSERT(out);
199
200         kbase_gpu_vm_lock(kctx);
201
202         /* Validate the region */
203         reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
204         if (!reg || (reg->flags & KBASE_REG_FREE))
205                 goto out_unlock;
206
207         switch (query) {
208         case KBASE_MEM_QUERY_COMMIT_SIZE:
209                 if (reg->cpu_alloc->type != KBASE_MEM_TYPE_ALIAS) {
210                         *out = kbase_reg_current_backed_size(reg);
211                 } else {
212                         size_t i;
213                         struct kbase_aliased *aliased;
214                         *out = 0;
215                         aliased = reg->cpu_alloc->imported.alias.aliased;
216                         for (i = 0; i < reg->cpu_alloc->imported.alias.nents; i++)
217                                 *out += aliased[i].length;
218                 }
219                 break;
220         case KBASE_MEM_QUERY_VA_SIZE:
221                 *out = reg->nr_pages;
222                 break;
223         case KBASE_MEM_QUERY_FLAGS:
224         {
225                 *out = 0;
226                 if (KBASE_REG_CPU_WR & reg->flags)
227                         *out |= BASE_MEM_PROT_CPU_WR;
228                 if (KBASE_REG_CPU_RD & reg->flags)
229                         *out |= BASE_MEM_PROT_CPU_RD;
230                 if (KBASE_REG_CPU_CACHED & reg->flags)
231                         *out |= BASE_MEM_CACHED_CPU;
232                 if (KBASE_REG_GPU_WR & reg->flags)
233                         *out |= BASE_MEM_PROT_GPU_WR;
234                 if (KBASE_REG_GPU_RD & reg->flags)
235                         *out |= BASE_MEM_PROT_GPU_RD;
236                 if (!(KBASE_REG_GPU_NX & reg->flags))
237                         *out |= BASE_MEM_PROT_GPU_EX;
238                 if (KBASE_REG_SHARE_BOTH & reg->flags)
239                         *out |= BASE_MEM_COHERENT_SYSTEM;
240                 if (KBASE_REG_SHARE_IN & reg->flags)
241                         *out |= BASE_MEM_COHERENT_LOCAL;
242                 break;
243         }
244         default:
245                 *out = 0;
246                 goto out_unlock;
247         }
248
249         ret = 0;
250
251 out_unlock:
252         kbase_gpu_vm_unlock(kctx);
253         return ret;
254 }
255
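/*
 * kbase_mem_flags_change - change the coherency flags of an imported region.
 *
 * Only BASE_MEM_COHERENT_SYSTEM/LOCAL may be changed, and only on UMP or UMM
 * imports. For UMP the GPU page table entries are updated immediately; for
 * UMM only future mappings pick up the new flags.
 */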
256 int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned int flags, unsigned int mask)
257 {
258         struct kbase_va_region *reg;
259         int ret = -EINVAL;
260         unsigned int real_flags = 0;
261         unsigned int prev_flags = 0;
262
263         KBASE_DEBUG_ASSERT(kctx);
264
265         if (!gpu_addr)
266                 return -EINVAL;
267
268         /* nuke other bits */
269         flags &= mask;
270
271         /* check for only supported flags */
272         if (flags & ~(BASE_MEM_COHERENT_SYSTEM | BASE_MEM_COHERENT_LOCAL))
273                 goto out;
274
275         /* mask covers bits we don't support? */
276         if (mask & ~(BASE_MEM_COHERENT_SYSTEM | BASE_MEM_COHERENT_LOCAL))
277                 goto out;
278
279         /* convert flags */
280         if (BASE_MEM_COHERENT_SYSTEM & flags)
281                 real_flags |= KBASE_REG_SHARE_BOTH;
282         else if (BASE_MEM_COHERENT_LOCAL & flags)
283                 real_flags |= KBASE_REG_SHARE_IN;
284
285         /* now we can lock down the context, and find the region */
286         kbase_gpu_vm_lock(kctx);
287
288         /* Validate the region */
289         reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
290         if (!reg || (reg->flags & KBASE_REG_FREE))
291                 goto out_unlock;
292
293         /* limit to imported memory */
294         if ((reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMP) &&
295              (reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM))
296                 goto out_unlock;
297
298         /* no change? */
299         if (real_flags == (reg->flags & (KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH))) {
300                 ret = 0;
301                 goto out_unlock;
302         }
303
304         /* save for roll back */
305         prev_flags = reg->flags;
306         reg->flags &= ~(KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH);
307         reg->flags |= real_flags;
308
309         /* Currently supporting only imported memory */
310         switch (reg->gpu_alloc->type) {
311 #ifdef CONFIG_UMP
312         case KBASE_MEM_TYPE_IMPORTED_UMP:
313                 ret = kbase_mmu_update_pages(kctx, reg->start_pfn, kbase_get_cpu_phy_pages(reg), reg->gpu_alloc->nents, reg->flags);
314                 break;
315 #endif
316 #ifdef CONFIG_DMA_SHARED_BUFFER
317         case KBASE_MEM_TYPE_IMPORTED_UMM:
318                 /* Future mappings will use the new flags; the existing mapping will NOT
319                  * be updated, as the memory should not be in use by the GPU when the flags change.
320                  */
321                 ret = 0;
322                 WARN_ON(reg->gpu_alloc->imported.umm.current_mapping_usage_count);
323                 break;
324 #endif
325         default:
326                 break;
327         }
328
329         /* roll back the flags on error (update failed or type not handled) */
330         if (ret)
331                 reg->flags = prev_flags;
332
333 out_unlock:
334         kbase_gpu_vm_unlock(kctx);
335 out:
336         return ret;
337 }
338
339 #define KBASE_MEM_IMPORT_HAVE_PAGES (1UL << BASE_MEM_FLAGS_NR_BITS)
340
341 #ifdef CONFIG_UMP
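/*
 * kbase_mem_from_ump - wrap a UMP allocation (looked up by secure ID) in a
 * kbase region. CPU/GPU access and cacheability flags are derived from the
 * UMP allocation flags, and the page array is filled in from the UMP
 * physical block list, so the import is fully backed on return.
 */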
342 static struct kbase_va_region *kbase_mem_from_ump(struct kbase_context *kctx, ump_secure_id id, u64 *va_pages, u64 *flags)
343 {
344         struct kbase_va_region *reg;
345         ump_dd_handle umph;
346         u64 block_count;
347         const ump_dd_physical_block_64 *block_array;
348         u64 i, j;
349         int page = 0;
350         ump_alloc_flags ump_flags;
351         ump_alloc_flags cpu_flags;
352         ump_alloc_flags gpu_flags;
353
354         KBASE_DEBUG_ASSERT(kctx);
355         KBASE_DEBUG_ASSERT(va_pages);
356         KBASE_DEBUG_ASSERT(flags);
357
358         if (*flags & BASE_MEM_SECURE)
359                 goto bad_flags;
360
361         umph = ump_dd_from_secure_id(id);
362         if (UMP_DD_INVALID_MEMORY_HANDLE == umph)
363                 goto bad_id;
364
365         ump_flags = ump_dd_allocation_flags_get(umph);
366         cpu_flags = (ump_flags >> UMP_DEVICE_CPU_SHIFT) & UMP_DEVICE_MASK;
367         gpu_flags = (ump_flags >> DEFAULT_UMP_GPU_DEVICE_SHIFT) &
368                         UMP_DEVICE_MASK;
369
370         *va_pages = ump_dd_size_get_64(umph);
371         *va_pages >>= PAGE_SHIFT;
372
373         if (!*va_pages)
374                 goto bad_size;
375
376         if (*va_pages > (U64_MAX / PAGE_SIZE))
377                 /* 64-bit address range is the max */
378                 goto bad_size;
379
380         if (*flags & BASE_MEM_SAME_VA)
381                 reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_SAME_VA);
382         else
383                 reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_CUSTOM_VA);
384
385         if (!reg)
386                 goto no_region;
387
388         /* we've got pages to map now, and support SAME_VA */
389         *flags |= KBASE_MEM_IMPORT_HAVE_PAGES;
390
391         reg->gpu_alloc = kbase_alloc_create(*va_pages, KBASE_MEM_TYPE_IMPORTED_UMP);
392         if (IS_ERR_OR_NULL(reg->gpu_alloc))
393                 goto no_alloc_obj;
394
395         reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
396
397         reg->gpu_alloc->imported.ump_handle = umph;
398
399         reg->flags &= ~KBASE_REG_FREE;
400         reg->flags |= KBASE_REG_GPU_NX; /* UMP is always No eXecute */
401         reg->flags &= ~KBASE_REG_GROWABLE;      /* UMP cannot be grown */
402
403         /* Override import flags based on UMP flags */
404         *flags &= ~(BASE_MEM_CACHED_CPU);
405         *flags &= ~(BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR);
406         *flags &= ~(BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR);
407
408         if ((cpu_flags & (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) ==
409             (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) {
410                 reg->flags |= KBASE_REG_CPU_CACHED;
411                 *flags |= BASE_MEM_CACHED_CPU;
412         }
413
414         if (cpu_flags & UMP_PROT_CPU_WR) {
415                 reg->flags |= KBASE_REG_CPU_WR;
416                 *flags |= BASE_MEM_PROT_CPU_WR;
417         }
418
419         if (cpu_flags & UMP_PROT_CPU_RD) {
420                 reg->flags |= KBASE_REG_CPU_RD;
421                 *flags |= BASE_MEM_PROT_CPU_RD;
422         }
423
424         if ((gpu_flags & (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) ==
425             (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR))
426                 reg->flags |= KBASE_REG_GPU_CACHED;
427
428         if (gpu_flags & UMP_PROT_DEVICE_WR) {
429                 reg->flags |= KBASE_REG_GPU_WR;
430                 *flags |= BASE_MEM_PROT_GPU_WR;
431         }
432
433         if (gpu_flags & UMP_PROT_DEVICE_RD) {
434                 reg->flags |= KBASE_REG_GPU_RD;
435                 *flags |= BASE_MEM_PROT_GPU_RD;
436         }
437
438         /* ump phys block query */
439         ump_dd_phys_blocks_get_64(umph, &block_count, &block_array);
440
441         for (i = 0; i < block_count; i++) {
442                 for (j = 0; j < (block_array[i].size >> PAGE_SHIFT); j++) {
443                         reg->gpu_alloc->pages[page] = block_array[i].addr + (j << PAGE_SHIFT);
444                         page++;
445                 }
446         }
447         reg->gpu_alloc->nents = *va_pages;
448         reg->extent = 0;
449
450         return reg;
451
452 no_alloc_obj:
453         kfree(reg);
454 no_region:
455 bad_size:
456         ump_dd_release(umph);
457 bad_id:
458 bad_flags:
459         return NULL;
460 }
461 #endif                          /* CONFIG_UMP */
462
463 #ifdef CONFIG_DMA_SHARED_BUFFER
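/*
 * kbase_mem_from_umm - wrap a dma-buf (given by file descriptor) in a kbase
 * region. The buffer is attached but not mapped here; nents stays 0 and the
 * actual pages are only obtained when the buffer is mapped for GPU use.
 */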
464 static struct kbase_va_region *kbase_mem_from_umm(struct kbase_context *kctx, int fd, u64 *va_pages, u64 *flags)
465 {
466         struct kbase_va_region *reg;
467         struct dma_buf *dma_buf;
468         struct dma_buf_attachment *dma_attachment;
469
470         dma_buf = dma_buf_get(fd);
471         if (IS_ERR_OR_NULL(dma_buf))
472                 goto no_buf;
473
474         dma_attachment = dma_buf_attach(dma_buf, kctx->kbdev->dev);
475         if (IS_ERR_OR_NULL(dma_attachment)) /* dma_buf_attach() returns an ERR_PTR on failure */
476                 goto no_attachment;
477
478         *va_pages = PAGE_ALIGN(dma_buf->size) >> PAGE_SHIFT;
479         if (!*va_pages)
480                 goto bad_size;
481
482         if (*va_pages > (U64_MAX / PAGE_SIZE))
483                 /* 64-bit address range is the max */
484                 goto bad_size;
485
486         /* ignore SAME_VA */
487         *flags &= ~BASE_MEM_SAME_VA;
488
489 #ifdef CONFIG_64BIT
490         if (!kctx->is_compat) {
491                 /* 64-bit tasks must mmap anyway, but we do not expose this address to clients */
492                 *flags |= BASE_MEM_NEED_MMAP;
493                 reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_SAME_VA);
494         } else {
495 #else
496         if (1) {
497 #endif
498                 reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_CUSTOM_VA);
499         }
500
501         if (!reg)
502                 goto no_region;
503
504         reg->gpu_alloc = kbase_alloc_create(*va_pages, KBASE_MEM_TYPE_IMPORTED_UMM);
505         if (IS_ERR_OR_NULL(reg->gpu_alloc))
506                 goto no_alloc_obj;
507
508         reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
509
510         /* No pages to map yet */
511         reg->gpu_alloc->nents = 0;
512
513         reg->flags &= ~KBASE_REG_FREE;
514         reg->flags |= KBASE_REG_GPU_NX; /* UMM is always No eXecute */
515         reg->flags &= ~KBASE_REG_GROWABLE;      /* UMM cannot be grown */
516         reg->flags |= KBASE_REG_GPU_CACHED;
517
518         if (*flags & BASE_MEM_PROT_CPU_WR)
519                 reg->flags |= KBASE_REG_CPU_WR;
520
521         if (*flags & BASE_MEM_PROT_CPU_RD)
522                 reg->flags |= KBASE_REG_CPU_RD;
523
524         if (*flags & BASE_MEM_PROT_GPU_WR)
525                 reg->flags |= KBASE_REG_GPU_WR;
526
527         if (*flags & BASE_MEM_PROT_GPU_RD)
528                 reg->flags |= KBASE_REG_GPU_RD;
529
530         if (*flags & BASE_MEM_SECURE)
531                 reg->flags |= KBASE_REG_SECURE;
532
533         /* No read or write permissions are given at import time; the correct permissions are only applied at run time. */
534
535         reg->gpu_alloc->type = KBASE_MEM_TYPE_IMPORTED_UMM;
536         reg->gpu_alloc->imported.umm.sgt = NULL;
537         reg->gpu_alloc->imported.umm.dma_buf = dma_buf;
538         reg->gpu_alloc->imported.umm.dma_attachment = dma_attachment;
539         reg->gpu_alloc->imported.umm.current_mapping_usage_count = 0;
540         reg->extent = 0;
541
542         return reg;
543
544 no_alloc_obj:
545         kfree(reg);
546 no_region:
547 bad_size:
548         dma_buf_detach(dma_buf, dma_attachment);
549 no_attachment:
550         dma_buf_put(dma_buf);
551 no_buf:
552         return NULL;
553 }
554 #endif  /* CONFIG_DMA_SHARED_BUFFER */
555
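/*
 * kbase_mem_alias - create a region that aliases ranges of existing native
 * allocations (or the special write-alloc handle) at a fixed @stride.
 * Returns the GPU VA of the alias (or a cookie to be mmap'ed for 64-bit
 * clients), or 0 on failure.
 */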
556 u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
557                     u64 nents, struct base_mem_aliasing_info *ai,
558                     u64 *num_pages)
559 {
560         struct kbase_va_region *reg;
561         u64 gpu_va;
562         size_t i;
563         bool coherent;
564
565         KBASE_DEBUG_ASSERT(kctx);
566         KBASE_DEBUG_ASSERT(flags);
567         KBASE_DEBUG_ASSERT(ai);
568         KBASE_DEBUG_ASSERT(num_pages);
569
570         /* mask to only allowed flags */
571         *flags &= (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR |
572                    BASE_MEM_COHERENT_SYSTEM | BASE_MEM_COHERENT_LOCAL |
573                    BASE_MEM_COHERENT_SYSTEM_REQUIRED);
574
575         if (!(*flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR))) {
576                 dev_warn(kctx->kbdev->dev,
577                                 "kbase_mem_alias called with bad flags (%llx)",
578                                 (unsigned long long)*flags);
579                 goto bad_flags;
580         }
581         coherent = (*flags & BASE_MEM_COHERENT_SYSTEM) != 0 ||
582                         (*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0;
583
584         if (!stride)
585                 goto bad_stride;
586
587         if (!nents)
588                 goto bad_nents;
589
590         if ((nents * stride) > (U64_MAX / PAGE_SIZE))
591                 /* 64-bit address range is the max */
592                 goto bad_size;
593
594         /* calculate the number of pages this alias will cover */
595         *num_pages = nents * stride;
596
597 #ifdef CONFIG_64BIT
598         if (!kctx->is_compat) {
599                 /* 64-bit tasks must mmap anyway, but we do not expose this
600                  * address to clients */
601                 *flags |= BASE_MEM_NEED_MMAP;
602                 reg = kbase_alloc_free_region(kctx, 0, *num_pages,
603                                               KBASE_REG_ZONE_SAME_VA);
604         } else {
605 #else
606         if (1) {
607 #endif
608                 reg = kbase_alloc_free_region(kctx, 0, *num_pages,
609                                               KBASE_REG_ZONE_CUSTOM_VA);
610         }
611
612         if (!reg)
613                 goto no_reg;
614
615         /* zero-sized page array, as we don't need one and can't support one */
616         reg->gpu_alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_ALIAS);
617         if (IS_ERR_OR_NULL(reg->gpu_alloc))
618                 goto no_alloc_obj;
619
620         reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
621
622         kbase_update_region_flags(reg, *flags);
623
624         reg->gpu_alloc->imported.alias.nents = nents;
625         reg->gpu_alloc->imported.alias.stride = stride;
626         reg->gpu_alloc->imported.alias.aliased = vzalloc(sizeof(*reg->gpu_alloc->imported.alias.aliased) * nents);
627         if (!reg->gpu_alloc->imported.alias.aliased)
628                 goto no_aliased_array;
629
630         kbase_gpu_vm_lock(kctx);
631
632         /* validate and add src handles */
633         for (i = 0; i < nents; i++) {
634                 if (ai[i].handle < BASE_MEM_FIRST_FREE_ADDRESS) {
635                         if (ai[i].handle != BASE_MEM_WRITE_ALLOC_PAGES_HANDLE)
636                                 goto bad_handle; /* unsupported magic handle */
637                         if (!ai[i].length)
638                                 goto bad_handle; /* must be > 0 */
639                         if (ai[i].length > stride)
640                                 goto bad_handle; /* can't be larger than the
641                                                     stride */
642                         reg->gpu_alloc->imported.alias.aliased[i].length = ai[i].length;
643                 } else {
644                         struct kbase_va_region *aliasing_reg;
645                         struct kbase_mem_phy_alloc *alloc;
646
647                         aliasing_reg = kbase_region_tracker_find_region_base_address(kctx, (ai[i].handle >> PAGE_SHIFT) << PAGE_SHIFT);
648
649                         /* validate found region */
650                         if (!aliasing_reg)
651                                 goto bad_handle; /* Not found */
652                         if (aliasing_reg->flags & KBASE_REG_FREE)
653                                 goto bad_handle; /* Free region */
654                         if (!aliasing_reg->gpu_alloc)
655                                 goto bad_handle; /* No alloc */
656                         if (aliasing_reg->gpu_alloc->type != KBASE_MEM_TYPE_NATIVE)
657                                 goto bad_handle; /* Not a native alloc */
658                         if (coherent != ((aliasing_reg->flags & KBASE_REG_SHARE_BOTH) != 0))
659                                 goto bad_handle;
660                                 /* Non-coherent memory cannot alias
661                                    coherent memory, and vice versa. */
662
663                         /* check size against stride */
664                         if (!ai[i].length)
665                                 goto bad_handle; /* must be > 0 */
666                         if (ai[i].length > stride)
667                                 goto bad_handle; /* can't be larger than the
668                                                     stride */
669
670                         alloc = aliasing_reg->gpu_alloc;
671
672                         /* check against the alloc's size */
673                         if (ai[i].offset > alloc->nents)
674                                 goto bad_handle; /* beyond end */
675                         if (ai[i].offset + ai[i].length > alloc->nents)
676                                 goto bad_handle; /* beyond end */
677
678                         reg->gpu_alloc->imported.alias.aliased[i].alloc = kbase_mem_phy_alloc_get(alloc);
679                         reg->gpu_alloc->imported.alias.aliased[i].length = ai[i].length;
680                         reg->gpu_alloc->imported.alias.aliased[i].offset = ai[i].offset;
681                 }
682         }
683
684 #ifdef CONFIG_64BIT
685         if (!kctx->is_compat) {
686                 /* Bind to a cookie */
687                 if (!kctx->cookies) {
688                         dev_err(kctx->kbdev->dev, "No cookies available for allocation!");
689                         goto no_cookie;
690                 }
691                 /* return a cookie */
692                 gpu_va = __ffs(kctx->cookies);
693                 kctx->cookies &= ~(1UL << gpu_va);
694                 BUG_ON(kctx->pending_regions[gpu_va]);
695                 kctx->pending_regions[gpu_va] = reg;
696
697                 /* relocate to correct base */
698                 gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE);
699                 gpu_va <<= PAGE_SHIFT;
700         } else /* we control the VA */ {
701 #else
702         if (1) {
703 #endif
704                 if (kbase_gpu_mmap(kctx, reg, 0, *num_pages, 1) != 0) {
705                         dev_warn(kctx->kbdev->dev, "Failed to map memory on GPU");
706                         goto no_mmap;
707                 }
708                 /* return real GPU VA */
709                 gpu_va = reg->start_pfn << PAGE_SHIFT;
710         }
711
712         reg->flags &= ~KBASE_REG_FREE;
713         reg->flags &= ~KBASE_REG_GROWABLE;
714
715         kbase_gpu_vm_unlock(kctx);
716
717         return gpu_va;
718
719 #ifdef CONFIG_64BIT
720 no_cookie:
721 #endif
722 no_mmap:
723 bad_handle:
724         kbase_gpu_vm_unlock(kctx);
725 no_aliased_array:
726         kbase_mem_phy_alloc_put(reg->cpu_alloc);
727         kbase_mem_phy_alloc_put(reg->gpu_alloc);
728 no_alloc_obj:
729         kfree(reg);
730 no_reg:
731 bad_size:
732 bad_nents:
733 bad_stride:
734 bad_flags:
735         return 0;
736 }
737
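/*
 * kbase_mem_import - common entry point for importing external memory.
 *
 * Dispatches to the UMP or UMM importer and then either binds the new region
 * to an mmap cookie (SAME_VA / NEED_MMAP), maps it on the GPU right away if
 * the pages are already available, or just reserves the GPU VA range.
 */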
738 int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type, int handle, u64 *gpu_va, u64 *va_pages, u64 *flags)
739 {
740         struct kbase_va_region *reg;
741
742         KBASE_DEBUG_ASSERT(kctx);
743         KBASE_DEBUG_ASSERT(gpu_va);
744         KBASE_DEBUG_ASSERT(va_pages);
745         KBASE_DEBUG_ASSERT(flags);
746
747 #ifdef CONFIG_64BIT
748         if (!kctx->is_compat)
749                 *flags |= BASE_MEM_SAME_VA;
750 #endif
751
752         if (!kbase_check_import_flags(*flags)) {
753                 dev_warn(kctx->kbdev->dev,
754                                 "kbase_mem_import called with bad flags (%llx)",
755                                 (unsigned long long)*flags);
756                 goto bad_flags;
757         }
758
759         switch (type) {
760 #ifdef CONFIG_UMP
761         case BASE_MEM_IMPORT_TYPE_UMP:
762                 reg = kbase_mem_from_ump(kctx, (ump_secure_id)handle, va_pages, flags);
763                 break;
764 #endif /* CONFIG_UMP */
765 #ifdef CONFIG_DMA_SHARED_BUFFER
766         case BASE_MEM_IMPORT_TYPE_UMM:
767                 reg = kbase_mem_from_umm(kctx, handle, va_pages, flags);
768                 break;
769 #endif /* CONFIG_DMA_SHARED_BUFFER */
770         default:
771                 reg = NULL;
772                 break;
773         }
774
775         if (!reg)
776                 goto no_reg;
777
778         kbase_gpu_vm_lock(kctx);
779
780         /* mmap needed to set up the VA? */
781         if (*flags & (BASE_MEM_SAME_VA | BASE_MEM_NEED_MMAP)) {
782                 /* Bind to a cookie */
783                 if (!kctx->cookies)
784                         goto no_cookie;
785                 /* return a cookie */
786                 *gpu_va = __ffs(kctx->cookies);
787                 kctx->cookies &= ~(1UL << *gpu_va);
788                 BUG_ON(kctx->pending_regions[*gpu_va]);
789                 kctx->pending_regions[*gpu_va] = reg;
790
791                 /* relocate to correct base */
792                 *gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE);
793                 *gpu_va <<= PAGE_SHIFT;
794
795         } else if (*flags & KBASE_MEM_IMPORT_HAVE_PAGES)  {
796                 /* we control the VA, mmap now to the GPU */
797                 if (kbase_gpu_mmap(kctx, reg, 0, *va_pages, 1) != 0)
798                         goto no_gpu_va;
799                 /* return real GPU VA */
800                 *gpu_va = reg->start_pfn << PAGE_SHIFT;
801         } else {
802                 /* we control the VA, but nothing to mmap yet */
803                 if (kbase_add_va_region(kctx, reg, 0, *va_pages, 1) != 0)
804                         goto no_gpu_va;
805                 /* return real GPU VA */
806                 *gpu_va = reg->start_pfn << PAGE_SHIFT;
807         }
808
809         /* clear out private flags */
810         *flags &= ((1UL << BASE_MEM_FLAGS_NR_BITS) - 1);
811
812         kbase_gpu_vm_unlock(kctx);
813
814         return 0;
815
816 no_gpu_va:
817 no_cookie:
818         kbase_gpu_vm_unlock(kctx);
819         kbase_mem_phy_alloc_put(reg->cpu_alloc);
820         kbase_mem_phy_alloc_put(reg->gpu_alloc);
821         kfree(reg);
822 no_reg:
823 bad_flags:
824         *gpu_va = 0;
825         *va_pages = 0;
826         *flags = 0;
827         return -ENOMEM;
828 }
829
830
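/*
 * zap_range_nolock - remove CPU PTEs for [start, end) from @mm, but only for
 * VMAs whose vm_ops match ours. The caller must already hold mmap_sem.
 */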
831 static int zap_range_nolock(struct mm_struct *mm,
832                 const struct vm_operations_struct *vm_ops,
833                 unsigned long start, unsigned long end)
834 {
835         struct vm_area_struct *vma;
836         int err = -EINVAL; /* in case end < start */
837
838         while (start < end) {
839                 unsigned long local_end;
840
841                 vma = find_vma_intersection(mm, start, end);
842                 if (!vma)
843                         break;
844
845                 /* is it ours? */
846                 if (vma->vm_ops != vm_ops)
847                         goto try_next;
848
849                 local_end = vma->vm_end;
850
851                 if (end < local_end)
852                         local_end = end;
853
854                 err = zap_vma_ptes(vma, start, local_end - start);
855                 if (unlikely(err))
856                         break;
857
858 try_next:
859                 /* go to next vma, if any */
860                 start = vma->vm_end;
861         }
862
863         return err;
864 }
865
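/*
 * kbase_mem_commit - grow or shrink the physical backing of a growable
 * native region to @new_pages. Growing allocates and maps extra pages;
 * shrinking first zaps any affected CPU mappings, then tears down the GPU
 * mappings and frees the pages.
 */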
866 int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages, enum base_backing_threshold_status *failure_reason)
867 {
868         u64 old_pages;
869         u64 delta;
870         int res = -EINVAL;
871         struct kbase_va_region *reg;
872         phys_addr_t *phy_pages;
873
874         KBASE_DEBUG_ASSERT(kctx);
875         KBASE_DEBUG_ASSERT(failure_reason);
876         KBASE_DEBUG_ASSERT(gpu_addr != 0);
877
878         down_read(&current->mm->mmap_sem);
879         kbase_gpu_vm_lock(kctx);
880
881         /* Validate the region */
882         reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
883         if (!reg || (reg->flags & KBASE_REG_FREE)) {
884                 *failure_reason = BASE_BACKING_THRESHOLD_ERROR_INVALID_ARGUMENTS;
885                 goto out_unlock;
886         }
887
888         KBASE_DEBUG_ASSERT(reg->cpu_alloc);
889         KBASE_DEBUG_ASSERT(reg->gpu_alloc);
890
891         if (reg->gpu_alloc->type != KBASE_MEM_TYPE_NATIVE) {
892                 *failure_reason = BASE_BACKING_THRESHOLD_ERROR_NOT_GROWABLE;
893                 goto out_unlock;
894         }
895
896         if (0 == (reg->flags & KBASE_REG_GROWABLE)) {
897                 *failure_reason = BASE_BACKING_THRESHOLD_ERROR_NOT_GROWABLE;
898                 goto out_unlock;
899         }
900
901         if (new_pages > reg->nr_pages) {
902                 /* Would overflow the VA region */
903                 *failure_reason = BASE_BACKING_THRESHOLD_ERROR_INVALID_ARGUMENTS;
904                 goto out_unlock;
905         }
906
907         /* can't be mapped more than once on the GPU */
908         if (atomic_read(&reg->gpu_alloc->gpu_mappings) > 1) {
909                 *failure_reason = BASE_BACKING_THRESHOLD_ERROR_NOT_GROWABLE;
910                 goto out_unlock;
911         }
912
913         if (new_pages == reg->gpu_alloc->nents) {
914                 /* no change */
915                 res = 0;
916                 goto out_unlock;
917         }
918
919         phy_pages = kbase_get_gpu_phy_pages(reg);
920         old_pages = kbase_reg_current_backed_size(reg);
921
922         if (new_pages > old_pages) {
923                 /* growing */
924                 int err;
925
926                 delta = new_pages - old_pages;
927                 /* Allocate some more pages */
928                 if (kbase_alloc_phy_pages_helper(reg->cpu_alloc, delta) != 0) {
929                         *failure_reason = BASE_BACKING_THRESHOLD_ERROR_OOM;
930                         goto out_unlock;
931                 }
932                 if (reg->cpu_alloc != reg->gpu_alloc) {
933                         if (kbase_alloc_phy_pages_helper(
934                                         reg->gpu_alloc, delta) != 0) {
935                                 *failure_reason = BASE_BACKING_THRESHOLD_ERROR_OOM;
936                                 kbase_free_phy_pages_helper(reg->cpu_alloc,
937                                                 delta);
938                                 goto out_unlock;
939                         }
940                 }
941                 err = kbase_mmu_insert_pages(kctx, reg->start_pfn + old_pages,
942                                 phy_pages + old_pages, delta, reg->flags);
943                 if (err) {
944                         kbase_free_phy_pages_helper(reg->cpu_alloc, delta);
945                         if (reg->cpu_alloc != reg->gpu_alloc)
946                                 kbase_free_phy_pages_helper(reg->gpu_alloc,
947                                                 delta);
948                         *failure_reason = BASE_BACKING_THRESHOLD_ERROR_OOM;
949                         goto out_unlock;
950                 }
951         } else {
952                 /* shrinking */
953                 struct kbase_cpu_mapping *mapping;
954                 int err;
955
956                 /* first, unmap from any mappings affected */
957                 list_for_each_entry(mapping, &reg->cpu_alloc->mappings, mappings_list) {
958                         unsigned long mapping_size = (mapping->vm_end - mapping->vm_start) >> PAGE_SHIFT;
959
960                         /* is this mapping affected? */
961                         if ((mapping->page_off + mapping_size) > new_pages) {
962                                 unsigned long first_bad = 0;
963                                 int zap_res;
964
965                                 if (new_pages > mapping->page_off)
966                                         first_bad = new_pages - mapping->page_off;
967
968                                 zap_res = zap_range_nolock(current->mm,
969                                                 &kbase_vm_ops,
970                                                 mapping->vm_start +
971                                                 (first_bad << PAGE_SHIFT),
972                                                 mapping->vm_end);
973                                 WARN(zap_res,
974                                      "Failed to zap VA range (0x%lx - 0x%lx);\n",
975                                      mapping->vm_start +
976                                      (first_bad << PAGE_SHIFT),
977                                      mapping->vm_end
978                                      );
979                         }
980                 }
981
982                 /* Free some pages */
983                 delta = old_pages - new_pages;
984                 err = kbase_mmu_teardown_pages(kctx, reg->start_pfn + new_pages,
985                                 delta);
986                 if (err) {
987                         *failure_reason = BASE_BACKING_THRESHOLD_ERROR_OOM;
988                         goto out_unlock;
989                 }
990 #ifndef CONFIG_MALI_NO_MALI
991                 if (kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_6367)) {
992                         /* Wait for GPU to flush write buffer before freeing physical pages */
993                         kbase_wait_write_flush(kctx);
994                 }
995 #endif
996                 kbase_free_phy_pages_helper(reg->cpu_alloc, delta);
997                 if (reg->cpu_alloc != reg->gpu_alloc)
998                         kbase_free_phy_pages_helper(reg->gpu_alloc, delta);
999         }
1000
1001         res = 0;
1002
1003 out_unlock:
1004         kbase_gpu_vm_unlock(kctx);
1005         up_read(&current->mm->mmap_sem);
1006
1007         return res;
1008 }
1009
1010 static void kbase_cpu_vm_open(struct vm_area_struct *vma)
1011 {
1012         struct kbase_cpu_mapping *map = vma->vm_private_data;
1013
1014         KBASE_DEBUG_ASSERT(map);
1015         KBASE_DEBUG_ASSERT(map->count > 0);
1016         /* non-atomic as we're under Linux's mm lock */
1017         map->count++;
1018 }
1019
1020 static void kbase_cpu_vm_close(struct vm_area_struct *vma)
1021 {
1022         struct kbase_cpu_mapping *map = vma->vm_private_data;
1023
1024         KBASE_DEBUG_ASSERT(map);
1025         KBASE_DEBUG_ASSERT(map->count > 0);
1026
1027         /* non-atomic as we're under Linux's mm lock */
1028         if (--map->count)
1029                 return;
1030
1031         KBASE_DEBUG_ASSERT(map->kctx);
1032         KBASE_DEBUG_ASSERT(map->alloc);
1033
1034         kbase_gpu_vm_lock(map->kctx);
1035
1036         if (map->region) {
1037                 KBASE_DEBUG_ASSERT((map->region->flags & KBASE_REG_ZONE_MASK) ==
1038                                 KBASE_REG_ZONE_SAME_VA);
1039                 /* Avoid freeing the memory on process death, which would result in a
1040                  * GPU page fault. The memory will be freed in kbase_destroy_context.
1041                  */
1042                 if (!(current->flags & PF_EXITING))
1043                         kbase_mem_free_region(map->kctx, map->region);
1044         }
1045
1046         list_del(&map->mappings_list);
1047
1048         kbase_gpu_vm_unlock(map->kctx);
1049
1050         kbase_mem_phy_alloc_put(map->alloc);
1051         kfree(map);
1052 }
1053
1054 KBASE_EXPORT_TEST_API(kbase_cpu_vm_close);
1055
1056
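/*
 * kbase_cpu_vm_fault - CPU page fault handler for kbase mappings. Inserts
 * the PFNs of all currently backed pages from the fault offset onwards, or
 * sends SIGSEGV if the fault lies beyond the backed size.
 */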
1057 static int kbase_cpu_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1058 {
1059         struct kbase_cpu_mapping *map = vma->vm_private_data;
1060         pgoff_t rel_pgoff;
1061         size_t i;
1062
1063         KBASE_DEBUG_ASSERT(map);
1064         KBASE_DEBUG_ASSERT(map->count > 0);
1065         KBASE_DEBUG_ASSERT(map->kctx);
1066         KBASE_DEBUG_ASSERT(map->alloc);
1067
1068         /* we don't use vmf->pgoff as it's affected by our mmap with
1069          * offset being a GPU VA or a cookie */
1070         rel_pgoff = ((unsigned long)vmf->virtual_address - map->vm_start)
1071                         >> PAGE_SHIFT;
1072
1073         kbase_gpu_vm_lock(map->kctx);
1074         if (map->page_off + rel_pgoff >= map->alloc->nents)
1075                 goto locked_bad_fault;
1076
1077         /* insert all valid pages from the fault location */
1078         for (i = rel_pgoff;
1079              i < MIN((vma->vm_end - vma->vm_start) >> PAGE_SHIFT,
1080              map->alloc->nents - map->page_off); i++) {
1081                 int ret = vm_insert_pfn(vma, map->vm_start + (i << PAGE_SHIFT),
1082                     PFN_DOWN(map->alloc->pages[map->page_off + i]));
1083                 if (ret < 0 && ret != -EBUSY)
1084                         goto locked_bad_fault;
1085         }
1086
1087         kbase_gpu_vm_unlock(map->kctx);
1088         /* we resolved it, nothing for VM to do */
1089         return VM_FAULT_NOPAGE;
1090
1091 locked_bad_fault:
1092         kbase_gpu_vm_unlock(map->kctx);
1093         send_sig(SIGSEGV, current, 1);
1094         return VM_FAULT_NOPAGE;
1095 }
1096
1097 static const struct vm_operations_struct kbase_vm_ops = {
1098         .open  = kbase_cpu_vm_open,
1099         .close = kbase_cpu_vm_close,
1100         .fault = kbase_cpu_vm_fault
1101 };
1102
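/*
 * kbase_cpu_mmap - back a user VMA with a region's pages. Physical pages
 * are inserted as PFN mappings (or the vmalloc'd buffer is remapped for
 * kernel-side allocations), and a kbase_cpu_mapping record is created to
 * track the mapping for later shrink/close handling.
 */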
1103 static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vma, void *kaddr, size_t nr_pages, unsigned long aligned_offset, int free_on_close)
1104 {
1105         struct kbase_cpu_mapping *map;
1106         u64 start_off = vma->vm_pgoff - reg->start_pfn;
1107         phys_addr_t *page_array;
1108         int err = 0;
1109         int i;
1110
1111         map = kzalloc(sizeof(*map), GFP_KERNEL);
1112
1113         if (!map) {
1114                 WARN_ON(1);
1115                 err = -ENOMEM;
1116                 goto out;
1117         }
1118
1119         /*
1120          * VM_DONTCOPY - don't make this mapping available in fork'ed processes
1121          * VM_DONTEXPAND - disable mremap on this region
1122          * VM_IO - disables paging
1123          * VM_DONTDUMP - Don't include in core dumps (3.7 only)
1124          * VM_MIXEDMAP - Support mixing struct page*s and raw pfns.
1125          *               This is needed to support using the dedicated and
1126          *               the OS based memory backends together.
1127          */
1128         /*
1129          * This will need updating to propagate coherency flags
1130          * See MIDBASE-1057
1131          */
1132
1133 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
1134         vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP | VM_DONTEXPAND | VM_IO;
1135 #else
1136         vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO;
1137 #endif
1138         vma->vm_ops = &kbase_vm_ops;
1139         vma->vm_private_data = map;
1140
1141         page_array = kbase_get_cpu_phy_pages(reg);
1142
1143         if (!(reg->flags & KBASE_REG_CPU_CACHED) &&
1144             (reg->flags & (KBASE_REG_CPU_WR|KBASE_REG_CPU_RD))) {
1145                 /* We can't map vmalloc'd memory uncached.
1146                  * Other memory will have been returned from
1147                  * kbase_mem_allocator_alloc which would be
1148                  * suitable for mapping uncached.
1149                  */
1150                 BUG_ON(kaddr);
1151                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1152         }
1153
1154         if (!kaddr) {
1155                 vma->vm_flags |= VM_PFNMAP;
1156                 for (i = 0; i < nr_pages; i++) {
1157                         err = vm_insert_pfn(vma, vma->vm_start + (i << PAGE_SHIFT), page_array[i + start_off] >> PAGE_SHIFT);
1158                         if (WARN_ON(err))
1159                                 break;
1160                 }
1161         } else {
1162                 /* MIXEDMAP so we can vfree the kaddr early and not track it after map time */
1163                 vma->vm_flags |= VM_MIXEDMAP;
1164                 /* vmalloc remapping is easy... */
1165                 err = remap_vmalloc_range(vma, kaddr, 0);
1166                 WARN_ON(err);
1167         }
1168
1169         if (err) {
1170                 kfree(map);
1171                 goto out;
1172         }
1173
1174         map->page_off = start_off;
1175         map->region = free_on_close ? reg : NULL;
1176         map->kctx = reg->kctx;
1177         map->vm_start = vma->vm_start + aligned_offset;
1178         if (aligned_offset) {
1179                 KBASE_DEBUG_ASSERT(!start_off);
1180                 map->vm_end = map->vm_start + (reg->nr_pages << PAGE_SHIFT);
1181         } else {
1182                 map->vm_end = vma->vm_end;
1183         }
1184         map->alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
1185         map->count = 1; /* start with one ref */
1186
1187         if (reg->flags & KBASE_REG_CPU_CACHED)
1188                 map->alloc->properties |= KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED;
1189
1190         list_add(&map->mappings_list, &map->alloc->mappings);
1191
1192  out:
1193         return err;
1194 }
1195
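/*
 * kbase_trace_buffer_mmap - allocate and install the per-context trace
 * buffer and wrap it in a SAME_VA region so it can be mapped read-only into
 * user space; fails if a trace buffer is already installed.
 */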
1196 static int kbase_trace_buffer_mmap(struct kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **const reg, void **const kaddr)
1197 {
1198         struct kbase_va_region *new_reg;
1199         u32 nr_pages;
1200         size_t size;
1201         int err = 0;
1202         u32 *tb;
1203         int owns_tb = 1;
1204
1205         dev_dbg(kctx->kbdev->dev, "in %s\n", __func__);
1206         size = (vma->vm_end - vma->vm_start);
1207         nr_pages = size >> PAGE_SHIFT;
1208
1209         if (!kctx->jctx.tb) {
1210                 KBASE_DEBUG_ASSERT(0 != size);
1211                 tb = vmalloc_user(size);
1212
1213                 if (NULL == tb) {
1214                         err = -ENOMEM;
1215                         goto out;
1216                 }
1217
1218                 kbase_device_trace_buffer_install(kctx, tb, size);
1219         } else {
1220                 err = -EINVAL;
1221                 goto out;
1222         }
1223
1224         *kaddr = kctx->jctx.tb;
1225
1226         new_reg = kbase_alloc_free_region(kctx, 0, nr_pages, KBASE_REG_ZONE_SAME_VA);
1227         if (!new_reg) {
1228                 err = -ENOMEM;
1229                 WARN_ON(1);
1230                 goto out_no_region;
1231         }
1232
1233         new_reg->cpu_alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_TB);
1234         if (IS_ERR_OR_NULL(new_reg->cpu_alloc)) {
1235                 err = -ENOMEM;
1236                 new_reg->cpu_alloc = NULL;
1237                 WARN_ON(1);
1238                 goto out_no_alloc;
1239         }
1240
1241         new_reg->gpu_alloc = kbase_mem_phy_alloc_get(new_reg->cpu_alloc);
1242
1243         new_reg->cpu_alloc->imported.kctx = kctx;
1244         new_reg->flags &= ~KBASE_REG_FREE;
1245         new_reg->flags |= KBASE_REG_CPU_CACHED;
1246
1247         /* alloc now owns the tb */
1248         owns_tb = 0;
1249
1250         if (kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1) != 0) {
1251                 err = -ENOMEM;
1252                 WARN_ON(1);
1253                 goto out_no_va_region;
1254         }
1255
1256         *reg = new_reg;
1257
1258         /* map read only, noexec */
1259         vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC);
1260         /* the rest of the flags are added by the cpu_mmap handler */
1261
1262         dev_dbg(kctx->kbdev->dev, "%s done\n", __func__);
1263         return 0;
1264
1265 out_no_va_region:
1266 out_no_alloc:
1267         kbase_free_alloced_region(new_reg);
1268 out_no_region:
1269         if (owns_tb) {
1270                 kbase_device_trace_buffer_uninstall(kctx);
1271                 vfree(tb);
1272         }
1273 out:
1274         return err;
1275 }
1276
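/*
 * kbase_mmu_dump_mmap - dump the MMU tables into a kernel buffer and wrap it
 * in a SAME_VA region so the dump can be mapped into user space.
 */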
1277 static int kbase_mmu_dump_mmap(struct kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **const reg, void **const kmap_addr)
1278 {
1279         struct kbase_va_region *new_reg;
1280         void *kaddr;
1281         u32 nr_pages;
1282         size_t size;
1283         int err = 0;
1284
1285         dev_dbg(kctx->kbdev->dev, "in kbase_mmu_dump_mmap\n");
1286         size = (vma->vm_end - vma->vm_start);
1287         nr_pages = size >> PAGE_SHIFT;
1288
1289         kaddr = kbase_mmu_dump(kctx, nr_pages);
1290
1291         if (!kaddr) {
1292                 err = -ENOMEM;
1293                 goto out;
1294         }
1295
1296         new_reg = kbase_alloc_free_region(kctx, 0, nr_pages, KBASE_REG_ZONE_SAME_VA);
1297         if (!new_reg) {
1298                 err = -ENOMEM;
1299                 WARN_ON(1);
1300                 goto out;
1301         }
1302
1303         new_reg->cpu_alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_RAW);
1304         if (IS_ERR_OR_NULL(new_reg->cpu_alloc)) {
1305                 err = -ENOMEM;
1306                 new_reg->cpu_alloc = NULL;
1307                 WARN_ON(1);
1308                 goto out_no_alloc;
1309         }
1310
1311         new_reg->gpu_alloc = kbase_mem_phy_alloc_get(new_reg->cpu_alloc);
1312
1313         new_reg->flags &= ~KBASE_REG_FREE;
1314         new_reg->flags |= KBASE_REG_CPU_CACHED;
1315         if (kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1) != 0) {
1316                 err = -ENOMEM;
1317                 WARN_ON(1);
1318                 goto out_va_region;
1319         }
1320
1321         *kmap_addr = kaddr;
1322         *reg = new_reg;
1323
1324         dev_dbg(kctx->kbdev->dev, "kbase_mmu_dump_mmap done\n");
1325         return 0;
1326
1327 out_no_alloc:
1328 out_va_region:
1329         kbase_free_alloced_region(new_reg);
1330 out:
1331         return err;
1332 }
1333
1334
1335 void kbase_os_mem_map_lock(struct kbase_context *kctx)
1336 {
1337         struct mm_struct *mm = current->mm;
1338         (void)kctx;
1339         down_read(&mm->mmap_sem);
1340 }
1341
1342 void kbase_os_mem_map_unlock(struct kbase_context *kctx)
1343 {
1344         struct mm_struct *mm = current->mm;
1345         (void)kctx;
1346         up_read(&mm->mmap_sem);
1347 }
1348
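/*
 * kbase_mmap - top-level mmap() handler for the kbase device file.
 *
 * The mmap offset selects what is being mapped: the tracking helper page,
 * the trace buffer, an MMU dump, a pending SAME_VA cookie, or an existing
 * GPU region looked up by address.
 */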
1349 int kbase_mmap(struct file *file, struct vm_area_struct *vma)
1350 {
1351         struct kbase_context *kctx = file->private_data;
1352         struct kbase_va_region *reg;
1353         void *kaddr = NULL;
1354         size_t nr_pages;
1355         int err = 0;
1356         int free_on_close = 0;
1357         struct device *dev = kctx->kbdev->dev;
1358         size_t aligned_offset = 0;
1359
1360         dev_dbg(dev, "kbase_mmap\n");
1361         nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
1362
1363         /* strip away the VM_MAY% flags corresponding to the VM_% flags requested */
1364         vma->vm_flags &= ~((vma->vm_flags & (VM_READ | VM_WRITE)) << 4);
1365
1366         if (0 == nr_pages) {
1367                 err = -EINVAL;
1368                 goto out;
1369         }
1370
1371         if (!(vma->vm_flags & VM_SHARED)) {
1372                 err = -EINVAL;
1373                 goto out;
1374         }
1375
1376         kbase_gpu_vm_lock(kctx);
1377
1378         if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MAP_TRACKING_HANDLE)) {
1379                 /* The non-mapped tracking helper page */
1380                 err = kbase_tracking_page_setup(kctx, vma);
1381                 goto out_unlock;
1382         }
1383
1384         /* if not the MTP, verify that the MTP has been mapped */
1385         rcu_read_lock();
1386         /* catches both the case where the special page isn't present and
1387          * the case where we've forked */
1388         if (rcu_dereference(kctx->process_mm) != current->mm) {
1389                 err = -EINVAL;
1390                 rcu_read_unlock();
1391                 goto out_unlock;
1392         }
1393         rcu_read_unlock();
1394
1395         switch (vma->vm_pgoff) {
1396         case PFN_DOWN(BASE_MEM_INVALID_HANDLE):
1397         case PFN_DOWN(BASE_MEM_WRITE_ALLOC_PAGES_HANDLE):
1398                 /* Illegal handle for direct map */
1399                 err = -EINVAL;
1400                 goto out_unlock;
1401         case PFN_DOWN(BASE_MEM_TRACE_BUFFER_HANDLE):
1402                 err = kbase_trace_buffer_mmap(kctx, vma, &reg, &kaddr);
1403                 if (0 != err)
1404                         goto out_unlock;
1405                 dev_dbg(dev, "kbase_trace_buffer_mmap ok\n");
1406                 /* free the region on munmap */
1407                 free_on_close = 1;
1408                 goto map;
1409         case PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE):
1410                 /* MMU dump */
1411                 err = kbase_mmu_dump_mmap(kctx, vma, &reg, &kaddr);
1412                 if (0 != err)
1413                         goto out_unlock;
1414                 /* free the region on munmap */
1415                 free_on_close = 1;
1416                 goto map;
1417         case PFN_DOWN(BASE_MEM_COOKIE_BASE) ...
1418              PFN_DOWN(BASE_MEM_FIRST_FREE_ADDRESS) - 1: {
1419                 /* SAME_VA stuff, fetch the right region */
1420                 int gpu_pc_bits;
1421                 int cookie = vma->vm_pgoff - PFN_DOWN(BASE_MEM_COOKIE_BASE);
1422
1423                 gpu_pc_bits = kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
1424                 reg = kctx->pending_regions[cookie];
1425                 if (NULL != reg) {
1426                         if (reg->flags & KBASE_REG_ALIGNED) {
1427                                 /* nr_pages must be able to hold alignment pages
1428                                  * plus actual pages */
1429                                 if (nr_pages != ((1UL << gpu_pc_bits >>
1430                                                         PAGE_SHIFT) +
1431                                                         reg->nr_pages)) {
1432                                         /* incorrect mmap size */
1433                                         /* leave the cookie for a potential
1434                                          * later mapping, or to be reclaimed
1435                                          * later when the context is freed */
1436                                         err = -ENOMEM;
1437                                         goto out_unlock;
1438                                 }
1439
1440                                 aligned_offset = (vma->vm_start +
1441                                                   (1UL << gpu_pc_bits) - 1) &
1442                                                  ~((1UL << gpu_pc_bits) - 1);
1443                                 aligned_offset -= vma->vm_start;
1444                         } else if (reg->nr_pages != nr_pages) {
1445                                 /* incorrect mmap size */
1446                                 /* leave the cookie for a potential later
1447                                  * mapping, or to be reclaimed later when the
1448                                  * context is freed */
1449                                 err = -ENOMEM;
1450                                 goto out_unlock;
1451                         }
1452
1453                         if ((vma->vm_flags & VM_READ &&
1454                              !(reg->flags & KBASE_REG_CPU_RD)) ||
1455                             (vma->vm_flags & VM_WRITE &&
1456                              !(reg->flags & KBASE_REG_CPU_WR))) {
1457                                 /* VM flags inconsistent with region flags */
1458                                 err = -EPERM;
1459                                 dev_err(dev, "%s:%d inconsistent VM flags\n",
1460                                         __FILE__, __LINE__);
1461                                 goto out_unlock;
1462                         }
1463
1464                         /* adjust down nr_pages to what we have physically */
1465                         nr_pages = kbase_reg_current_backed_size(reg);
1466
1467                         if (kbase_gpu_mmap(kctx, reg,
1468                                         vma->vm_start + aligned_offset,
1469                                         reg->nr_pages, 1) != 0) {
1470                                 dev_err(dev, "%s:%d kbase_gpu_mmap failed\n", __FILE__, __LINE__);
1471                                 /* Unable to map in GPU space. */
1472                                 WARN_ON(1);
1473                                 err = -ENOMEM;
1474                                 goto out_unlock;
1475                         }
1476
1477                         /* no need for the cookie anymore */
1478                         kctx->pending_regions[cookie] = NULL;
1479                         kctx->cookies |= (1UL << cookie);
1480
1481                         /*
1482                          * Overwrite the offset with the
1483                          * region start_pfn, so we effectively
1484                          * map from offset 0 in the region.
1485                          */
1486                         vma->vm_pgoff = reg->start_pfn;
1487
1488                         /* free the region on munmap */
1489                         free_on_close = 1;
1490                         goto map;
1491                 }
1492
1493                 err = -ENOMEM;
1494                 goto out_unlock;
1495         }
1496         default: {
1497                 reg = kbase_region_tracker_find_region_enclosing_address(kctx, (u64)vma->vm_pgoff << PAGE_SHIFT);
1498
1499                 if (reg && !(reg->flags & KBASE_REG_FREE)) {
1500                         /* will this mapping overflow the size of the region? */
1501                         if (nr_pages > (reg->nr_pages - (vma->vm_pgoff - reg->start_pfn)))
1502                                 goto overflow;
1503
1504                         if ((vma->vm_flags & VM_READ &&
1505                              !(reg->flags & KBASE_REG_CPU_RD)) ||
1506                             (vma->vm_flags & VM_WRITE &&
1507                              !(reg->flags & KBASE_REG_CPU_WR))) {
1508                                 /* VM flags inconsistent with region flags */
1509                                 err = -EPERM;
1510                                 dev_err(dev, "%s:%d inconsistent VM flags\n",
1511                                         __FILE__, __LINE__);
1512                                 goto out_unlock;
1513                         }
1514
1515 #ifdef CONFIG_DMA_SHARED_BUFFER
1516                         if (reg->cpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM)
1517                                 goto dma_map;
1518 #endif /* CONFIG_DMA_SHARED_BUFFER */
1519
1520                         /* limit what we map to the amount currently backed */
1521                         if (reg->cpu_alloc->nents < (vma->vm_pgoff - reg->start_pfn + nr_pages)) {
1522                                 if ((vma->vm_pgoff - reg->start_pfn) >= reg->cpu_alloc->nents)
1523                                         nr_pages = 0;
1524                                 else
1525                                         nr_pages = reg->cpu_alloc->nents - (vma->vm_pgoff - reg->start_pfn);
1526                         }
1527
1528                         goto map;
1529                 }
1530
1531 overflow:
1532                 err = -ENOMEM;
1533                 goto out_unlock;
1534         } /* default */
1535         } /* switch */
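        /*
         * Common mapping path for the cases above: create the CPU mapping
         * with whatever alignment offset and free-on-close policy the
         * selected case decided on.
         */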
1536 map:
1537         err = kbase_cpu_mmap(reg, vma, kaddr, nr_pages, aligned_offset, free_on_close);
1538
1539         if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE)) {
1540                 /* MMU dump - userspace now holds a reference on the
1541                  * pages, so the kernel mapping can be freed */
1542                 vfree(kaddr);
1543         }
1544         goto out_unlock;
1545
1546 #ifdef CONFIG_DMA_SHARED_BUFFER
1547 dma_map:
1548         err = dma_buf_mmap(reg->cpu_alloc->imported.umm.dma_buf, vma, vma->vm_pgoff - reg->start_pfn);
1549 #endif                          /* CONFIG_DMA_SHARED_BUFFER */
1550 out_unlock:
1551         kbase_gpu_vm_unlock(kctx);
1552 out:
1553         if (err)
1554                 dev_err(dev, "mmap failed %d\n", err);
1555
1556         return err;
1557 }
1558
1559 KBASE_EXPORT_TEST_API(kbase_mmap);
1560
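/*
 * kbase_vmap - map part of a region's backing into kernel virtual address
 * space
 *
 * Looks up the region enclosing @gpu_addr, checks that the requested range
 * is fully backed, vmaps the backing pages (write-combined unless the region
 * is CPU-cached) and, for cached regions, syncs the range towards the CPU.
 * Returns a CPU pointer to the first byte of the range and fills in @map on
 * success, or NULL on failure.
 *
 * A minimal usage sketch; the caller-side names kctx, gpu_va and buf_size
 * are illustrative and not defined in this file:
 *
 *	struct kbase_vmap_struct map;
 *	u32 *ptr = kbase_vmap(kctx, gpu_va, buf_size, &map);
 *
 *	if (ptr) {
 *		ptr[0] = 0xdeadbeef;      (CPU write to GPU-visible memory)
 *		kbase_vunmap(kctx, &map); (always pair with kbase_vunmap)
 *	}
 */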
1561 void *kbase_vmap(struct kbase_context *kctx, u64 gpu_addr, size_t size,
1562                 struct kbase_vmap_struct *map)
1563 {
1564         struct kbase_va_region *reg;
1565         unsigned long page_index;
1566         unsigned int offset = gpu_addr & ~PAGE_MASK;
1567         size_t page_count = PFN_UP(offset + size);
1568         phys_addr_t *page_array;
1569         struct page **pages;
1570         void *cpu_addr = NULL;
1571         pgprot_t prot;
1572         size_t i;
1573         bool sync_needed;
1574
1575         if (!size || !map)
1576                 return NULL;
1577
1578         /* check if page_count calculation will wrap */
1579         if (size > ((size_t)-1 / PAGE_SIZE))
1580                 return NULL;
1581
1582         kbase_gpu_vm_lock(kctx);
1583
1584         reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr);
1585         if (!reg || (reg->flags & KBASE_REG_FREE))
1586                 goto out_unlock;
1587
1588         page_index = (gpu_addr >> PAGE_SHIFT) - reg->start_pfn;
1589
1590         /* check if page_index + page_count will wrap */
1591         if (-1UL - page_count < page_index)
1592                 goto out_unlock;
1593
1594         if (page_index + page_count > kbase_reg_current_backed_size(reg))
1595                 goto out_unlock;
1596
1597         page_array = kbase_get_cpu_phy_pages(reg);
1598         if (!page_array)
1599                 goto out_unlock;
1600
1601         pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
1602         if (!pages)
1603                 goto out_unlock;
1604
1605         for (i = 0; i < page_count; i++)
1606                 pages[i] = pfn_to_page(PFN_DOWN(page_array[page_index + i]));
1607
1608         prot = PAGE_KERNEL;
1609         if (!(reg->flags & KBASE_REG_CPU_CACHED)) {
1610                 /* Map write-combining (not CPU-cached) */
1611                 prot = pgprot_writecombine(prot);
1612         }
1613
1614         cpu_addr = vmap(pages, page_count, VM_MAP, prot);
1615
1616         kfree(pages);
1617
1618         if (!cpu_addr)
1619                 goto out_unlock;
1620
1621         map->gpu_addr = gpu_addr;
1622         map->cpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
1623         map->cpu_pages = &kbase_get_cpu_phy_pages(reg)[page_index];
1624         map->gpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
1625         map->gpu_pages = &kbase_get_gpu_phy_pages(reg)[page_index];
1626         map->addr = (void *)((uintptr_t)cpu_addr + offset);
1627         map->size = size;
1628         map->is_cached = (reg->flags & KBASE_REG_CPU_CACHED) != 0;
1629         sync_needed = map->is_cached;
1630
1631         if (sync_needed) {
1632                 /* Sync first page */
1633                 size_t sz = MIN(((size_t) PAGE_SIZE - offset), size);
1634                 phys_addr_t cpu_pa = map->cpu_pages[0];
1635                 phys_addr_t gpu_pa = map->gpu_pages[0];
1636
1637                 kbase_sync_single(kctx, cpu_pa, gpu_pa, offset, sz,
1638                                 KBASE_SYNC_TO_CPU);
1639
1640                 /* Sync middle pages (if any) */
1641                 for (i = 1; page_count > 2 && i < page_count - 1; i++) {
1642                         cpu_pa = map->cpu_pages[i];
1643                         gpu_pa = map->gpu_pages[i];
1644                         kbase_sync_single(kctx, cpu_pa, gpu_pa, 0, PAGE_SIZE,
1645                                         KBASE_SYNC_TO_CPU);
1646                 }
1647
1648                 /* Sync last page (if any) */
1649                 if (page_count > 1) {
1650                         cpu_pa = map->cpu_pages[page_count - 1];
1651                         gpu_pa = map->gpu_pages[page_count - 1];
1652                         sz = ((offset + size - 1) & ~PAGE_MASK) + 1;
1653                         kbase_sync_single(kctx, cpu_pa, gpu_pa, 0, sz,
1654                                         KBASE_SYNC_TO_CPU);
1655                 }
1656         }
1657         kbase_gpu_vm_unlock(kctx);
1658
1659         return map->addr;
1660
1661 out_unlock:
1662         kbase_gpu_vm_unlock(kctx);
1663         return NULL;
1664 }
1665
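/*
 * kbase_vunmap - undo a mapping set up by kbase_vmap
 *
 * Removes the kernel mapping, syncs a CPU-cached range back towards the
 * device, drops the references taken on the backing allocations and clears
 * @map so it cannot be reused by accident.
 */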
1666 void kbase_vunmap(struct kbase_context *kctx, struct kbase_vmap_struct *map)
1667 {
1668         void *addr = (void *)((uintptr_t)map->addr & PAGE_MASK);
1669         bool sync_needed = map->is_cached;
1670         vunmap(addr);
1671         if (sync_needed) {
1672                 off_t offset = (uintptr_t)map->addr & ~PAGE_MASK;
1673                 size_t size = map->size;
1674                 size_t page_count = PFN_UP(offset + size);
1675                 size_t i;
1676
1677                 /* Sync first page */
1678                 size_t sz = MIN(((size_t) PAGE_SIZE - offset), size);
1679                 phys_addr_t cpu_pa = map->cpu_pages[0];
1680                 phys_addr_t gpu_pa = map->gpu_pages[0];
1681
1682                 kbase_sync_single(kctx, cpu_pa, gpu_pa, offset, sz,
1683                                 KBASE_SYNC_TO_DEVICE);
1684
1685                 /* Sync middle pages (if any) */
1686                 for (i = 1; page_count > 2 && i < page_count - 1; i++) {
1687                         cpu_pa = map->cpu_pages[i];
1688                         gpu_pa = map->gpu_pages[i];
1689                         kbase_sync_single(kctx, cpu_pa, gpu_pa, 0, PAGE_SIZE,
1690                                         KBASE_SYNC_TO_DEVICE);
1691                 }
1692
1693                 /* Sync last page (if any) */
1694                 if (page_count > 1) {
1695                         cpu_pa = map->cpu_pages[page_count - 1];
1696                         gpu_pa = map->gpu_pages[page_count - 1];
1697                         sz = ((offset + size - 1) & ~PAGE_MASK) + 1;
1698                         kbase_sync_single(kctx, cpu_pa, gpu_pa, 0, sz,
1699                                         KBASE_SYNC_TO_DEVICE);
1700                 }
1701         }
1702         map->gpu_addr = 0;
1703         map->cpu_alloc = kbase_mem_phy_alloc_put(map->cpu_alloc);
1704         map->gpu_alloc = kbase_mem_phy_alloc_put(map->gpu_alloc);
1705         map->cpu_pages = NULL;
1706         map->gpu_pages = NULL;
1707         map->addr = NULL;
1708         map->size = 0;
1709         map->is_cached = false;
1710 }
1711
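/*
 * Adjust the owning process's MM_FILEPAGES counter by @pages so that GPU
 * memory which is never CPU-mapped is still reflected in the process RSS.
 * The mm pointer is read under RCU because the tracking page teardown may
 * clear it concurrently.
 */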
1712 void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages)
1713 {
1714         struct mm_struct *mm;
1715
1716         rcu_read_lock();
1717         mm = rcu_dereference(kctx->process_mm);
1718         if (mm) {
1719                 atomic_add(pages, &kctx->nonmapped_pages);
1720 #ifdef SPLIT_RSS_COUNTING
1721                 add_mm_counter(mm, MM_FILEPAGES, pages);
1722 #else
1723                 spin_lock(&mm->page_table_lock);
1724                 add_mm_counter(mm, MM_FILEPAGES, pages);
1725                 spin_unlock(&mm->page_table_lock);
1726 #endif
1727         }
1728         rcu_read_unlock();
1729 }
1730
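/*
 * Called when the tracking page mapping goes away: detach process_mm under
 * mm_update_lock, wait out concurrent RCU readers of the pointer, then back
 * out whatever is still accounted against the mm.
 */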
1731 static void kbasep_os_process_page_usage_drain(struct kbase_context *kctx)
1732 {
1733         int pages;
1734         struct mm_struct *mm;
1735
1736         spin_lock(&kctx->mm_update_lock);
1737         mm = rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock));
1738         if (!mm) {
1739                 spin_unlock(&kctx->mm_update_lock);
1740                 return;
1741         }
1742
1743         rcu_assign_pointer(kctx->process_mm, NULL);
1744         spin_unlock(&kctx->mm_update_lock);
1745         synchronize_rcu();
1746
1747         pages = atomic_xchg(&kctx->nonmapped_pages, 0);
1748 #ifdef SPLIT_RSS_COUNTING
1749         add_mm_counter(mm, MM_FILEPAGES, -pages);
1750 #else
1751         spin_lock(&mm->page_table_lock);
1752         add_mm_counter(mm, MM_FILEPAGES, -pages);
1753         spin_unlock(&mm->page_table_lock);
1754 #endif
1755 }
1756
1757 static void kbase_special_vm_close(struct vm_area_struct *vma)
1758 {
1759         struct kbase_context *kctx;
1760
1761         kctx = vma->vm_private_data;
1762         kbasep_os_process_page_usage_drain(kctx);
1763 }
1764
1765 static const struct vm_operations_struct kbase_vm_special_ops = {
1766         .close = kbase_special_vm_close,
1767 };
1768
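/*
 * Install the special "tracking page" VMA that associates the context with
 * the owning process's mm for memory accounting. Only one such mapping is
 * allowed per context; the VMA grants no real access and is marked so it is
 * neither copied on fork, expanded nor dumped.
 */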
1769 static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma)
1770 {
1771         /* check that this is the only tracking page */
1772         spin_lock(&kctx->mm_update_lock);
1773         if (rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock))) {
1774                 spin_unlock(&kctx->mm_update_lock);
1775                 return -EFAULT;
1776         }
1777
1778         rcu_assign_pointer(kctx->process_mm, current->mm);
1779
1780         spin_unlock(&kctx->mm_update_lock);
1781
1782         /* no real access */
1783         vma->vm_flags &= ~(VM_READ | VM_MAYREAD | VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC);
1784 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
1785         vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
1786 #else
1787         vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO;
1788 #endif
1789         vma->vm_ops = &kbase_vm_special_ops;
1790         vma->vm_private_data = kctx;
1791
1792         return 0;
1793 }
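
/*
 * kbase_va_alloc - allocate write-combined DMA memory mapped at the same
 * virtual address on CPU and GPU
 *
 * The buffer comes from the DMA API, is wrapped in a SAME_VA region whose
 * backing pages are the DMA pages, and is then mapped on the GPU at the CPU
 * virtual address. The state needed to free it later is stored in @handle.
 *
 * A minimal usage sketch; kctx, dump_size and the surrounding code are
 * illustrative only:
 *
 *	struct kbase_hwc_dma_mapping handle;
 *	void *buf = kbase_va_alloc(kctx, dump_size, &handle);
 *
 *	if (buf) {
 *		... access buf from the CPU, hand (u64)(uintptr_t)buf to the GPU ...
 *		kbase_va_free(kctx, &handle);
 *	}
 */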
1794 void *kbase_va_alloc(struct kbase_context *kctx, u32 size, struct kbase_hwc_dma_mapping *handle)
1795 {
1796         int i;
1797         int res;
1798         void *va;
1799         dma_addr_t  dma_pa;
1800         struct kbase_va_region *reg;
1801         phys_addr_t *page_array;
1802 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
1803         DEFINE_DMA_ATTRS(attrs);
1804 #endif
1805
1806         u32 pages = ((size - 1) >> PAGE_SHIFT) + 1;
1807         u32 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR |
1808                     BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR;
1809
1810         KBASE_DEBUG_ASSERT(kctx != NULL);
1811         KBASE_DEBUG_ASSERT(0 != size);
1812         KBASE_DEBUG_ASSERT(0 != pages);
1813
1814         if (size == 0)
1815                 goto err;
1816
1817         /* All the alloc calls return zeroed memory */
1818 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
1819         dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
1820         va = dma_alloc_attrs(kctx->kbdev->dev, size, &dma_pa, GFP_KERNEL, &attrs);
1821 #else
1822         va = dma_alloc_writecombine(kctx->kbdev->dev, size, &dma_pa, GFP_KERNEL);
1823 #endif
1824         if (!va)
1825                 goto err;
1826
1827         /* Store the state so we can free it later. */
1828         handle->cpu_va = va;
1829         handle->dma_pa = dma_pa;
1830         handle->size   = size;
1831
1833         reg = kbase_alloc_free_region(kctx, 0, pages, KBASE_REG_ZONE_SAME_VA);
1834         if (!reg)
1835                 goto no_reg;
1836
1837         reg->flags &= ~KBASE_REG_FREE;
1838         kbase_update_region_flags(reg, flags);
1839
1840         reg->cpu_alloc = kbase_alloc_create(pages, KBASE_MEM_TYPE_RAW);
1841         if (IS_ERR_OR_NULL(reg->cpu_alloc))
1842                 goto no_alloc;
1843
1844         reg->gpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
1845
1846         page_array = kbase_get_cpu_phy_pages(reg);
1847
1848         for (i = 0; i < pages; i++)
1849                 page_array[i] = dma_pa + (i << PAGE_SHIFT);
1850
1851         reg->cpu_alloc->nents = pages;
1852
1853         kbase_gpu_vm_lock(kctx);
1854         res = kbase_gpu_mmap(kctx, reg, (uintptr_t) va, pages, 1);
1855         kbase_gpu_vm_unlock(kctx);
1856         if (res)
1857                 goto no_mmap;
1858
1859         return va;
1860
1861 no_mmap:
1862         kbase_mem_phy_alloc_put(reg->cpu_alloc);
1863         kbase_mem_phy_alloc_put(reg->gpu_alloc);
1864 no_alloc:
1865         kfree(reg);
1866 no_reg:
1867 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
1868         dma_free_attrs(kctx->kbdev->dev, size, va, dma_pa, &attrs);
1869 #else
1870         dma_free_writecombine(kctx->kbdev->dev, size, va, dma_pa);
1871 #endif
1872 err:
1873         return NULL;
1874 }
1875 KBASE_EXPORT_SYMBOL(kbase_va_alloc);
1876
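/*
 * kbase_va_free - release memory obtained from kbase_va_alloc
 *
 * Unmaps the region from the GPU, drops the references on the backing
 * allocation and hands the DMA buffer described by @handle back to the
 * DMA API.
 */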
1877 void kbase_va_free(struct kbase_context *kctx, struct kbase_hwc_dma_mapping *handle)
1878 {
1879         struct kbase_va_region *reg;
1880         int err;
1881 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
1882         DEFINE_DMA_ATTRS(attrs);
1883 #endif
1884
1885         KBASE_DEBUG_ASSERT(kctx != NULL);
1886         KBASE_DEBUG_ASSERT(handle->cpu_va != NULL);
1887
1888         kbase_gpu_vm_lock(kctx);
1889         reg = kbase_region_tracker_find_region_base_address(kctx, (uintptr_t)handle->cpu_va);
1890         KBASE_DEBUG_ASSERT(reg);
1891         err = kbase_gpu_munmap(kctx, reg);
1892         kbase_gpu_vm_unlock(kctx);
1893         KBASE_DEBUG_ASSERT(!err);
1894
1895         kbase_mem_phy_alloc_put(reg->cpu_alloc);
1896         kbase_mem_phy_alloc_put(reg->gpu_alloc);
1897         kfree(reg);
1898
1899 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
1900         dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
1901         dma_free_attrs(kctx->kbdev->dev, handle->size,
1902                         handle->cpu_va, handle->dma_pa, &attrs);
1903 #else
1904         dma_free_writecombine(kctx->kbdev->dev, handle->size,
1905                                 handle->cpu_va, handle->dma_pa);
1906 #endif
1907 }
1908 KBASE_EXPORT_SYMBOL(kbase_va_free);
1909