rockchip: midgard: update to r4p1_01dev0
[firefly-linux-kernel-4.4.55.git] / drivers/gpu/arm/midgard/mali_kbase_mem_linux.c
1 /*
2  *
3  * (C) COPYRIGHT ARM Limited. All rights reserved.
4  *
5  * This program is free software and is provided to you under the terms of the
6  * GNU General Public License version 2 as published by the Free Software
7  * Foundation, and any use by you of this program is subject to the terms
8  * of such GNU licence.
9  *
10  * A copy of the licence is included with the program, and can also be obtained
11  * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12  * Boston, MA  02110-1301, USA.
13  *
14  */
15
16
17
18
19
20 /**
21  * @file mali_kbase_mem_linux.c
22  * Base kernel memory APIs, Linux implementation.
23  */
24
25 #include <linux/compat.h>
26 #include <linux/kernel.h>
27 #include <linux/bug.h>
28 #include <linux/mm.h>
29 #include <linux/fs.h>
30 #include <linux/version.h>
31 #include <linux/dma-mapping.h>
32 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
33         #include <linux/dma-attrs.h>
34 #endif
35 #ifdef CONFIG_DMA_SHARED_BUFFER
36 #include <linux/dma-buf.h>
37 #endif                          /* defined(CONFIG_DMA_SHARED_BUFFER) */
38
39 #include <mali_kbase.h>
40 #include <mali_kbase_mem_linux.h>
41 #include <mali_kbase_config_defaults.h>
42
43 static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma);
44 static const struct vm_operations_struct kbase_vm_ops;
45
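/*
 * kbase_mem_alloc - allocate and back a new memory region for @kctx.
 *
 * Validates @va_pages and @flags, forces SAME_VA for 64-bit (non-compat)
 * clients, picks the VA zone (SAME_VA, EXEC or CUSTOM_VA) and allocates
 * @commit_pages of physical backing for a @va_pages sized region. SAME_VA
 * regions are parked on a cookie and returned as a cookie-based offset in
 * @gpu_va, to be mapped when userspace calls mmap; other regions are mapped
 * on the GPU immediately and @gpu_va receives the real GPU VA.
 *
 * Returns the new region, or NULL on failure (with @gpu_va set to 0).
 */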
46 struct kbase_va_region *kbase_mem_alloc(kbase_context *kctx, u64 va_pages, u64 commit_pages, u64 extent, u64 *flags, u64 *gpu_va, u16 *va_alignment)
47 {
48         int zone;
49         int gpu_pc_bits;
50         int cpu_va_bits;
51         struct kbase_va_region *reg;
52         struct device *dev;
53         KBASE_DEBUG_ASSERT(kctx);
54         KBASE_DEBUG_ASSERT(flags);
55         KBASE_DEBUG_ASSERT(gpu_va);
56         KBASE_DEBUG_ASSERT(va_alignment);
57
58         dev = kctx->kbdev->dev;
59         *va_alignment = 0; /* no alignment by default */
60         *gpu_va = 0; /* return 0 on failure */
61
62         gpu_pc_bits = kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
63         cpu_va_bits = BITS_PER_LONG;
64
65         if (0 == va_pages) {
66                 dev_warn(dev, "kbase_mem_alloc called with 0 va_pages!");
67                 goto zero_size;
68         }
69
70 #if defined(CONFIG_64BIT)
71         if (is_compat_task())
72                 cpu_va_bits = 32;
73         else
74                 /* force SAME_VA if a 64-bit client */
75                 *flags |= BASE_MEM_SAME_VA;
76 #endif
77
78         if (!kbase_check_alloc_flags(*flags)) {
79                 dev_warn(dev,
80                                 "kbase_mem_alloc called with bad flags (%llx)",
81                                 (unsigned long long)*flags);
82                 goto bad_flags;
83         }
84
85         /* Limit GPU executable allocs to GPU PC size */
86         if ((*flags & BASE_MEM_PROT_GPU_EX) &&
87             (va_pages > (1ULL << gpu_pc_bits >> PAGE_SHIFT)))
88                 goto bad_ex_size;
89
90         /* find out which VA zone to use */
91         if (*flags & BASE_MEM_SAME_VA)
92                 zone = KBASE_REG_ZONE_SAME_VA;
93         else if (*flags & BASE_MEM_PROT_GPU_EX)
94                 zone = KBASE_REG_ZONE_EXEC;
95         else
96                 zone = KBASE_REG_ZONE_CUSTOM_VA;
97
98         reg = kbase_alloc_free_region(kctx, 0, va_pages, zone);
99         if (!reg) {
100                 dev_err(dev, "Failed to allocate free region");
101                 goto no_region;
102         }
103
104         if (MALI_ERROR_NONE != kbase_reg_prepare_native(reg, kctx)) {
105                 dev_err(dev, "Failed to prepare region");
106                 goto prepare_failed;
107         }
108
109         kbase_update_region_flags(reg, *flags);
110
111         if (*flags & BASE_MEM_GROW_ON_GPF)
112                 reg->extent = extent;
113         else
114                 reg->extent = 0;
115
116         if (kbase_alloc_phy_pages(reg, va_pages, commit_pages)) {
117                 dev_warn(dev, "Failed to allocate %lld pages (va_pages=%lld)",
118                               (unsigned long long)commit_pages, (unsigned long long)va_pages);
119                 goto no_mem;
120         }
121
122         kbase_gpu_vm_lock(kctx);
123
124         /* mmap needed to setup VA? */
125         if (*flags & BASE_MEM_SAME_VA) {
126                 /* Bind to a cookie */
127                 if (!kctx->cookies) {
128                         dev_err(dev, "No cookies available for allocation!");
129                         goto no_cookie;
130                 }
131                 /* return a cookie */
132                 *gpu_va = __ffs(kctx->cookies);
133                 kctx->cookies &= ~(1UL << *gpu_va);
134                 BUG_ON(kctx->pending_regions[*gpu_va]);
135                 kctx->pending_regions[*gpu_va] = reg;
136
137                 /* relocate to correct base */
138                 *gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE);
139                 *gpu_va <<= PAGE_SHIFT;
140
141                 /* See if we must align memory due to GPU PC bits vs CPU VA */
142                 if ((*flags & BASE_MEM_PROT_GPU_EX) &&
143                     (cpu_va_bits > gpu_pc_bits)) {
144                         *va_alignment = gpu_pc_bits;
145                         reg->flags |= KBASE_REG_ALIGNED;
146                 }
147         } else /* we control the VA */ {
148                 if (MALI_ERROR_NONE != kbase_gpu_mmap(kctx, reg, 0, va_pages, 1)) {
149                         dev_warn(dev, "Failed to map memory on GPU");
150                         goto no_mmap;
151                 }
152                 /* return real GPU VA */
153                 *gpu_va = reg->start_pfn << PAGE_SHIFT;
154         }
155
156         kbase_gpu_vm_unlock(kctx);
157         return reg;
158
159 no_mmap:
160 no_cookie:
161         kbase_gpu_vm_unlock(kctx);
162 no_mem:
163         kbase_mem_phy_alloc_put(reg->alloc);
164 prepare_failed:
165         kfree(reg);
166 no_region:
167 bad_ex_size:
168 bad_flags:
169 zero_size:
170         return NULL;
171 }
172
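/*
 * kbase_mem_query - query a property of the region containing @gpu_addr:
 * its committed (backed) size, its VA size in pages, or its base memory
 * flags, returned through @out. Fails if the address does not resolve to
 * a live region or @query is unknown.
 */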
173 mali_error kbase_mem_query(kbase_context *kctx, mali_addr64 gpu_addr, int query, u64 * const out)
174 {
175         kbase_va_region *reg;
176         mali_error ret = MALI_ERROR_FUNCTION_FAILED;
177
178         KBASE_DEBUG_ASSERT(kctx);
179         KBASE_DEBUG_ASSERT(out);
180
181         kbase_gpu_vm_lock(kctx);
182
183         /* Validate the region */
184         reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
185         if (!reg || (reg->flags & KBASE_REG_FREE))
186                 goto out_unlock;
187
188         switch (query) {
189                 case KBASE_MEM_QUERY_COMMIT_SIZE:
190                         if (reg->alloc->type != KBASE_MEM_TYPE_ALIAS) {
191                                 *out = kbase_reg_current_backed_size(reg);
192                         } else {
193                                 size_t i;
194                                 struct kbase_aliased *aliased;
195                                 *out = 0;
196                                 aliased = reg->alloc->imported.alias.aliased;
197                                 for (i = 0; i < reg->alloc->imported.alias.nents; i++)
198                                         *out += aliased[i].length;
199                         }
200                         break;
201                 case KBASE_MEM_QUERY_VA_SIZE:
202                         *out = reg->nr_pages;
203                         break;
204                 case KBASE_MEM_QUERY_FLAGS:
205                 {
206                         *out = 0;
207                         if (reg->flags & KBASE_REG_GPU_WR)
208                                 *out |= BASE_MEM_PROT_GPU_WR;
209                         if (reg->flags & KBASE_REG_GPU_RD)
210                                 *out |= BASE_MEM_PROT_GPU_RD;
211                         if (!(reg->flags & KBASE_REG_GPU_NX))
212                                 *out |= BASE_MEM_PROT_GPU_EX;
213                         if (reg->flags & KBASE_REG_SHARE_BOTH)
214                                 *out |= BASE_MEM_COHERENT_SYSTEM;
215                         if (reg->flags & KBASE_REG_SHARE_IN)
216                                 *out |= BASE_MEM_COHERENT_LOCAL;
217                         break;
218                 }
219                 default:
220                         *out = 0;
221                         goto out_unlock;
222         }
223
224         ret = MALI_ERROR_NONE;
225
226 out_unlock:
227         kbase_gpu_vm_unlock(kctx);
228         return ret;
229 }
230
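/*
 * kbase_mem_flags_change - change the coherency (share) attributes of an
 * imported region. Only BASE_MEM_COHERENT_SYSTEM/LOCAL may be changed, and
 * only on UMP or UMM (dma-buf) imports. For UMP the GPU page table entries
 * are rewritten with the new flags; for UMM only future mappings pick the
 * flags up, as the buffer is not expected to be mapped on the GPU while its
 * flags change. The previous flags are restored on failure.
 */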
231 mali_error kbase_mem_flags_change(kbase_context *kctx, mali_addr64 gpu_addr, unsigned int flags, unsigned int mask)
232 {
233         kbase_va_region *reg;
234         mali_error ret = MALI_ERROR_FUNCTION_FAILED;
235         unsigned int real_flags = 0;
236         unsigned int prev_flags = 0;
237
238         KBASE_DEBUG_ASSERT(kctx);
239
240         if (!gpu_addr)
241                 return MALI_ERROR_FUNCTION_FAILED;
242
243         /* nuke other bits */
244         flags &= mask;
245
246         /* check for only supported flags */
247         if (flags & ~(BASE_MEM_COHERENT_SYSTEM | BASE_MEM_COHERENT_LOCAL))
248                 goto out;
249
250         /* mask covers bits we don't support? */
251         if (mask & ~(BASE_MEM_COHERENT_SYSTEM | BASE_MEM_COHERENT_LOCAL))
252                 goto out;
253
254         /* convert flags */
255         if (flags & BASE_MEM_COHERENT_SYSTEM)
256                 real_flags |= KBASE_REG_SHARE_BOTH;
257         else if (flags & BASE_MEM_COHERENT_LOCAL)
258                 real_flags |= KBASE_REG_SHARE_IN;
259
260         /* now we can lock down the context, and find the region */
261         kbase_gpu_vm_lock(kctx);
262
263         /* Validate the region */
264         reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
265         if (!reg || (reg->flags & KBASE_REG_FREE))
266                 goto out_unlock;
267
268         /* limit to imported memory */
269         if ((reg->alloc->type != KBASE_MEM_TYPE_IMPORTED_UMP) &&
270             (reg->alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM))
271                 goto out_unlock;
272
273         /* no change? */
274         if (real_flags == (reg->flags & (KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH)))
275         {
276                 ret = MALI_ERROR_NONE;
277                 goto out_unlock;
278         }
279
280         /* save for roll back */
281         prev_flags = reg->flags;
282         reg->flags &= ~(KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH);
283         reg->flags |= real_flags;
284
285         /* Currently supporting only imported memory */
286         switch (reg->alloc->type)
287         {
288 #ifdef CONFIG_UMP
289                 case KBASE_MEM_TYPE_IMPORTED_UMP:
290                         ret = kbase_mmu_update_pages(kctx, reg->start_pfn, kbase_get_phy_pages(reg), reg->alloc->nents, reg->flags);
291                         break;
292 #endif
293 #ifdef CONFIG_DMA_SHARED_BUFFER
294                 case KBASE_MEM_TYPE_IMPORTED_UMM:
295                         /* Future use will use the new flags, existing mapping will NOT be updated
296                          * as memory should not be in use by the GPU when updating the flags.
297                          */
298                         ret = MALI_ERROR_NONE;
299                         WARN_ON(reg->alloc->imported.umm.current_mapping_usage_count);
300                         break;
301 #endif
302                 default:
303                         break;
304         }
305
306         /* roll back on error, i.e. not UMP */
307         if (ret != MALI_ERROR_NONE)
308                 reg->flags = prev_flags;
309
310 out_unlock:
311         kbase_gpu_vm_unlock(kctx);
312 out:
313         return ret;
314 }
315
316 #ifdef CONFIG_UMP
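/*
 * kbase_mem_from_ump - wrap the UMP allocation identified by @id in a kbase
 * region: the CPU and GPU cacheability/permission bits are derived from the
 * UMP allocation flags, and the region's page array is filled from the UMP
 * physical blocks, so the import already has pages to map.
 */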
317 static struct kbase_va_region *kbase_mem_from_ump(kbase_context *kctx, ump_secure_id id, u64 *va_pages, u64 *flags)
318 {
319         struct kbase_va_region *reg;
320         ump_dd_handle umph;
321         u64 block_count;
322         const ump_dd_physical_block_64 *block_array;
323         u64 i, j;
324         int page = 0;
325         ump_alloc_flags ump_flags;
326         ump_alloc_flags cpu_flags;
327         ump_alloc_flags gpu_flags;
328
329         KBASE_DEBUG_ASSERT(kctx);
330         KBASE_DEBUG_ASSERT(va_pages);
331         KBASE_DEBUG_ASSERT(flags);
332
333         umph = ump_dd_from_secure_id(id);
334         if (UMP_DD_INVALID_MEMORY_HANDLE == umph)
335                 goto bad_id;
336
337         ump_flags = ump_dd_allocation_flags_get(umph);
338         cpu_flags = (ump_flags >> UMP_DEVICE_CPU_SHIFT) & UMP_DEVICE_MASK;
339         gpu_flags = (ump_flags >> DEFAULT_UMP_GPU_DEVICE_SHIFT) &
340                         UMP_DEVICE_MASK;
341
342         *va_pages = ump_dd_size_get_64(umph);
343         *va_pages >>= PAGE_SHIFT;
344
345         if (!*va_pages)
346                 goto bad_size;
347
348         if (*flags & BASE_MEM_SAME_VA)
349                 reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_SAME_VA);
350         else
351                 reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_CUSTOM_VA);
352
353         if (!reg)
354                 goto no_region;
355
356         /* we've got pages to map now, and support SAME_VA */
357         *flags |= KBASE_MEM_IMPORT_HAVE_PAGES;
358
359         reg->alloc = kbase_alloc_create(*va_pages, KBASE_MEM_TYPE_IMPORTED_UMP);
360         if (IS_ERR_OR_NULL(reg->alloc))
361                 goto no_alloc_obj;
362
363         reg->alloc->imported.ump_handle = umph;
364
365         reg->flags &= ~KBASE_REG_FREE;
366         reg->flags |= KBASE_REG_GPU_NX; /* UMP is always No eXecute */
367         reg->flags &= ~KBASE_REG_GROWABLE;      /* UMP cannot be grown */
368
369         if ((cpu_flags & (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) ==
370             (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) {
371                 reg->flags |= KBASE_REG_CPU_CACHED;
372                 *flags |= BASE_MEM_CACHED_CPU;
373         }
374
375         if (cpu_flags & UMP_PROT_DEVICE_WR) {
376                 reg->flags |= KBASE_REG_CPU_WR;
377                 *flags |= BASE_MEM_PROT_CPU_WR;
378         }
379
380         if (cpu_flags & UMP_PROT_DEVICE_RD) {
381                 reg->flags |= KBASE_REG_CPU_RD;
382                 *flags |= BASE_MEM_PROT_CPU_RD;
383         }
384
385         if ((gpu_flags & (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) ==
386             (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR))
387                 reg->flags |= KBASE_REG_GPU_CACHED;
388
389         if (gpu_flags & UMP_PROT_DEVICE_WR) {
390                 reg->flags |= KBASE_REG_GPU_WR;
391                 *flags |= BASE_MEM_PROT_GPU_WR;
392         }
393
394         if (gpu_flags & UMP_PROT_DEVICE_RD) {
395                 reg->flags |= KBASE_REG_GPU_RD;
396                 *flags |= BASE_MEM_PROT_GPU_RD;
397         }
398
399         /* ump phys block query */
400         ump_dd_phys_blocks_get_64(umph, &block_count, &block_array);
401
402         for (i = 0; i < block_count; i++) {
403                 for (j = 0; j < (block_array[i].size >> PAGE_SHIFT); j++) {
404                         reg->alloc->pages[page] = block_array[i].addr + (j << PAGE_SHIFT);
405                         page++;
406                 }
407         }
408         reg->alloc->nents = *va_pages;
409         reg->extent = 0;
410
411         return reg;
412
413 no_alloc_obj:
414         kfree(reg);
415 no_region:
416 bad_size:
417         ump_dd_release(umph);
418 bad_id:
419         return NULL;
420
421 }
422 #endif                          /* CONFIG_UMP */
423
424 #ifdef CONFIG_DMA_SHARED_BUFFER
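/*
 * kbase_mem_from_umm - wrap a dma-buf (given by @fd) in a kbase region. The
 * buffer is attached but not mapped yet (nents stays 0 and the sg table is
 * NULL); pages are only mapped when the buffer is actually used by the GPU.
 * 64-bit clients get a SAME_VA region with KBASE_MEM_NEED_MMAP set, others
 * a CUSTOM_VA region.
 */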
425 static struct kbase_va_region *kbase_mem_from_umm(kbase_context *kctx, int fd, u64 *va_pages, u64 *flags)
426 {
427         struct kbase_va_region *reg;
428         struct dma_buf *dma_buf;
429         struct dma_buf_attachment *dma_attachment;
430
431         dma_buf = dma_buf_get(fd);
432         if (IS_ERR_OR_NULL(dma_buf))
433                 goto no_buf;
434
435         dma_attachment = dma_buf_attach(dma_buf, kctx->kbdev->dev);
436         if (!dma_attachment)
437                 goto no_attachment;
438
439         *va_pages = PAGE_ALIGN(dma_buf->size) >> PAGE_SHIFT;
440         if (!*va_pages)
441                 goto bad_size;
442
443         /* ignore SAME_VA */
444         *flags &= ~BASE_MEM_SAME_VA;
445
446 #ifdef CONFIG_64BIT
447         if (!is_compat_task()) {
448                 /* 64-bit tasks must MMAP anyway, but not expose this address to clients */
449                 *flags |= KBASE_MEM_NEED_MMAP;
450                 reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_SAME_VA);
451         } else {
452 #else
453         if (1) {
454 #endif
455                 reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_CUSTOM_VA);
456         }
457
458         if (!reg)
459                 goto no_region;
460
461         reg->alloc = kbase_alloc_create(*va_pages, KBASE_MEM_TYPE_IMPORTED_UMM);
462         if (IS_ERR_OR_NULL(reg->alloc))
463                 goto no_alloc_obj;
464
465         /* No pages to map yet */
466         reg->alloc->nents = 0;
467
468         reg->flags &= ~KBASE_REG_FREE;
469         reg->flags |= KBASE_REG_GPU_NX; /* UMM is always No eXecute */
470         reg->flags &= ~KBASE_REG_GROWABLE;      /* UMM cannot be grown */
471         reg->flags |= KBASE_REG_GPU_CACHED;
472
473         if (*flags & BASE_MEM_PROT_CPU_WR)
474                 reg->flags |= KBASE_REG_CPU_WR;
475
476         if (*flags & BASE_MEM_PROT_CPU_RD)
477                 reg->flags |= KBASE_REG_CPU_RD;
478
479         if (*flags & BASE_MEM_PROT_GPU_WR)
480                 reg->flags |= KBASE_REG_GPU_WR;
481
482         if (*flags & BASE_MEM_PROT_GPU_RD)
483                 reg->flags |= KBASE_REG_GPU_RD;
484
485         /* no read or write permission given on import, only on run do we give the right permissions */
486
487         reg->alloc->type = BASE_TMEM_IMPORT_TYPE_UMM;
488         reg->alloc->imported.umm.sgt = NULL;
489         reg->alloc->imported.umm.dma_buf = dma_buf;
490         reg->alloc->imported.umm.dma_attachment = dma_attachment;
491         reg->alloc->imported.umm.current_mapping_usage_count = 0;
492         reg->extent = 0;
493
494         return reg;
495
496 no_alloc_obj:
497         kfree(reg);
498 no_region:
499 bad_size:
500         dma_buf_detach(dma_buf, dma_attachment);
501 no_attachment:
502         dma_buf_put(dma_buf);
503 no_buf:
504         return NULL;
505 }
506 #endif  /* CONFIG_DMA_SHARED_BUFFER */
507
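/*
 * kbase_mem_alias - create an alias region covering @nents slots of @stride
 * pages, described by @ai. Each source is either the special write-alloc
 * handle or a native allocation, and each entry's length (and offset, for
 * native sources) is validated against the stride and the source's backed
 * size. On 64-bit the alias is parked on a cookie for a later mmap,
 * otherwise it is mapped on the GPU directly. Returns the GPU VA (or
 * cookie-based offset), or 0 on error; *num_pages is set to nents * stride.
 */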
508 u64 kbase_mem_alias(kbase_context *kctx, u64 *flags, u64 stride,
509                     u64 nents, struct base_mem_aliasing_info *ai,
510                     u64 *num_pages)
511 {
512         kbase_va_region *reg;
513         u64 gpu_va;
514         size_t i;
515
516         KBASE_DEBUG_ASSERT(kctx);
517         KBASE_DEBUG_ASSERT(flags);
518         KBASE_DEBUG_ASSERT(ai);
519         KBASE_DEBUG_ASSERT(num_pages);
520
521         /* mask to only allowed flags */
522         *flags &= (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR |
523                    BASE_MEM_HINT_GPU_RD | BASE_MEM_HINT_GPU_WR |
524                    BASE_MEM_COHERENT_SYSTEM | BASE_MEM_COHERENT_LOCAL);
525
526         if (!(*flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR))) {
527                 dev_warn(kctx->kbdev->dev,
528                                 "kbase_mem_alias called with bad flags (%llx)",
529                                 (unsigned long long)*flags);
530                 goto bad_flags;
531         }
532
533         if (!stride)
534                 goto bad_stride;
535
536         if (!nents)
537                 goto bad_nents;
538
539         /* calculate the number of pages this alias will cover */
540         *num_pages = nents * stride;
541
542 #ifdef CONFIG_64BIT
543         if (!is_compat_task()) {
544                 /* 64-bit tasks must MMAP anyway, but not expose this address to
545                  * clients */
546                 *flags |= KBASE_MEM_NEED_MMAP;
547                 reg = kbase_alloc_free_region(kctx, 0, *num_pages,
548                                               KBASE_REG_ZONE_SAME_VA);
549         } else {
550 #else
551         if (1) {
552 #endif
553                 reg = kbase_alloc_free_region(kctx, 0, *num_pages,
554                                               KBASE_REG_ZONE_CUSTOM_VA);
555         }
556
557         if (!reg)
558                 goto no_reg;
559
560         /* zero-sized page array, as we don't need one/can support one */
561         reg->alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_ALIAS);
562         if (IS_ERR_OR_NULL(reg->alloc))
563                 goto no_alloc_obj;
564
565         kbase_update_region_flags(reg, *flags);
566
567         reg->alloc->imported.alias.nents = nents;
568         reg->alloc->imported.alias.stride = stride;
569         reg->alloc->imported.alias.aliased = vzalloc(sizeof(*reg->alloc->imported.alias.aliased) * nents);
570         if (!reg->alloc->imported.alias.aliased)
571                 goto no_aliased_array;
572
573         kbase_gpu_vm_lock(kctx);
574
575         /* validate and add src handles */
576         for (i = 0; i < nents; i++) {
577                 if (ai[i].handle < BASE_MEM_FIRST_FREE_ADDRESS) {
578                         if (ai[i].handle != BASE_MEM_WRITE_ALLOC_PAGES_HANDLE)
579                                 goto bad_handle; /* unsupported magic handle */
580                         if (!ai[i].length)
581                                 goto bad_handle; /* must be > 0 */
582                         if (ai[i].length > stride)
583                                 goto bad_handle; /* can't be larger than the
584                                                     stride */
585                         reg->alloc->imported.alias.aliased[i].length = ai[i].length;
586                 } else {
587                         struct kbase_va_region *aliasing_reg;
588                         struct kbase_mem_phy_alloc *alloc;
589                         aliasing_reg = kbase_region_tracker_find_region_base_address(kctx, (ai[i].handle >> PAGE_SHIFT) << PAGE_SHIFT);
590
591                         /* validate found region */
592                         if (!aliasing_reg)
593                                 goto bad_handle; /* Not found */
594                         if (aliasing_reg->flags & KBASE_REG_FREE)
595                                 goto bad_handle; /* Free region */
596                         if (!aliasing_reg->alloc)
597                                 goto bad_handle; /* No alloc */
598                         if (aliasing_reg->alloc->type != KBASE_MEM_TYPE_NATIVE)
599                                 goto bad_handle; /* Not a native alloc */
600
601                         /* check size against stride */
602                         if (!ai[i].length)
603                                 goto bad_handle; /* must be > 0 */
604                         if (ai[i].length > stride)
605                                 goto bad_handle; /* can't be larger than the
606                                                     stride */
607
608                         alloc = aliasing_reg->alloc;
609
610                         /* check against the alloc's size */
611                         if (ai[i].offset > alloc->nents)
612                                 goto bad_handle; /* beyond end */
613                         if (ai[i].offset + ai[i].length > alloc->nents)
614                                 goto bad_handle; /* beyond end */
615
616                         reg->alloc->imported.alias.aliased[i].alloc = kbase_mem_phy_alloc_get(alloc);
617                         reg->alloc->imported.alias.aliased[i].length = ai[i].length;
618                         reg->alloc->imported.alias.aliased[i].offset = ai[i].offset;
619                 }
620         }
621
622 #ifdef CONFIG_64BIT
623         if (!is_compat_task()) {
624                 /* Bind to a cookie */
625                 if (!kctx->cookies) {
626                         dev_err(kctx->kbdev->dev, "No cookies "
627                                                 "available for allocation!");
628                         goto no_cookie;
629                 }
630                 /* return a cookie */
631                 gpu_va = __ffs(kctx->cookies);
632                 kctx->cookies &= ~(1UL << gpu_va);
633                 BUG_ON(kctx->pending_regions[gpu_va]);
634                 kctx->pending_regions[gpu_va] = reg;
635
636                 /* relocate to correct base */
637                 gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE);
638                 gpu_va <<= PAGE_SHIFT;
639         } else /* we control the VA */ {
640 #else
641         if (1) {
642 #endif
643                 if (MALI_ERROR_NONE != kbase_gpu_mmap(kctx, reg, 0,
644                                                       *num_pages, 1)) {
645                         dev_warn(kctx->kbdev->dev,
646                                                "Failed to map memory on GPU");
647                         goto no_mmap;
648                 }
649                 /* return real GPU VA */
650                 gpu_va = reg->start_pfn << PAGE_SHIFT;
651         }
652
653         reg->flags &= ~KBASE_REG_FREE;
654         reg->flags &= ~KBASE_REG_GROWABLE;
655
656         kbase_gpu_vm_unlock(kctx);
657
658         return gpu_va;
659
660 #ifdef CONFIG_64BIT
661 no_cookie:
662 #endif
663 no_mmap:
664 bad_handle:
665         kbase_gpu_vm_unlock(kctx);
666 no_aliased_array:
667         kbase_mem_phy_alloc_put(reg->alloc);
668 no_alloc_obj:
669         kfree(reg);
670 no_reg:
671 bad_nents:
672 bad_stride:
673 bad_flags:
674         return 0;
675 }
676
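/*
 * kbase_mem_import - import external memory (UMP or UMM/dma-buf) into @kctx.
 * Depending on the resulting flags the new region is either bound to a
 * cookie for a later mmap (SAME_VA / NEED_MMAP), mapped on the GPU right
 * away when pages are already available, or just given a GPU VA
 * reservation. Returns 0 on success or -ENOMEM, zeroing the output
 * parameters on failure.
 */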
677 int kbase_mem_import(kbase_context *kctx, base_mem_import_type type, int handle, mali_addr64 *gpu_va, u64 *va_pages, u64 *flags)
678 {
679         kbase_va_region *reg;
680
681         KBASE_DEBUG_ASSERT(kctx);
682         KBASE_DEBUG_ASSERT(gpu_va);
683         KBASE_DEBUG_ASSERT(va_pages);
684         KBASE_DEBUG_ASSERT(flags);
685
686 #ifdef CONFIG_64BIT
687         if (!is_compat_task())
688                 *flags |= BASE_MEM_SAME_VA;
689 #endif
690
691         switch (type) {
692 #ifdef CONFIG_UMP
693         case BASE_MEM_IMPORT_TYPE_UMP:
694                 reg = kbase_mem_from_ump(kctx, (ump_secure_id)handle, va_pages, flags);
695                 break;
696 #endif /* CONFIG_UMP */
697 #ifdef CONFIG_DMA_SHARED_BUFFER
698         case BASE_MEM_IMPORT_TYPE_UMM:
699                 reg = kbase_mem_from_umm(kctx, handle, va_pages, flags);
700                 break;
701 #endif /* CONFIG_DMA_SHARED_BUFFER */
702         default:
703                 reg = NULL;
704                 break;
705         }
706
707         if (!reg)
708                 goto no_reg;
709
710         kbase_gpu_vm_lock(kctx);
711
712         /* mmap needed to setup VA? */
713         if (*flags & (BASE_MEM_SAME_VA | KBASE_MEM_NEED_MMAP)) {
714                 /* Bind to a cookie */
715                 if (!kctx->cookies)
716                         goto no_cookie;
717                 /* return a cookie */
718                 *gpu_va = __ffs(kctx->cookies);
719                 kctx->cookies &= ~(1UL << *gpu_va);
720                 BUG_ON(kctx->pending_regions[*gpu_va]);
721                 kctx->pending_regions[*gpu_va] = reg;
722
723                 /* relocate to correct base */
724                 *gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE);
725                 *gpu_va <<= PAGE_SHIFT;
726
727         } else if (*flags & KBASE_MEM_IMPORT_HAVE_PAGES)  {
728                 /* we control the VA, mmap now to the GPU */
729                 if (MALI_ERROR_NONE != kbase_gpu_mmap(kctx, reg, 0, *va_pages, 1))
730                         goto no_gpu_va;
731                 /* return real GPU VA */
732                 *gpu_va = reg->start_pfn << PAGE_SHIFT;
733         } else {
734                 /* we control the VA, but nothing to mmap yet */
735                 if (MALI_ERROR_NONE != kbase_add_va_region(kctx, reg, 0, *va_pages, 1))
736                         goto no_gpu_va;
737                 /* return real GPU VA */
738                 *gpu_va = reg->start_pfn << PAGE_SHIFT;
739         }
740
741         kbase_gpu_vm_unlock(kctx);
742
743         return 0;
744
745 no_gpu_va:
746 no_cookie:
747         kbase_gpu_vm_unlock(kctx);
748         kbase_mem_phy_alloc_put(reg->alloc);
749         kfree(reg);
750 no_reg:
751         *gpu_va = 0;
752         *va_pages = 0;
753         *flags = 0;
754         return -ENOMEM;
755 }
756
757
758
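/*
 * zap_range_nolock - zap the PTEs of every VMA in [start, end) that belongs
 * to kbase (i.e. whose vm_ops match @vm_ops), skipping foreign VMAs. The
 * caller is expected to hold the mm's mmap_sem; no locking is done here.
 */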
759 static int zap_range_nolock(struct mm_struct *mm,
760                 const struct vm_operations_struct *vm_ops,
761                 unsigned long start, unsigned long end)
762 {
763         struct vm_area_struct *vma;
764         int err = -EINVAL; /* in case end < start */
765
766         while (start < end) {
767                 unsigned long local_end;
768
769                 vma = find_vma_intersection(mm, start, end);
770                 if (!vma)
771                         break;
772
773                 /* is it ours? */
774                 if (vma->vm_ops != vm_ops)
775                         goto try_next;
776
777                 local_end = vma->vm_end;
778
779                 if (end < local_end)
780                         local_end = end;
781
782                 err = zap_vma_ptes(vma, start, local_end - start);
783                 if (unlikely(err))
784                         break;
785
786 try_next:
787                 /* go to next vma, if any */
788                 start = vma->vm_end;
789         }
790
791         return err;
792 }
793
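/*
 * kbase_mem_commit - grow or shrink the physical backing of a growable
 * native region to @new_pages. Growing allocates pages and inserts them in
 * the GPU MMU; shrinking first zaps any CPU mappings of the removed range,
 * tears the pages out of the GPU MMU (waiting for the write buffer flush on
 * GPUs with HW issue 6367) and then frees them. On failure *failure_reason
 * reports why.
 */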
794 int kbase_mem_commit(kbase_context *kctx, mali_addr64 gpu_addr, u64 new_pages, base_backing_threshold_status *failure_reason)
795 {
796         u64 old_pages;
797         u64 delta;
798         int res = -EINVAL;
799         kbase_va_region *reg;
800         phys_addr_t *phy_pages;
801
802         KBASE_DEBUG_ASSERT(kctx);
803         KBASE_DEBUG_ASSERT(failure_reason);
804         KBASE_DEBUG_ASSERT(gpu_addr != 0);
805
806         down_read(&current->mm->mmap_sem);
807         kbase_gpu_vm_lock(kctx);
808
809         /* Validate the region */
810         reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
811         if (!reg || (reg->flags & KBASE_REG_FREE)) {
812                 *failure_reason = BASE_BACKING_THRESHOLD_ERROR_INVALID_ARGUMENTS;
813                 goto out_unlock;
814         }
815
816         KBASE_DEBUG_ASSERT(reg->alloc);
817
818         if (reg->alloc->type != KBASE_MEM_TYPE_NATIVE) {
819                 *failure_reason = BASE_BACKING_THRESHOLD_ERROR_NOT_GROWABLE;
820                 goto out_unlock;
821         }
822
823         if (0 == (reg->flags & KBASE_REG_GROWABLE)) {
824                 *failure_reason = BASE_BACKING_THRESHOLD_ERROR_NOT_GROWABLE;
825                 goto out_unlock;
826         }
827
828         if (new_pages > reg->nr_pages) {
829                 /* Would overflow the VA region */
830                 *failure_reason = BASE_BACKING_THRESHOLD_ERROR_INVALID_ARGUMENTS;
831                 goto out_unlock;
832         }
833
834         /* can't be mapped more than once on the GPU */
835         if (atomic_read(&reg->alloc->gpu_mappings) > 1) {
836                 *failure_reason = BASE_BACKING_THRESHOLD_ERROR_NOT_GROWABLE;
837                 goto out_unlock;
838         }
839
840         if (new_pages == reg->alloc->nents) {
841                 /* no change */
842                 res = 0;
843                 goto out_unlock;
844         }
845
846         phy_pages = kbase_get_phy_pages(reg);
847         old_pages = kbase_reg_current_backed_size(reg);
848
849         if (new_pages > old_pages) {
850                 /* growing */
851                 mali_error err;
852                 delta = new_pages - old_pages;
853                 /* Allocate some more pages */
854                 if (MALI_ERROR_NONE != kbase_alloc_phy_pages_helper(reg->alloc, delta)) {
855                         *failure_reason = BASE_BACKING_THRESHOLD_ERROR_OOM;
856                         goto out_unlock;
857                 }
858                 err = kbase_mmu_insert_pages(kctx, reg->start_pfn + old_pages, phy_pages + old_pages, delta, reg->flags);
859                 if (MALI_ERROR_NONE != err) {
860                         kbase_free_phy_pages_helper(reg->alloc, delta);
861                         *failure_reason = BASE_BACKING_THRESHOLD_ERROR_OOM;
862                         goto out_unlock;
863                 }
864         } else {
865                 /* shrinking */
866                 struct kbase_cpu_mapping *mapping;
867                 mali_error err;
868
869                 /* first, unmap from any mappings affected */
870                 list_for_each_entry(mapping, &reg->alloc->mappings, mappings_list) {
871                         unsigned long mapping_size = (mapping->vm_end - mapping->vm_start) >> PAGE_SHIFT;
872
873                         /* is this mapping affected? */
874                         if ((mapping->page_off + mapping_size) > new_pages) {
875                                 unsigned long first_bad = 0;
876                                 int zap_res;
877
878                                 if (new_pages > mapping->page_off)
879                                         first_bad = new_pages - mapping->page_off;
880
881                                 zap_res = zap_range_nolock(current->mm,
882                                                 &kbase_vm_ops,
883                                                 mapping->vm_start +
884                                                 (first_bad << PAGE_SHIFT),
885                                                 mapping->vm_end);
886                                 WARN(zap_res,
887                                      "Failed to zap VA range (0x%lx - 0x%lx)\n",
888                                      mapping->vm_start +
889                                      (first_bad << PAGE_SHIFT),
890                                      mapping->vm_end
891                                      );
892                         }
893                 }
894
895                 /* Free some pages */
896                 delta = old_pages - new_pages;
897                 err = kbase_mmu_teardown_pages(kctx, reg->start_pfn + new_pages, delta);
898                 if (MALI_ERROR_NONE != err) {
899                         *failure_reason = BASE_BACKING_THRESHOLD_ERROR_OOM;
900                         goto out_unlock;
901                 }
902
903                 if (kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_6367)) {
904                         /* Wait for GPU to flush write buffer before freeing physical pages */
905                         kbase_wait_write_flush(kctx);
906                 }
907
908                 kbase_free_phy_pages_helper(reg->alloc, delta);
909         }
910
911         res = 0;
912
913 out_unlock:
914         kbase_gpu_vm_unlock(kctx);
915         up_read(&current->mm->mmap_sem);
916
917         return res;
918
919 }
920
921 STATIC void kbase_cpu_vm_open(struct vm_area_struct *vma)
922 {
923         struct kbase_cpu_mapping *map = vma->vm_private_data;
924         KBASE_DEBUG_ASSERT(map);
925         KBASE_DEBUG_ASSERT(map->count > 0);
926         /* non-atomic as we're under Linux's mm lock */
927         map->count++;
928 }
929
930 STATIC void kbase_cpu_vm_close(struct vm_area_struct *vma)
931 {
932         struct kbase_cpu_mapping *map = vma->vm_private_data;
933         KBASE_DEBUG_ASSERT(map);
934         KBASE_DEBUG_ASSERT(map->count > 0);
935
936         /* non-atomic as we're under Linux's mm lock */
937         if (--map->count)
938                 return;
939
940         KBASE_DEBUG_ASSERT(map->kctx);
941         KBASE_DEBUG_ASSERT(map->alloc);
942
943         kbase_gpu_vm_lock(map->kctx);
944
945         if (map->region) {
946                 KBASE_DEBUG_ASSERT((map->region->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_SAME_VA);
947                 kbase_mem_free_region(map->kctx, map->region);
948         }
949
950         list_del(&map->mappings_list);
951
952         kbase_gpu_vm_unlock(map->kctx);
953
954         kbase_mem_phy_alloc_put(map->alloc);
955         kfree(map);
956 }
957
958 KBASE_EXPORT_TEST_API(kbase_cpu_vm_close)
959
960
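/*
 * kbase_cpu_vm_fault - CPU page fault handler for kbase mappings.
 * vmf->pgoff cannot be used directly because the mmap offset encodes a GPU
 * VA or a cookie, so the page offset is recomputed relative to the mapping.
 * All currently backed pages from the fault address onwards are inserted;
 * a fault beyond the backed size raises SIGSEGV.
 */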
961 STATIC int kbase_cpu_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
962 {
963         struct kbase_cpu_mapping *map = vma->vm_private_data;
964         pgoff_t rel_pgoff;
965         size_t i;
966
967         KBASE_DEBUG_ASSERT(map);
968         KBASE_DEBUG_ASSERT(map->count > 0);
969         KBASE_DEBUG_ASSERT(map->kctx);
970         KBASE_DEBUG_ASSERT(map->alloc);
971
972         /* we don't use vmf->pgoff as it's affected by our mmap with offset being a GPU VA or a cookie */
973         rel_pgoff = ((unsigned long)vmf->virtual_address - map->vm_start) >> PAGE_SHIFT;
974
975         kbase_gpu_vm_lock(map->kctx);
976         if (map->page_off + rel_pgoff >= map->alloc->nents)
977                 goto locked_bad_fault;
978
979         /* insert all valid pages from the fault location */
980         for (i = rel_pgoff;
981                            i < MIN((vma->vm_end - vma->vm_start) >> PAGE_SHIFT,
982                                      map->alloc->nents - map->page_off); i++) {
983                 int ret = vm_insert_pfn(vma, map->vm_start + (i << PAGE_SHIFT),
984                                PFN_DOWN(map->alloc->pages[map->page_off + i]));
985                 if (ret < 0 && ret != -EBUSY)
986                         goto locked_bad_fault;
987         }
988
989         kbase_gpu_vm_unlock(map->kctx);
990         /* we resolved it, nothing for VM to do */
991         return VM_FAULT_NOPAGE;
992
993 locked_bad_fault:
994         kbase_gpu_vm_unlock(map->kctx);
995         send_sig(SIGSEGV, current, 1);
996         return VM_FAULT_NOPAGE;
997 }
998
999 static const struct vm_operations_struct kbase_vm_ops = {
1000         .open  = kbase_cpu_vm_open,
1001         .close = kbase_cpu_vm_close,
1002         .fault = kbase_cpu_vm_fault
1003 };
1004
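/*
 * kbase_cpu_mmap - finish the CPU side of an mmap for @reg: install
 * kbase_vm_ops, map the backing either by PFN insertion (device/OS pages)
 * or via remap_vmalloc_range() when a kernel vmalloc buffer (@kaddr) backs
 * the mapping, and record a kbase_cpu_mapping on the allocation's mapping
 * list so the region can be freed on the final munmap when @free_on_close
 * is set.
 */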
1005 static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vma, void *kaddr, size_t nr_pages, int free_on_close)
1006 {
1007         struct kbase_cpu_mapping *map;
1008         u64 start_off = vma->vm_pgoff - reg->start_pfn;
1009         phys_addr_t *page_array;
1010         int err = 0;
1011         int i;
1012
1013         map = kzalloc(sizeof(*map), GFP_KERNEL);
1014
1015         if (!map) {
1016                 WARN_ON(1);
1017                 err = -ENOMEM;
1018                 goto out;
1019         }
1020
1021         /*
1022          * VM_DONTCOPY - don't make this mapping available in fork'ed processes
1023          * VM_DONTEXPAND - disable mremap on this region
1024          * VM_IO - disables paging
1025          * VM_DONTDUMP - Don't include in core dumps (3.7 only)
1026          * VM_MIXEDMAP - Support mixing struct page*s and raw pfns.
1027          *               This is needed to support using the dedicated and
1028          *               the OS based memory backends together.
1029          */
1030         /*
1031          * This will need updating to propagate coherency flags
1032          * See MIDBASE-1057
1033          */
1034
1035 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
1036         vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP | VM_DONTEXPAND | VM_IO;
1037 #else
1038         vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO;
1039 #endif
1040         vma->vm_ops = &kbase_vm_ops;
1041         vma->vm_private_data = map;
1042
1043         page_array = kbase_get_phy_pages(reg);
1044
1045         if (!(reg->flags & KBASE_REG_CPU_CACHED) &&
1046             (reg->flags & (KBASE_REG_CPU_WR|KBASE_REG_CPU_RD))) {
1047                 /* We can't map vmalloc'd memory uncached.
1048                  * Other memory will have been returned from
1049                  * kbase_mem_allocator_alloc which would be
1050                  * suitable for mapping uncached.
1051                  */
1052                 BUG_ON(kaddr);
1053                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1054         }
1055
1056         if (!kaddr) {
1057                 vma->vm_flags |= VM_PFNMAP;
1058                 for (i = 0; i < nr_pages; i++) {
1059                         err = vm_insert_pfn(vma, vma->vm_start + (i << PAGE_SHIFT), page_array[i + start_off] >> PAGE_SHIFT);
1060                         if (WARN_ON(err))
1061                                 break;
1062                 }
1063         } else {
1064                 /* MIXEDMAP so we can vfree the kaddr early and not track it after map time */
1065                 vma->vm_flags |= VM_MIXEDMAP;
1066                 /* vmalloc remaping is easy... */
1067                 err = remap_vmalloc_range(vma, kaddr, 0);
1068                 WARN_ON(err);
1069         }
1070
1071         if (err) {
1072                 kfree(map);
1073                 goto out;
1074         }
1075
1076
1077         map->page_off = start_off;
1078         map->region = free_on_close ? reg : NULL;
1079         map->kctx = reg->kctx;
1080         map->vm_start = vma->vm_start;
1081         map->vm_end = vma->vm_end;
1082         map->alloc = kbase_mem_phy_alloc_get(reg->alloc);
1083         map->count = 1; /* start with one ref */
1084
1085         if (reg->flags & KBASE_REG_CPU_CACHED)
1086                 map->alloc->accessed_cached = 1;
1087
1088         list_add(&map->mappings_list, &map->alloc->mappings);
1089
1090  out:
1091         return err;
1092 }
1093
1094 static int kbase_trace_buffer_mmap(kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **const reg, void **const kaddr)
1095 {
1096         struct kbase_va_region *new_reg;
1097         u32 nr_pages;
1098         size_t size;
1099         int err = 0;
1100         u32 *tb;
1101         int owns_tb = 1;
1102
1103         dev_dbg(kctx->kbdev->dev, "in %s\n", __func__);
1104         size = (vma->vm_end - vma->vm_start);
1105         nr_pages = size >> PAGE_SHIFT;
1106
1107         if (!kctx->jctx.tb) {
1108                 KBASE_DEBUG_ASSERT(0 != size);
1109                 tb = vmalloc_user(size);
1110
1111                 if (NULL == tb) {
1112                         err = -ENOMEM;
1113                         goto out;
1114                 }
1115
1116                 kbase_device_trace_buffer_install(kctx, tb, size);
1117         } else {
1118                 err = -EINVAL;
1119                 goto out;
1120         }
1121
1122         *kaddr = kctx->jctx.tb;
1123
1124         new_reg = kbase_alloc_free_region(kctx, 0, nr_pages, KBASE_REG_ZONE_SAME_VA);
1125         if (!new_reg) {
1126                 err = -ENOMEM;
1127                 WARN_ON(1);
1128                 goto out_no_region;
1129         }
1130
1131         new_reg->alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_TB);
1132         if (IS_ERR_OR_NULL(new_reg->alloc)) {
1133                 err = -ENOMEM;
1134                 new_reg->alloc = NULL;
1135                 WARN_ON(1);
1136                 goto out_no_alloc;
1137         }
1138
1139         new_reg->alloc->imported.kctx = kctx;
1140         new_reg->flags &= ~KBASE_REG_FREE;
1141         new_reg->flags |= KBASE_REG_CPU_CACHED;
1142
1143         /* alloc now owns the tb */
1144         owns_tb = 0;
1145
1146         if (MALI_ERROR_NONE != kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1)) {
1147                 err = -ENOMEM;
1148                 WARN_ON(1);
1149                 goto out_no_va_region;
1150         }
1151
1152         *reg = new_reg;
1153
1154         /* map read only, noexec */
1155         vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC);
1156         /* the rest of the flags is added by the cpu_mmap handler */
1157
1158         dev_dbg(kctx->kbdev->dev, "%s done\n", __func__);
1159         return 0;
1160
1161 out_no_va_region:
1162 out_no_alloc:
1163         kbase_free_alloced_region(new_reg);
1164 out_no_region:
1165         if (owns_tb) {
1166                 kbase_device_trace_buffer_uninstall(kctx);
1167                 vfree(tb);
1168         }
1169 out:
1170         return err;
1171
1172 }
1173
1174 static int kbase_mmu_dump_mmap(kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **const reg, void **const kmap_addr)
1175 {
1176         struct kbase_va_region *new_reg;
1177         void *kaddr;
1178         u32 nr_pages;
1179         size_t size;
1180         int err = 0;
1181
1182         dev_dbg(kctx->kbdev->dev, "in kbase_mmu_dump_mmap\n");
1183         size = (vma->vm_end - vma->vm_start);
1184         nr_pages = size >> PAGE_SHIFT;
1185
1186         kaddr = kbase_mmu_dump(kctx, nr_pages);
1187
1188         if (!kaddr) {
1189                 err = -ENOMEM;
1190                 goto out;
1191         }
1192
1193         new_reg = kbase_alloc_free_region(kctx, 0, nr_pages, KBASE_REG_ZONE_SAME_VA);
1194         if (!new_reg) {
1195                 err = -ENOMEM;
1196                 WARN_ON(1);
1197                 goto out;
1198         }
1199
1200         new_reg->alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_RAW);
1201         if (IS_ERR_OR_NULL(new_reg->alloc)) {
1202                 err = -ENOMEM;
1203                 new_reg->alloc = NULL;
1204                 WARN_ON(1);
1205                 goto out_no_alloc;
1206         }
1207
1208         new_reg->flags &= ~KBASE_REG_FREE;
1209         new_reg->flags |= KBASE_REG_CPU_CACHED;
1210         if (MALI_ERROR_NONE != kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1)) {
1211                 err = -ENOMEM;
1212                 WARN_ON(1);
1213                 goto out_va_region;
1214         }
1215
1216         *kmap_addr = kaddr;
1217         *reg = new_reg;
1218
1219         dev_dbg(kctx->kbdev->dev, "kbase_mmu_dump_mmap done\n");
1220         return 0;
1221
1222 out_no_alloc:
1223 out_va_region:
1224         kbase_free_alloced_region(new_reg);
1225 out:
1226         return err;
1227 }
1228
1229
1230 void kbase_os_mem_map_lock(kbase_context *kctx)
1231 {
1232         struct mm_struct *mm = current->mm;
1233         (void)kctx;
1234         down_read(&mm->mmap_sem);
1235 }
1236
1237 void kbase_os_mem_map_unlock(kbase_context *kctx)
1238 {
1239         struct mm_struct *mm = current->mm;
1240         (void)kctx;
1241         up_read(&mm->mmap_sem);
1242 }
1243
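/*
 * kbase_mmap - driver mmap entry point. The mmap offset (vm_pgoff) selects
 * what is being mapped: the tracking helper page, the trace buffer, an MMU
 * dump, a pending SAME_VA cookie (in which case the region is GPU-mapped
 * here and the cookie released), or an existing GPU region, with dma-buf
 * imports forwarded to dma_buf_mmap().
 */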
1244 int kbase_mmap(struct file *file, struct vm_area_struct *vma)
1245 {
1246         kbase_context *kctx = file->private_data;
1247         struct kbase_va_region *reg;
1248         void *kaddr = NULL;
1249         size_t nr_pages;
1250         int err = 0;
1251         int free_on_close = 0;
1252         struct device *dev = kctx->kbdev->dev;
1253
1254         dev_dbg(dev, "kbase_mmap\n");
1255         nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
1256
1257         /* strip away the VM_MAY% flags corresponding to the VM_% flags requested */
1258         vma->vm_flags &= ~((vma->vm_flags & (VM_READ | VM_WRITE)) << 4);
1259
1260         if (0 == nr_pages) {
1261                 err = -EINVAL;
1262                 goto out;
1263         }
1264
1265         if (!(vma->vm_flags & VM_SHARED)) {
1266                 err = -EINVAL;
1267                 goto out;
1268         }
1269
1270         kbase_gpu_vm_lock(kctx);
1271
1272         if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MAP_TRACKING_HANDLE)) {
1273                 /* The non-mapped tracking helper page */
1274                 err = kbase_tracking_page_setup(kctx, vma);
1275                 goto out_unlock;
1276         }
1277
1278         /* if not the MTP, verify that the MTP has been mapped */
1279         rcu_read_lock();
1280         /* catches both when the special page isn't present or
1281          * when we've forked */
1282         if (rcu_dereference(kctx->process_mm) != current->mm) {
1283                 err = -EINVAL;
1284                 rcu_read_unlock();
1285                 goto out_unlock;
1286         }
1287         rcu_read_unlock();
1288
1289         switch (vma->vm_pgoff) {
1290         case PFN_DOWN(BASE_MEM_INVALID_HANDLE):
1291         case PFN_DOWN(BASE_MEM_WRITE_ALLOC_PAGES_HANDLE):
1292                 /* Illegal handle for direct map */
1293                 err = -EINVAL;
1294                 goto out_unlock;
1295         case PFN_DOWN(BASE_MEM_TRACE_BUFFER_HANDLE):
1296                 err = kbase_trace_buffer_mmap(kctx, vma, &reg, &kaddr);
1297                 if (0 != err)
1298                         goto out_unlock;
1299                 dev_dbg(dev, "kbase_trace_buffer_mmap ok\n");
1300                 /* free the region on munmap */
1301                 free_on_close = 1;
1302                 goto map;
1303         case PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE):
1304                 /* MMU dump */
1305                 err = kbase_mmu_dump_mmap(kctx, vma, &reg, &kaddr);
1306                 if (0 != err)
1307                         goto out_unlock;
1308                 /* free the region on munmap */
1309                 free_on_close = 1;
1310                 goto map;
1311         case PFN_DOWN(BASE_MEM_COOKIE_BASE) ...
1312              PFN_DOWN(BASE_MEM_FIRST_FREE_ADDRESS) - 1: {
1313                 /* SAME_VA stuff, fetch the right region */
1314                 int gpu_pc_bits;
1315                 int cookie = vma->vm_pgoff - PFN_DOWN(BASE_MEM_COOKIE_BASE);
1316                 gpu_pc_bits = kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
1317                 reg = kctx->pending_regions[cookie];
1318                 if (NULL != reg) {
1319                         size_t aligned_offset = 0;
1320
1321                         if (reg->flags & KBASE_REG_ALIGNED) {
1322                                 /* nr_pages must be able to hold alignment pages
1323                                  * plus actual pages */
1324                                 if (nr_pages != ((1UL << gpu_pc_bits >>
1325                                                         PAGE_SHIFT) +
1326                                                         reg->nr_pages)) {
1327                                         /* incorrect mmap size */
1328                                         /* leave the cookie for a potential
1329                                          * later mapping, or to be reclaimed
1330                                          * later when the context is freed */
1331                                         err = -ENOMEM;
1332                                         goto out_unlock;
1333                                 }
1334
1335                                 aligned_offset = (vma->vm_start +
1336                                                   (1UL << gpu_pc_bits) - 1) &
1337                                                  ~((1UL << gpu_pc_bits) - 1);
1338                                 aligned_offset -= vma->vm_start;
1339                         } else if (reg->nr_pages != nr_pages) {
1340                                 /* incorrect mmap size */
1341                                 /* leave the cookie for a potential later
1342                                  * mapping, or to be reclaimed later when the
1343                                  * context is freed */
1344                                 err = -ENOMEM;
1345                                 goto out_unlock;
1346                         }
1347
1348                         if ((vma->vm_flags & VM_READ &&
1349                              !(reg->flags & KBASE_REG_CPU_RD)) ||
1350                             (vma->vm_flags & VM_WRITE &&
1351                              !(reg->flags & KBASE_REG_CPU_WR))) {
1352                                 /* VM flags inconsistent with region flags */
1353                                 err = -EPERM;
1354                                 dev_err(dev, "%s:%d inconsistent VM flags\n",
1355                                         __FILE__, __LINE__);
1356                                 goto out_unlock;
1357                         }
1358
1359                         /* adjust down nr_pages to what we have physically */
1360                         nr_pages = kbase_reg_current_backed_size(reg);
1361
1362                         if (MALI_ERROR_NONE != kbase_gpu_mmap(kctx, reg,
1363                                                               vma->vm_start +
1364                                                               aligned_offset,
1365                                                               reg->nr_pages,
1366                                                               1)) {
1367                                 dev_err(dev, "%s:%d\n", __FILE__, __LINE__);
1368                                 /* Unable to map in GPU space. */
1369                                 WARN_ON(1);
1370                                 err = -ENOMEM;
1371                                 goto out_unlock;
1372                         }
1373
1374                         /* no need for the cookie anymore */
1375                         kctx->pending_regions[cookie] = NULL;
1376                         kctx->cookies |= (1UL << cookie);
1377
1378                         /*
1379                          * Overwrite the offset with the
1380                          * region start_pfn, so we effectively
1381                          * map from offset 0 in the region.
1382                          */
1383                         vma->vm_pgoff = reg->start_pfn;
1384
1385                         /* free the region on munmap */
1386                         free_on_close = 1;
1387                         goto map;
1388                 }
1389
1390                 err = -ENOMEM;
1391                 goto out_unlock;
1392         }
1393         default: {
1394                 reg = kbase_region_tracker_find_region_enclosing_address(kctx, (u64)vma->vm_pgoff << PAGE_SHIFT);
1395
1396                 if (reg && !(reg->flags & KBASE_REG_FREE)) {
1397                         /* will this mapping overflow the size of the region? */
1398                         if (nr_pages > (reg->nr_pages - (vma->vm_pgoff - reg->start_pfn)))
1399                                 goto overflow;
1400
1401                         if ((vma->vm_flags & VM_READ &&
1402                              !(reg->flags & KBASE_REG_CPU_RD)) ||
1403                             (vma->vm_flags & VM_WRITE &&
1404                              !(reg->flags & KBASE_REG_CPU_WR))) {
1405                                 /* VM flags inconsistent with region flags */
1406                                 err = -EPERM;
1407                                 printk(KERN_ERR "%s:%d inconsistent VM flags\n",
1408                                         __FILE__, __LINE__);
1409                                 goto out_unlock;
1410                         }
1411
1412 #ifdef CONFIG_DMA_SHARED_BUFFER
1413                         if (reg->alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM)
1414                                 goto dma_map;
1415 #endif /* CONFIG_DMA_SHARED_BUFFER */
1416
1417                         /* limit what we map to the amount currently backed */
1418                         if (reg->alloc->nents < (vma->vm_pgoff - reg->start_pfn + nr_pages)) {
1419                                 if ((vma->vm_pgoff - reg->start_pfn) >= reg->alloc->nents)
1420                                         nr_pages = 0;
1421                                 else
1422                                         nr_pages = reg->alloc->nents - (vma->vm_pgoff - reg->start_pfn);
1423                         }
1424
1425                         goto map;
1426                 }
1427
1428 overflow:
1429                 err = -ENOMEM;
1430                 goto out_unlock;
1431         } /* default */
1432         } /* switch */
1433 map:
1434         err = kbase_cpu_mmap(reg, vma, kaddr, nr_pages, free_on_close);
1435
1436         if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE)) {
1437                 /* MMU dump - userspace should now have a reference on
1438                  * the pages, so we can now free the kernel mapping */
1439                 vfree(kaddr);
1440         }
1441         goto out_unlock;
1442
1443 #ifdef CONFIG_DMA_SHARED_BUFFER
1444 dma_map:
1445         err = dma_buf_mmap(reg->alloc->imported.umm.dma_buf, vma, vma->vm_pgoff - reg->start_pfn);
1446 #endif                          /* CONFIG_DMA_SHARED_BUFFER */
1447 out_unlock:
1448         kbase_gpu_vm_unlock(kctx);
1449 out:
1450         if (err)
1451                 dev_err(dev, "mmap failed %d\n", err);
1452
1453         return err;
1454 }
1455
1456 KBASE_EXPORT_TEST_API(kbase_mmap)
1457
1458 void kbasep_os_process_page_usage_update(kbase_context *kctx, int pages)
1459 {
1460         struct mm_struct *mm;
1461
1462         rcu_read_lock();
1463         mm = rcu_dereference(kctx->process_mm);
1464         if (mm)
1465         {
1466                 atomic_add(pages, &kctx->nonmapped_pages);
1467 #ifdef SPLIT_RSS_COUNTING
1468                 add_mm_counter(mm, MM_FILEPAGES, pages);
1469 #else
1470                 spin_lock(&mm->page_table_lock);
1471                 add_mm_counter(mm, MM_FILEPAGES, pages);
1472                 spin_unlock(&mm->page_table_lock);
1473 #endif
1474         }
1475         rcu_read_unlock();
1476 }
1477
1478 static void kbasep_os_process_page_usage_drain(kbase_context *kctx)
1479 {
1480         int pages;
1481         struct mm_struct *mm;
1482
1483         spin_lock(&kctx->mm_update_lock);
1484         mm = rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock));
1485         if (!mm)
1486         {
1487                 spin_unlock(&kctx->mm_update_lock);
1488                 return;
1489         }
1490
1491         rcu_assign_pointer(kctx->process_mm, NULL);
1492         spin_unlock(&kctx->mm_update_lock);
1493         synchronize_rcu();
1494
1495         pages = atomic_xchg(&kctx->nonmapped_pages, 0);
1496 #ifdef SPLIT_RSS_COUNTING
1497         add_mm_counter(mm, MM_FILEPAGES, -pages);
1498 #else
1499         spin_lock(&mm->page_table_lock);
1500         add_mm_counter(mm, MM_FILEPAGES, -pages);
1501         spin_unlock(&mm->page_table_lock);
1502 #endif
1503 }
1504
1505 static void kbase_special_vm_close(struct vm_area_struct *vma)
1506 {
1507         kbase_context *kctx;
1508         kctx = vma->vm_private_data;
1509         kbasep_os_process_page_usage_drain(kctx);
1510 }
1511
1512 static const struct vm_operations_struct kbase_vm_special_ops = {
1513         .close = kbase_special_vm_close,
1514 };
1515
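/*
 * kbase_tracking_page_setup - set up the tracking helper page: record
 * current->mm as the context's process_mm (only one tracking page per
 * context is allowed) and turn the VMA into a no-access special mapping
 * whose close handler drains the non-mapped page accounting.
 */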
1516 static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma)
1517 {
1518         /* check that this is the only tracking page */
1519         spin_lock(&kctx->mm_update_lock);
1520         if (rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock)))
1521         {
1522                 spin_unlock(&kctx->mm_update_lock);
1523                 return -EFAULT;
1524         }
1525
1526         rcu_assign_pointer(kctx->process_mm, current->mm);
1527
1528         spin_unlock(&kctx->mm_update_lock);
1529
1530         /* no real access */
1531         vma->vm_flags &= ~(VM_READ | VM_MAYREAD | VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC);
1532 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
1533         vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
1534 #else
1535         vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO;
1536 #endif
1537         vma->vm_ops = &kbase_vm_special_ops;
1538         vma->vm_private_data = kctx;
1539
1540         return 0;
1541 }
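
/*
 * kbase_va_alloc - allocate @size bytes of write-combined DMA memory, wrap
 * it in a SAME_VA region whose page array points at the DMA pages, and map
 * it on the GPU at the same address as the CPU mapping so both sides share
 * the pointer. State needed for freeing is stored in @handle; release with
 * kbase_va_free().
 */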
1542 void *kbase_va_alloc(kbase_context *kctx, u32 size, kbase_hwc_dma_mapping *handle)
1543 {
1544         int i;
1545         int res;
1546         void *va;
1547         dma_addr_t  dma_pa;
1548         struct kbase_va_region *reg;
1549         phys_addr_t *page_array;
1550 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
1551         DEFINE_DMA_ATTRS(attrs);
1552 #endif
1553
1554         u32 pages = ((size - 1) >> PAGE_SHIFT) + 1;
1555         u32 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR |
1556                     BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR;
1557
1558         KBASE_DEBUG_ASSERT(kctx != NULL);
1559         KBASE_DEBUG_ASSERT(0 != size);
1560         KBASE_DEBUG_ASSERT(0 != pages);
1561
1562         if (size == 0)
1563                 goto err;
1564
1565 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
1566         dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
1567         va = dma_alloc_attrs(kctx->kbdev->dev, size, &dma_pa, GFP_KERNEL, &attrs);
1568 #else
1569         va = dma_alloc_writecombine(kctx->kbdev->dev, size, &dma_pa, GFP_KERNEL);
1570 #endif
1571         if (!va)
1572                 goto err;
1573
1574         memset(va, 0x0, size);
1575
1576         /* Store the state so we can free it later. */
1577         handle->cpu_va = va;
1578         handle->dma_pa = dma_pa;
1579         handle->size   = size;
1580
1581
1582         reg = kbase_alloc_free_region(kctx, 0, pages, KBASE_REG_ZONE_SAME_VA);
1583         if (!reg)
1584                 goto no_reg;
1585
1586         reg->flags &= ~KBASE_REG_FREE;
1587         kbase_update_region_flags(reg, flags);
1588
1589         reg->alloc = kbase_alloc_create(pages, KBASE_MEM_TYPE_RAW);
1590         if (IS_ERR_OR_NULL(reg->alloc))
1591                 goto no_alloc;
1592
1593         page_array = kbase_get_phy_pages(reg);
1594
1595         for (i = 0; i < pages; i++) {
1596                 page_array[i] = dma_pa + (i << PAGE_SHIFT);
1597         }
1598
1599         reg->alloc->nents = pages;
1600
1601         kbase_gpu_vm_lock(kctx);
1602         res = kbase_gpu_mmap(kctx, reg, (uintptr_t) va, pages, 1);
1603         kbase_gpu_vm_unlock(kctx);
1604         if (res)
1605                 goto no_mmap;
1606
1607         return va;
1608
1609 no_mmap:
1610         kbase_mem_phy_alloc_put(reg->alloc);
1611 no_alloc:
1612         kfree(reg);
1613 no_reg:
1614 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
1615         dma_free_attrs(kctx->kbdev->dev, size, va, dma_pa, &attrs);
1616 #else
1617         dma_free_writecombine(kctx->kbdev->dev, size, va, dma_pa);
1618 #endif
1619 err:
1620         return NULL;
1621 }
1622 KBASE_EXPORT_SYMBOL(kbase_va_alloc);
1623
1624 void kbase_va_free(kbase_context *kctx, kbase_hwc_dma_mapping *handle)
1625 {
1626         struct kbase_va_region *reg;
1627         mali_error err;
1628 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
1629         DEFINE_DMA_ATTRS(attrs);
1630 #endif
1631
1632         KBASE_DEBUG_ASSERT(kctx != NULL);
1633         KBASE_DEBUG_ASSERT(handle->cpu_va != NULL);
1634
1635         kbase_gpu_vm_lock(kctx);
1636         reg = kbase_region_tracker_find_region_base_address(kctx, (uintptr_t)handle->cpu_va);
1637         KBASE_DEBUG_ASSERT(reg);
1638         err = kbase_gpu_munmap(kctx, reg);
1639         kbase_gpu_vm_unlock(kctx);
1640         KBASE_DEBUG_ASSERT(err == MALI_ERROR_NONE);
1641
1642         kbase_mem_phy_alloc_put(reg->alloc);
1643         kfree(reg);
1644
1645 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
1646         dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
1647         dma_free_attrs(kctx->kbdev->dev, handle->size,
1648                         handle->cpu_va, handle->dma_pa, &attrs);
1649 #else
1650         dma_free_writecombine(kctx->kbdev->dev, handle->size,
1651                                 handle->cpu_va, handle->dma_pa);
1652 #endif
1653 }
1654 KBASE_EXPORT_SYMBOL(kbase_va_free);
1655