RK3368 GPU version Rogue M 1.28
[firefly-linux-kernel-4.4.55.git] drivers/gpu/rogue_m/services/server/env/linux/mmap.c
1 /*************************************************************************/ /*!
2 @File
3 @Title          Linux mmap interface
4 @Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
5 @License        Dual MIT/GPLv2
6
7 The contents of this file are subject to the MIT license as set out below.
8
9 Permission is hereby granted, free of charge, to any person obtaining a copy
10 of this software and associated documentation files (the "Software"), to deal
11 in the Software without restriction, including without limitation the rights
12 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
13 copies of the Software, and to permit persons to whom the Software is
14 furnished to do so, subject to the following conditions:
15
16 The above copyright notice and this permission notice shall be included in
17 all copies or substantial portions of the Software.
18
19 Alternatively, the contents of this file may be used under the terms of
20 the GNU General Public License Version 2 ("GPL") in which case the provisions
21 of GPL are applicable instead of those above.
22
23 If you wish to allow use of your version of this file only under the terms of
24 GPL, and not to allow others to use your version of this file under the terms
25 of the MIT license, indicate your decision by deleting the provisions above
26 and replace them with the notice and other provisions required by GPL as set
27 out in the file called "GPL-COPYING" included in this distribution. If you do
28 not delete the provisions above, a recipient may use your version of this file
29 under the terms of either the MIT license or GPL.
30
31 This License is also included in this distribution in the file called
32 "MIT-COPYING".
33
34 EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
35 PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
36 BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
37 PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
38 COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
39 IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
40 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
41 */ /**************************************************************************/
42 #include <linux/version.h>
43 #include <asm/io.h>
44 #include <linux/mm.h>
45 #include <asm/page.h>
46
47 #include "img_defs.h"
48 #include "pvr_debug.h"
49 #include "linkage.h"
50 #include "handle.h"
51 #include "pvrsrv.h"
52 #include "connection_server.h"
53 #include "devicemem_server_utils.h"
54 #include "allocmem.h"
55
56 #include "private_data.h"
57 #include "driverlock.h"
58
59 #if defined(SUPPORT_DRM)
60 #include "pvr_drm.h"
61 #endif
62
63 /* WARNING!
64  * The mmap code has its own mutex, to prevent a possible deadlock
65  * when using gPVRSRVLock.
66  * The Linux kernel takes the mm->mmap_sem before calling the mmap
67  * entry points (PVRMMap, MMapVOpen, MMapVClose), but the ioctl
68  * entry point may take mm->mmap_sem during fault handling, or
69  * before calling get_user_pages.  If gPVRSRVLock were used in the
70  * mmap entry points, a deadlock could result, due to the ioctl
71  * and mmap code taking the two locks in different orders.
72  * As a corollary to this, the mmap entry points must not call
73  * any driver code that relies on gPVRSRVLock being held.
74  */
75 static DEFINE_MUTEX(g_sMMapMutex);
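/*
 * A minimal sketch of the ordering described above (assuming the ioctl
 * path has already taken gPVRSRVLock by the time it faults or calls
 * get_user_pages):
 *
 *   ioctl path:  gPVRSRVLock                ->  down_read(&mm->mmap_sem)
 *   mmap  path:  down_write(&mm->mmap_sem)  ->  gPVRSRVLock (if it were taken here)
 *
 * Two threads taking the same pair of locks in opposite orders can
 * deadlock, which is why this file only ever takes g_sMMapMutex.
 */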
76
77 #include "pmr.h"
78
79 #if defined(PVRSRV_ENABLE_PROCESS_STATS)
80 #include "process_stats.h"
81 #endif
82
83 /*
84  * x86_32:
85  * Use vm_insert_page because remap_pfn_range has issues when mapping HIGHMEM
86  * pages with default memory attributes; these HIGHMEM pages are skipped in
87  * set_pages_array_[uc,wc] during allocation; see reserve_pfn_range().
88  * Also vm_insert_page is faster.
89  *
90  * x86_64:
91  * Use vm_insert_page because it is faster.
92  *
93  * Other platforms:
94  * Use remap_pfn_range by default because it does not issue a cache flush.
95  * It is known that ARM32 benefits from this. As other platforms become
96  * available, it should be investigated whether this assumption holds for them as well.
97  *
98  * Since vm_insert_page does more precise memory accounting, we have the build
99  * flag PVR_MMAP_USE_VM_INSERT that forces its use. This is useful as a debug
100  * feature.
101  *
102  */
103 #if defined(CONFIG_X86) || defined(PVR_MMAP_USE_VM_INSERT)
104 #define MMAP_USE_VM_INSERT_PAGE 1
105 #endif
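/*
 * Summary of how the flag is used by MMapPMR() below: with
 * MMAP_USE_VM_INSERT_PAGE defined, each valid page is mapped with
 * vm_insert_page(); if any PFN in the range has no usable struct page
 * (checked with pfn_valid() and page_count()), the whole VMA is marked
 * VM_MIXEDMAP and every page is inserted with vm_insert_mixed() instead.
 * Without the flag, the VMA is marked VM_PFNMAP and the pages are mapped
 * with remap_pfn_range().
 */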
106
107 static void MMapPMROpen(struct vm_area_struct* ps_vma)
108 {
109         PMR *psPMR = ps_vma->vm_private_data;
110
111         /* Our VM flags should ensure this function never gets called */
112         PVR_DPF((PVR_DBG_WARNING,
113                          "%s: Unexpected mmap open call, this is probably an application bug.",
114                          __func__));
115         PVR_DPF((PVR_DBG_WARNING,
116                          "%s: vma struct: 0x%p, vAddr: %#lX, length: %#lX, PMR pointer: 0x%p",
117                          __func__,
118                          ps_vma,
119                          ps_vma->vm_start,
120                          ps_vma->vm_end - ps_vma->vm_start,
121                          psPMR));
122
123         /* In case we get called anyway, let's do things right by increasing the refcount and
124          * locking down the physical addresses. */
125         PMRRefPMR(psPMR);
126
127         if (PMRLockSysPhysAddresses(psPMR, PAGE_SHIFT) != PVRSRV_OK)
128         {
129                 PVR_DPF((PVR_DBG_ERROR, "%s: Could not lock down physical addresses, aborting.", __func__));
130                 PMRUnrefPMR(psPMR);
131         }
132 }
133
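/*
 * VMA close handler: undoes the work done when the mapping was created.
 * The per-page process-stats entries added in MMapPMR() are removed, and
 * the PMRLockSysPhysAddresses()/PMRRefPMR() pair taken before the mapping
 * was set up is released.
 */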
134 static void MMapPMRClose(struct vm_area_struct *ps_vma)
135 {
136     PMR *psPMR;
137     IMG_UINTPTR_T  vAddr = ps_vma->vm_start;
138     IMG_SIZE_T pageSize = OSGetPageSize();
139
140     psPMR = ps_vma->vm_private_data;
141
142     while (vAddr < ps_vma->vm_end)
143     {
144 #if defined(PVRSRV_ENABLE_PROCESS_STATS)
145     /* USER MAPPING */
146 #if !defined(PVRSRV_ENABLE_MEMORY_STATS)
147     PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, PAGE_SIZE);
148 #else
149         PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT64)vAddr);
150 #endif
151 #endif
152         vAddr += pageSize;
153     }
154
155     PMRUnlockSysPhysAddresses(psPMR);
156     PMRUnrefPMR(psPMR);
157 }
158
159 /*
160  * This vma operation is used to read and write data in mmap regions. It is
161  * called by access_process_vm, which handles PTRACE_PEEKDATA ptrace requests
162  * and reads from /proc/<pid>/mem.
163  */
164 static int MMapVAccess(struct vm_area_struct *ps_vma, unsigned long addr,
165                                            void *buf, int len, int write)
166 {
167     PMR *psPMR;
168     unsigned long ulOffset;
169     IMG_SIZE_T uiBytesCopied;
170     PVRSRV_ERROR eError;
171     int iRetVal = -EINVAL;
172
173     psPMR = ps_vma->vm_private_data;
174
175     ulOffset = addr - ps_vma->vm_start;
176
177         if (write)
178         {
179                 eError = PMR_WriteBytes(psPMR,
180                                                                 (IMG_DEVMEM_OFFSET_T) ulOffset,
181                                                                 buf,
182                                                                 len,
183                                                                 &uiBytesCopied);
184         }
185         else
186         {
187                 eError = PMR_ReadBytes(psPMR,
188                                                            (IMG_DEVMEM_OFFSET_T) ulOffset,
189                                                            buf,
190                                                            len,
191                                                            &uiBytesCopied);
192         }
193
194         if (eError != PVRSRV_OK)
195         {
196                 PVR_DPF((PVR_DBG_ERROR, "%s: Error from %s (%d)",
197                                  __FUNCTION__,
198                                  write?"PMR_WriteBytes":"PMR_ReadBytes",
199                                  eError));
200         }
201         else
202         {
203                 iRetVal = uiBytesCopied;
204         }
205
206         return iRetVal;
207 }
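/*
 * For reference, a sketch of a debugger-style access that ends up here
 * (assuming a PMR mapping exists at 'addr' in the traced process and the
 * attach succeeds):
 *
 *   ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *   waitpid(pid, NULL, 0);
 *   long word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
 *
 * The kernel services PTRACE_PEEKDATA and reads of /proc/<pid>/mem through
 * access_process_vm(), which falls back to the VMA's .access handler for
 * mappings like these that cannot be followed with get_user_pages().
 */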
208
209 static struct vm_operations_struct gsMMapOps =
210 {
211         .open=&MMapPMROpen,
212         .close=&MMapPMRClose,
213         .access=MMapVAccess,
214 };
215
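/*
 * Entry point for mapping a PMR, called from the driver's mmap entry point
 * (PVRMMap). The vm_pgoff field carries a secure PMR handle: the handle is
 * looked up on the connection, the PMR is referenced and its physical
 * addresses are locked, the page protection is derived from the PMR flags,
 * and every valid page is then mapped into the VMA. gsMMapOps is installed
 * so that MMapPMRClose() can drop the reference and unlock the addresses
 * when the mapping goes away.
 */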
216 int MMapPMR(struct file *pFile, struct vm_area_struct *ps_vma)
217 {
218         PVRSRV_ERROR eError;
219         IMG_HANDLE hSecurePMRHandle;
220         IMG_SIZE_T uiLength;
221         IMG_DEVMEM_OFFSET_T uiOffset;
222         unsigned long uiPFN;
223         PMR *psPMR;
224         PMR_FLAGS_T ulPMRFlags;
225         IMG_UINT32 ui32CPUCacheFlags;
226         unsigned long ulNewFlags = 0;
227         pgprot_t sPageProt;
228         CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile);
229     IMG_CPU_PHYADDR asCpuPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
230     IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
231         IMG_UINT32 uiOffsetIdx, uiNumOfPFNs;
232         IMG_CPU_PHYADDR *psCpuPAddr;
233         IMG_BOOL *pbValid;
234 #if defined(MMAP_USE_VM_INSERT_PAGE)
235         IMG_BOOL bMixedMap = IMG_FALSE;
236 #endif
237
238         if(psConnection == IMG_NULL)
239         {
240                 PVR_DPF((PVR_DBG_ERROR, "Invalid connection data"));
241                 goto em0;
242         }
243
244         /*
245          * The bridge lock used here to protect both PVRSRVLookupHandle and ResManFindPrivateDataByPtr
246          * is replaced by a specific lock, considering that the handle functions now have their own lock
247          * and ResManFindPrivateDataByPtr is going to be removed.
248          * This change was necessary to solve the lockdep issues related to MMapPMR.
249          */
250         mutex_lock(&g_sMMapMutex);
251         PMRLock();
252
253 #if defined(SUPPORT_DRM_DC_MODULE)
254         psPMR = PVRSRVGEMMMapLookupPMR(pFile, ps_vma);
255         if (!psPMR)
256 #endif
257         {
258                 hSecurePMRHandle = (IMG_HANDLE)((IMG_UINTPTR_T)ps_vma->vm_pgoff);
259
260                 eError = PVRSRVLookupHandle(psConnection->psHandleBase,
261                                             (void **)&psPMR,
262                                             hSecurePMRHandle,
263                                             PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
264                 if (eError != PVRSRV_OK)
265                 {
266                         goto e0;
267                 }
268         }
269
270         /*
271          * Take a reference on the PMR to make sure that it can't be freed
272          * while it's mapped into the user process.
273          */
274         PMRRefPMR(psPMR);
275
276         PMRUnlock();
277
278         eError = PMRLockSysPhysAddresses(psPMR, PAGE_SHIFT);
279         if (eError != PVRSRV_OK)
280         {
281                 goto e1;
282         }
283
284         if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
285             ((ps_vma->vm_flags & VM_SHARED) == 0))
286         {
287                 eError = PVRSRV_ERROR_INVALID_PARAMS;
288                 goto e1;
289         }
290
291         /*
292          * We ought to call PMR_Flags() here to check the permissions
293          * against the requested mode, and possibly to set up the cache
294          * control protflags
295          */
296         eError = PMR_Flags(psPMR, &ulPMRFlags);
297         if (eError != PVRSRV_OK)
298         {
299                 goto e1;
300         }
301
302         ulNewFlags = ps_vma->vm_flags;
303 #if 0
304         /* Discard user read/write request, we will pull these flags from the PMR */
305         ulNewFlags &= ~(VM_READ | VM_WRITE);
306
307         if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_READABLE)
308         {
309                 ulNewFlags |= VM_READ;
310         }
311         if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE)
312         {
313                 ulNewFlags |= VM_WRITE;
314         }
315 #endif
316
317         ps_vma->vm_flags = ulNewFlags;
318
319 #if defined (CONFIG_ARM64)
320         sPageProt = __pgprot_modify(ps_vma->vm_page_prot, 0, vm_get_page_prot(ulNewFlags));
321 #elif defined(CONFIG_ARM)
322         sPageProt = __pgprot_modify(ps_vma->vm_page_prot, L_PTE_MT_MASK, vm_get_page_prot(ulNewFlags));
323 #elif defined(CONFIG_X86)
324         sPageProt = pgprot_modify(ps_vma->vm_page_prot, vm_get_page_prot(ulNewFlags));
325 #elif defined(CONFIG_METAG) || defined(CONFIG_MIPS)
326         sPageProt = vm_get_page_prot(ulNewFlags);
327 #else
328 #error Please add pgprot_modify equivalent for your system
329 #endif
330         ui32CPUCacheFlags = DevmemCPUCacheMode(ulPMRFlags);
331         switch (ui32CPUCacheFlags)
332         {
333                 case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
334                                 sPageProt = pgprot_noncached(sPageProt);
335                                 break;
336
337                 case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
338                                 sPageProt = pgprot_writecombine(sPageProt);
339                                 break;
340
341                 case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
342                                 break;
343
344                 default:
345                                 eError = PVRSRV_ERROR_INVALID_PARAMS;
346                                 goto e1;
347         }
348         ps_vma->vm_page_prot = sPageProt;
349
350     uiLength = ps_vma->vm_end - ps_vma->vm_start;
351
352     ps_vma->vm_flags |= VM_IO;
353
354 /* Don't include the mapping in core dumps */
355 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
356     ps_vma->vm_flags |= VM_DONTDUMP;
357 #else
358     ps_vma->vm_flags |= VM_RESERVED;
359 #endif
360
361     /*
362      * Disable mremap because our nopage handler assumes all
363      * page requests have already been validated.
364      */
365     ps_vma->vm_flags |= VM_DONTEXPAND;
366     
367     /* Don't allow mapping to be inherited across a process fork */
368     ps_vma->vm_flags |= VM_DONTCOPY;
369
370     /* Can we use stack allocations? */
371     uiNumOfPFNs = uiLength >> PAGE_SHIFT;
372     if (uiNumOfPFNs > PMR_MAX_TRANSLATION_STACK_ALLOC)
373     {
374         psCpuPAddr = OSAllocMem(uiNumOfPFNs * sizeof(IMG_CPU_PHYADDR));
375         if (psCpuPAddr == IMG_NULL)
376         {
377                 eError = PVRSRV_ERROR_OUT_OF_MEMORY;
378                 goto e2;
379         }
380         
381         /* If this second allocation fails, clean up the first one before exiting */
382         pbValid = OSAllocMem(uiNumOfPFNs * sizeof(IMG_BOOL));
383         if (pbValid == IMG_NULL)
384         {
385                 eError = PVRSRV_ERROR_OUT_OF_MEMORY;
386                 OSFreeMem(psCpuPAddr);
387                 goto e2;
388         }
389     }
390     else
391     {
392                 psCpuPAddr = asCpuPAddr;
393                 pbValid = abValid;
394     }
395     
396     /* Obtain map range pfns */
397         eError = PMR_CpuPhysAddr(psPMR,
398                                                          PAGE_SHIFT,
399                                                          uiNumOfPFNs,
400                                                          0,
401                                                          psCpuPAddr,
402                                                          pbValid);
403         if (eError)
404         {
405                 goto e3;
406         }
407
408 #if defined(MMAP_USE_VM_INSERT_PAGE)
409         /*
410          * Scan the map range for pfns without struct page* handling. If
411          * we find one, this is a mixed map, and we can't use
412          * vm_insert_page()
413          */
414         for (uiOffsetIdx = 0; uiOffsetIdx < uiNumOfPFNs; ++uiOffsetIdx)
415         {
416                 if (pbValid[uiOffsetIdx])
417                 {
418                         uiPFN = psCpuPAddr[uiOffsetIdx].uiAddr >> PAGE_SHIFT;
419                         PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr[uiOffsetIdx].uiAddr);
420
421                         if (!pfn_valid(uiPFN) || page_count(pfn_to_page(uiPFN)) == 0)
422                         {
423                                 bMixedMap = IMG_TRUE;
424                                 break;
425                         }
426                 }
427         }
428
429         if (bMixedMap)
430         {
431                 ps_vma->vm_flags |= VM_MIXEDMAP;
432         }
433 #else
434         ps_vma->vm_flags |= VM_PFNMAP;
435 #endif /* MMAP_USE_VM_INSERT_PAGE */
436
437     for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<PAGE_SHIFT)
438     {
439         IMG_SIZE_T uiNumContiguousBytes;
440         IMG_INT32 iStatus;
441
442         uiNumContiguousBytes = 1ULL<<PAGE_SHIFT;
443         uiOffsetIdx = uiOffset >> PAGE_SHIFT;
444
445                 /*
446                         Only map in pages that are valid; any that aren't will be picked up
447                         by the nopage handler, which will return a zeroed page for us
448                 */
449                 if (pbValid[uiOffsetIdx])
450                 {
451                 uiPFN = psCpuPAddr[uiOffsetIdx].uiAddr >> PAGE_SHIFT;
452                 PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr[uiOffsetIdx].uiAddr);
453
454 #if defined(MMAP_USE_VM_INSERT_PAGE)
455                         if (bMixedMap)
456                         {
457                                 /*
458                                  * This path is just for debugging. It should be
459                                  * equivalent to the remap_pfn_range() path.
460                                  */
461                                 iStatus = vm_insert_mixed(ps_vma,
462                                                           ps_vma->vm_start + uiOffset,
463                                                           uiPFN);
464                         }
465                         else
466                         {
467                                 /* Since kernel 3.7 this sets VM_MIXEDMAP internally */
468                                 iStatus = vm_insert_page(ps_vma,
469                                                          ps_vma->vm_start + uiOffset,
470                                                          pfn_to_page(uiPFN));
471                         }
472 #else
473                         iStatus = remap_pfn_range(ps_vma,
474                                                   ps_vma->vm_start + uiOffset,
475                                                   uiPFN,
476                                                   uiNumContiguousBytes,
477                                                   ps_vma->vm_page_prot);
478 #endif  /* MMAP_USE_VM_INSERT_PAGE */
479
480                 PVR_ASSERT(iStatus == 0);
481                 if(iStatus)
482                 {
483                     // N.B. not the right error code, but it doesn't get propagated anyway... :(
484                     eError = PVRSRV_ERROR_OUT_OF_MEMORY;
485         
486                     goto e3;
487                 }
488
489 #if defined(PVRSRV_ENABLE_PROCESS_STATS)
490     /* USER MAPPING*/
491 #if !defined(PVRSRV_ENABLE_MEMORY_STATS)
492             PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, PAGE_SIZE);
493 #else
494         PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
495                                                          (IMG_VOID*)(IMG_UINTPTR_T)(ps_vma->vm_start + uiOffset),
496                                                          psCpuPAddr[uiOffsetIdx],
497                                                                  PAGE_SIZE,
498                                                                  IMG_NULL);
499 #endif
500 #endif
501
502                 }
503         (void)pFile;
504     }
505     
506     if (psCpuPAddr != asCpuPAddr)
507     {
508         OSFreeMem(psCpuPAddr);
509         OSFreeMem(pbValid);
510     }
511
512     /* Save the PMR in the VMA so we can unlock it later (in MMapPMRClose) */
513     ps_vma->vm_private_data = psPMR;
514
515     /* Install open and close handlers for ref-counting */
516     ps_vma->vm_ops = &gsMMapOps;
517
518         mutex_unlock(&g_sMMapMutex);
519
520     return 0;
521
522     /*
523       error exit paths follow
524     */
525  e3:
526         if (psCpuPAddr != asCpuPAddr)
527         {
528                 OSFreeMem(psCpuPAddr);
529                 OSFreeMem(pbValid);
530         }
531  e2:
532     PVR_DPF((PVR_DBG_ERROR, "don't know how to handle this error.  Abort!"));
533     PMRUnlockSysPhysAddresses(psPMR);
534  e1:
535         PMRUnrefPMR(psPMR);
536         goto em1;
537  e0:
538     PVR_DPF((PVR_DBG_ERROR, "Error in MMapPMR critical section"));
539         PMRUnlock();
540  em1:
541     PVR_ASSERT(eError != PVRSRV_OK);
542     PVR_DPF((PVR_DBG_ERROR, "unable to translate error %d", eError));
543         mutex_unlock(&g_sMMapMutex);
544  em0:
545     return -ENOENT; // -EAGAIN // or what?
546 }