/*************************************************************************/ /*!
@Title          Linux mmap interface
@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
@License        Dual MIT/GPLv2
The contents of this file are subject to the MIT license as set out below.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
Alternatively, the contents of this file may be used under the terms of
the GNU General Public License Version 2 ("GPL") in which case the provisions
of GPL are applicable instead of those above.

If you wish to allow use of your version of this file only under the terms of
GPL, and not to allow others to use your version of this file under the terms
of the MIT license, indicate your decision by deleting the provisions above
and replace them with the notice and other provisions required by GPL as set
out in the file called "GPL-COPYING" included in this distribution. If you do
not delete the provisions above, a recipient may use your version of this file
under the terms of either the MIT license or GPL.

This License is also included in this distribution in the file called
"MIT-COPYING".
EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ /**************************************************************************/
#include <linux/version.h>
#include <linux/mm.h>
#include <linux/mutex.h>

#include "img_defs.h"
#include "pvr_debug.h"
#include "pmr.h"
#include "handle.h"
#include "osfunc.h"
#include "connection_server.h"
#include "devicemem_server_utils.h"
#include "private_data.h"
#include "driverlock.h"
#if defined(SUPPORT_DRM)
#include "pvr_drm.h"
#endif
/*
 * The mmap code has its own mutex, to prevent a possible deadlock
 * when using gPVRSRVLock.
 *
 * The Linux kernel takes mm->mmap_sem before calling the mmap
 * entry points (PVRMMap, MMapVOpen, MMapVClose), but the ioctl
 * entry point may take mm->mmap_sem during fault handling, or
 * before calling get_user_pages. If gPVRSRVLock were used in the
 * mmap entry points, a deadlock could result, due to the ioctl
 * and mmap code taking the two locks in different orders.
 * As a corollary to this, the mmap entry points must not call
 * any driver code that relies on gPVRSRVLock being held.
 */
static DEFINE_MUTEX(g_sMMapMutex);
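
/*
 * Illustration of the inversion described above: if the mmap entry points
 * took gPVRSRVLock, the two call paths would acquire the locks in
 * opposite orders:
 *
 *   mmap path:   mm->mmap_sem (taken by the kernel) -> gPVRSRVLock
 *   ioctl path:  gPVRSRVLock (bridge lock) -> mm->mmap_sem
 *                (during fault handling or get_user_pages)
 *
 * This is the classic ABBA deadlock pattern; hence the dedicated mutex.
 */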
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
#include "process_stats.h"
#endif
/*
 * x86_32:
 * Use vm_insert_page because remap_pfn_range has issues when mapping HIGHMEM
 * pages with default memory attributes; these HIGHMEM pages are skipped in
 * set_pages_array_[uc,wc] during allocation; see reserve_pfn_range().
 * Also vm_insert_page is faster.
 *
 * x86_64:
 * Use vm_insert_page because it is faster.
 *
 * Other platforms:
 * Use remap_pfn_range by default because it does not issue a cache flush.
 * It is known that ARM32 benefits from this. When other platforms become
 * available, it has to be investigated whether this assumption holds for
 * them as well.
 *
 * Since vm_insert_page does more precise memory accounting we have the build
 * flag PVR_MMAP_USE_VM_INSERT that forces its use. This is useful as a debug
 * feature.
 */
#if defined(CONFIG_X86) || defined(PVR_MMAP_USE_VM_INSERT)
#define MMAP_USE_VM_INSERT_PAGE 1
#endif
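
/*
 * For reference, the two kernel primitives chosen between above are:
 *
 *   int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 *                      struct page *page);
 *   int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 *                       unsigned long pfn, unsigned long size,
 *                       pgprot_t prot);
 *
 * vm_insert_page works in terms of struct page, so every pfn in the range
 * must have valid struct page handling, whereas remap_pfn_range works on
 * raw pfns and marks the VMA with VM_PFNMAP instead.
 */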
static void MMapPMROpen(struct vm_area_struct *ps_vma)
{
	PMR *psPMR = ps_vma->vm_private_data;

	/* Our VM flags should ensure this function never gets called */
	PVR_DPF((PVR_DBG_WARNING,
			 "%s: Unexpected mmap open call, this is probably an application bug.",
			 __func__));
	PVR_DPF((PVR_DBG_WARNING,
			 "%s: vma struct: 0x%p, vAddr: %#lX, length: %#lX, PMR pointer: 0x%p",
			 __func__,
			 ps_vma,
			 ps_vma->vm_start,
			 ps_vma->vm_end - ps_vma->vm_start,
			 psPMR));

	/* In case we get called anyway, let's do things right by increasing the
	 * refcount and locking down the physical addresses. */
	PMRRefPMR(psPMR);

	if (PMRLockSysPhysAddresses(psPMR, PAGE_SHIFT) != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Could not lock down physical addresses, aborting.", __func__));
		PMRUnrefPMR(psPMR);
	}
}
static void MMapPMRClose(struct vm_area_struct *ps_vma)
{
	PMR *psPMR;
	IMG_UINTPTR_T vAddr = ps_vma->vm_start;
	IMG_SIZE_T pageSize = OSGetPageSize();

	psPMR = ps_vma->vm_private_data;

	while (vAddr < ps_vma->vm_end)
	{
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
		PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, PAGE_SIZE);
#else
		PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT64)vAddr);
#endif
#endif
		vAddr += pageSize;
	}

	PMRUnlockSysPhysAddresses(psPMR);
	PMRUnrefPMR(psPMR);
}
/*
 * This vma operation is used to read data from mmap regions. It is called
 * by access_process_vm, which is called to handle PTRACE_PEEKDATA ptrace
 * requests and reads from /proc/<pid>/mem.
 */
static int MMapVAccess(struct vm_area_struct *ps_vma, unsigned long addr,
					   void *buf, int len, int write)
{
	PMR *psPMR;
	unsigned long ulOffset;
	IMG_SIZE_T uiBytesCopied;
	PVRSRV_ERROR eError;
	int iRetVal = -EINVAL;

	psPMR = ps_vma->vm_private_data;
	ulOffset = addr - ps_vma->vm_start;

	if (write)
	{
		eError = PMR_WriteBytes(psPMR,
								(IMG_DEVMEM_OFFSET_T) ulOffset,
								buf,
								len,
								&uiBytesCopied);
	}
	else
	{
		eError = PMR_ReadBytes(psPMR,
							   (IMG_DEVMEM_OFFSET_T) ulOffset,
							   buf,
							   len,
							   &uiBytesCopied);
	}

	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Error from %s (%d)",
				 __func__,
				 write ? "PMR_WriteBytes" : "PMR_ReadBytes",
				 eError));
	}
	else
	{
		iRetVal = uiBytesCopied;
	}

	return iRetVal;
}
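
/*
 * Example of when the handler above runs: a debugger peeking at a mapped
 * buffer (e.g. gdb examining an address inside one of these VMAs) goes
 * through ptrace/access_process_vm rather than a direct page-table walk.
 * Because the mapping is VM_IO/VM_PFNMAP, get_user_pages cannot be used,
 * so the kernel falls back to the vm_ops access handler, which services
 * the request straight from the PMR instead.
 */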
static struct vm_operations_struct gsMMapOps =
{
	.open = &MMapPMROpen,
	.close = &MMapPMRClose,
	.access = MMapVAccess,
};
int MMapPMR(struct file *pFile, struct vm_area_struct *ps_vma)
{
	PVRSRV_ERROR eError;
	IMG_HANDLE hSecurePMRHandle;
	IMG_SIZE_T uiLength;
	IMG_DEVMEM_OFFSET_T uiOffset;
	unsigned long uiPFN;
	PMR *psPMR;
	PMR_FLAGS_T ulPMRFlags;
	IMG_UINT32 ui32CPUCacheFlags;
	unsigned long ulNewFlags = 0;
	pgprot_t sPageProt;
	CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile);
	IMG_CPU_PHYADDR asCpuPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
	IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
	IMG_UINT32 uiOffsetIdx, uiNumOfPFNs;
	IMG_CPU_PHYADDR *psCpuPAddr;
	IMG_BOOL *pbValid;
#if defined(MMAP_USE_VM_INSERT_PAGE)
	IMG_BOOL bMixedMap = IMG_FALSE;
#endif

	if (psConnection == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "Invalid connection data"));
		return -ENOENT;
	}
	/*
	 * The bridge lock that used to protect both PVRSRVLookupHandle and
	 * ResManFindPrivateDataByPtr here has been replaced by a dedicated
	 * lock, given that the handle functions now have their own lock and
	 * ResManFindPrivateDataByPtr is going to be removed. This change was
	 * necessary to solve lockdep issues related to MMapPMR.
	 */
	mutex_lock(&g_sMMapMutex);
#if defined(SUPPORT_DRM_DC_MODULE)
	psPMR = PVRSRVGEMMMapLookupPMR(pFile, ps_vma);
	if (psPMR == IMG_NULL)
	{
		eError = PVRSRV_ERROR_INVALID_PARAMS;
		goto e0;
	}
#else
	hSecurePMRHandle = (IMG_HANDLE)((IMG_UINTPTR_T)ps_vma->vm_pgoff);

	eError = PVRSRVLookupHandle(psConnection->psHandleBase,
								(IMG_VOID **)&psPMR,
								hSecurePMRHandle,
								PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
	if (eError != PVRSRV_OK)
	{
		goto e0;
	}
#endif
	/*
	 * Take a reference on the PMR, making sure that it can't be freed
	 * while it's mapped into the user process.
	 */
	PMRRefPMR(psPMR);

	eError = PMRLockSysPhysAddresses(psPMR, PAGE_SHIFT);
	if (eError != PVRSRV_OK)
	{
		goto e1;
	}

	if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
	    ((ps_vma->vm_flags & VM_SHARED) == 0))
	{
		eError = PVRSRV_ERROR_INVALID_PARAMS;
		goto e2;
	}
	/*
	 * We ought to call PMR_Flags() here to check the permissions
	 * against the requested mode, and possibly to set up the cache
	 * control protection flags.
	 */
	eError = PMR_Flags(psPMR, &ulPMRFlags);
	if (eError != PVRSRV_OK)
	{
		goto e2;
	}
	ulNewFlags = ps_vma->vm_flags;

	/* Discard the user's read/write request; we will pull these flags from the PMR */
	ulNewFlags &= ~(VM_READ | VM_WRITE);

	if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_READABLE)
	{
		ulNewFlags |= VM_READ;
	}
	if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE)
	{
		ulNewFlags |= VM_WRITE;
	}

	ps_vma->vm_flags = ulNewFlags;
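
	/*
	 * For example, an mmap(..., PROT_READ | PROT_WRITE, MAP_SHARED, ...)
	 * call against a PMR that was allocated CPU-readable only ends up
	 * with a read-only VMA here; a subsequent write to the mapping then
	 * faults with SIGSEGV rather than corrupting the allocation.
	 */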
#if defined(CONFIG_ARM64)
	sPageProt = __pgprot_modify(ps_vma->vm_page_prot, 0, vm_get_page_prot(ulNewFlags));
#elif defined(CONFIG_ARM)
	sPageProt = __pgprot_modify(ps_vma->vm_page_prot, L_PTE_MT_MASK, vm_get_page_prot(ulNewFlags));
#elif defined(CONFIG_X86)
	sPageProt = pgprot_modify(ps_vma->vm_page_prot, vm_get_page_prot(ulNewFlags));
#elif defined(CONFIG_METAG) || defined(CONFIG_MIPS)
	sPageProt = vm_get_page_prot(ulNewFlags);
#else
#error Please add pgprot_modify equivalent for your system
#endif
	ui32CPUCacheFlags = DevmemCPUCacheMode(ulPMRFlags);
	switch (ui32CPUCacheFlags)
	{
		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
			sPageProt = pgprot_noncached(sPageProt);
			break;

		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
			sPageProt = pgprot_writecombine(sPageProt);
			break;

		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
			break;

		default:
			eError = PVRSRV_ERROR_INVALID_PARAMS;
			goto e2;
	}
	ps_vma->vm_page_prot = sPageProt;

	uiLength = ps_vma->vm_end - ps_vma->vm_start;
	ps_vma->vm_flags |= VM_IO;

	/* Don't include the mapping in core dumps */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
	ps_vma->vm_flags |= VM_DONTDUMP;
#else
	ps_vma->vm_flags |= VM_RESERVED;
#endif
	/*
	 * Disable mremap because our nopage handler assumes all
	 * page requests have already been validated.
	 */
	ps_vma->vm_flags |= VM_DONTEXPAND;

	/* Don't allow the mapping to be inherited across a process fork */
	ps_vma->vm_flags |= VM_DONTCOPY;
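
	/*
	 * Net effect of the flags set above: the VMA is treated as device
	 * memory (VM_IO), cannot be grown or moved by mremap (VM_DONTEXPAND),
	 * is not duplicated into child processes on fork (VM_DONTCOPY), and
	 * is excluded from core dumps (VM_DONTDUMP/VM_RESERVED).
	 */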
	/* Can we use stack allocations? */
	uiNumOfPFNs = uiLength >> PAGE_SHIFT;
	if (uiNumOfPFNs > PMR_MAX_TRANSLATION_STACK_ALLOC)
	{
		psCpuPAddr = OSAllocMem(uiNumOfPFNs * sizeof(IMG_CPU_PHYADDR));
		if (psCpuPAddr == IMG_NULL)
		{
			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
			goto e2;
		}

		/* Should the allocation fail, clean up here before exiting */
		pbValid = OSAllocMem(uiNumOfPFNs * sizeof(IMG_BOOL));
		if (pbValid == IMG_NULL)
		{
			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
			OSFreeMem(psCpuPAddr);
			goto e2;
		}
	}
	else
	{
		psCpuPAddr = asCpuPAddr;
		pbValid = abValid;
	}
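
	/*
	 * Mappings of up to PMR_MAX_TRANSLATION_STACK_ALLOC pages use the
	 * on-stack arrays above, so the common small-mapping case avoids two
	 * heap allocations per mmap call; only larger mappings pay for
	 * OSAllocMem/OSFreeMem.
	 */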
	/* Obtain the map range pfns */
	eError = PMR_CpuPhysAddr(psPMR,
							 PAGE_SHIFT,
							 uiNumOfPFNs,
							 0,
							 psCpuPAddr,
							 pbValid);
	if (eError != PVRSRV_OK)
	{
		goto e3;
	}
#if defined(MMAP_USE_VM_INSERT_PAGE)
	/*
	 * Scan the map range for pfns without struct page* handling. If
	 * we find one, this is a mixed map, and we can't use
	 * vm_insert_page().
	 */
	for (uiOffsetIdx = 0; uiOffsetIdx < uiNumOfPFNs; ++uiOffsetIdx)
	{
		if (pbValid[uiOffsetIdx])
		{
			uiPFN = psCpuPAddr[uiOffsetIdx].uiAddr >> PAGE_SHIFT;
			PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr[uiOffsetIdx].uiAddr);

			if (!pfn_valid(uiPFN) || page_count(pfn_to_page(uiPFN)) == 0)
			{
				bMixedMap = IMG_TRUE;
				break;
			}
		}
	}

	if (bMixedMap)
	{
		ps_vma->vm_flags |= VM_MIXEDMAP;
	}
#else
	ps_vma->vm_flags |= VM_PFNMAP;
#endif /* MMAP_USE_VM_INSERT_PAGE */
	for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL << PAGE_SHIFT)
	{
		IMG_SIZE_T uiNumContiguousBytes;
		IMG_INT32 iStatus;

		uiNumContiguousBytes = 1ULL << PAGE_SHIFT;
		uiOffsetIdx = uiOffset >> PAGE_SHIFT;

		/*
		 * Only map in pages that are valid; any that aren't will be
		 * picked up by the nopage handler, which will return a zeroed
		 * page for us.
		 */
		if (pbValid[uiOffsetIdx])
		{
			uiPFN = psCpuPAddr[uiOffsetIdx].uiAddr >> PAGE_SHIFT;
			PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr[uiOffsetIdx].uiAddr);

#if defined(MMAP_USE_VM_INSERT_PAGE)
			if (bMixedMap)
			{
				/*
				 * This path is just for debugging. It should be
				 * equivalent to the remap_pfn_range() path.
				 */
				iStatus = vm_insert_mixed(ps_vma,
										  ps_vma->vm_start + uiOffset,
										  uiPFN);
			}
			else
			{
				/* Since kernel 3.7 this sets VM_MIXEDMAP internally */
				iStatus = vm_insert_page(ps_vma,
										 ps_vma->vm_start + uiOffset,
										 pfn_to_page(uiPFN));
			}
#else
			iStatus = remap_pfn_range(ps_vma,
									  ps_vma->vm_start + uiOffset,
									  uiPFN,
									  uiNumContiguousBytes,
									  ps_vma->vm_page_prot);
#endif /* MMAP_USE_VM_INSERT_PAGE */

			PVR_ASSERT(iStatus == 0);
			if (iStatus)
			{
				// N.B. not the right error code, but it doesn't get propagated anyway... :(
				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
				goto e3;
			}

#if defined(PVRSRV_ENABLE_PROCESS_STATS)
#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
			PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, PAGE_SIZE);
#else
			PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
										 (IMG_VOID*)(IMG_UINTPTR_T)(ps_vma->vm_start + uiOffset),
										 psCpuPAddr[uiOffsetIdx],
										 PAGE_SIZE,
										 IMG_NULL);
#endif
#endif
		}
	}
	if (psCpuPAddr != asCpuPAddr)
	{
		OSFreeMem(psCpuPAddr);
		OSFreeMem(pbValid);
	}

	/* Remember the PMR so we can unlock and unreference it when the mapping is closed */
	ps_vma->vm_private_data = psPMR;

	/* Install open and close handlers for ref-counting */
	ps_vma->vm_ops = &gsMMapOps;

	mutex_unlock(&g_sMMapMutex);

	return 0;
	/*
	 * Error exit paths follow
	 */
e3:
	if (psCpuPAddr != asCpuPAddr)
	{
		OSFreeMem(psCpuPAddr);
		OSFreeMem(pbValid);
	}

e2:
	PVR_DPF((PVR_DBG_ERROR, "Don't know how to handle this error. Abort!"));
	PMRUnlockSysPhysAddresses(psPMR);

e1:
	PMRUnrefPMR(psPMR);

e0:
	PVR_DPF((PVR_DBG_ERROR, "Error in MMapPMR critical section"));
	PVR_ASSERT(eError != PVRSRV_OK);
	PVR_DPF((PVR_DBG_ERROR, "Unable to translate error %d", eError));
	mutex_unlock(&g_sMMapMutex);

	return -ENOENT; // -EAGAIN // or what?
}
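
/*
 * A minimal sketch of how this entry point is reached from user space,
 * assuming a PMR has already been exported to the client as a secure
 * handle (the names below are illustrative, not part of this driver):
 *
 *   // fd is the opened PVR services device node
 *   // hPMR is the IMG_HANDLE returned by the bridge for the PMR
 *   void *cpuVA = mmap(NULL, uiAllocSize,
 *                      PROT_READ | PROT_WRITE,
 *                      MAP_SHARED,           // private writable maps are rejected
 *                      fd,
 *                      (off_t)hPMR << PAGE_SHIFT); // handle travels in vm_pgoff
 *
 * The kernel divides the byte offset by PAGE_SIZE to produce vm_pgoff,
 * which MMapPMR converts back into hSecurePMRHandle above.
 */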