1 /*************************************************************************/ /*!
3 @Title dmabuf memory allocator
4 @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
5 @Description Part of the memory management. This module is responsible for
6 implementing the function callbacks for dmabuf memory.
7 @License Dual MIT/GPLv2
9 The contents of this file are subject to the MIT license as set out below.
11 Permission is hereby granted, free of charge, to any person obtaining a copy
12 of this software and associated documentation files (the "Software"), to deal
13 in the Software without restriction, including without limitation the rights
14 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15 copies of the Software, and to permit persons to whom the Software is
16 furnished to do so, subject to the following conditions:
18 The above copyright notice and this permission notice shall be included in
19 all copies or substantial portions of the Software.
21 Alternatively, the contents of this file may be used under the terms of
22 the GNU General Public License Version 2 ("GPL") in which case the provisions
23 of GPL are applicable instead of those above.
25 If you wish to allow use of your version of this file only under the terms of
26 GPL, and not to allow others to use your version of this file under the terms
27 of the MIT license, indicate your decision by deleting the provisions above
28 and replace them with the notice and other provisions required by GPL as set
29 out in the file called "GPL-COPYING" included in this distribution. If you do
30 not delete the provisions above, a recipient may use your version of this file
31 under the terms of either the MIT license or GPL.
33 This License is also included in this distribution in the file called
36 EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
37 PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
38 BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
39 PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
40 COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
41 IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
42 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
43 */ /**************************************************************************/
45 #if defined(SUPPORT_DRM)
49 #if !defined(SUPPORT_DRM) || defined(PVR_DRM_USE_PRIME)
51 #include "img_types.h"
52 #include "pvr_debug.h"
53 #include "pvrsrv_error.h"
54 #include "pvrsrv_memallocflags.h"
59 #include "pdump_physmem.h"
62 #include "physmem_dmabuf.h"
64 #if defined(SUPPORT_ION)
69 #if defined(PVR_RI_DEBUG)
70 #include "ri_server.h"
73 #include <linux/err.h>
74 #include <linux/slab.h>
75 #include <linux/dma-buf.h>
76 #include <linux/scatterlist.h>
79 #include <linux/pci.h>
80 #elif defined(LDM_PLATFORM)
81 #include <linux/platform_device.h>
83 #error Either LDM_PCI or LDM_PLATFORM must be defined
/*
 * Per-allocation private data attached to every dma-buf backed PMR.
 * NOTE(review): the struct's opening brace and its closing
 * "} PMR_DMA_BUF_DATA;" line are not visible in this view of the file.
 */
86 typedef struct _PMR_DMA_BUF_DATA_
88 /* Filled in at PMR create time */
/* Physical heap this buffer's pages are translated against. */
89 PHYS_HEAP *psPhysHeap;
/* dma-buf attachment wrapped by this PMR; psAttachment->dmabuf is the buffer. */
90 struct dma_buf_attachment *psAttachment;
/* Destructor called from PMRFinalizeDmaBuf to release heap and attachment. */
91 PFN_DESTROY_DMABUF_PMR pfnDestroy;
/* When true, the buffer is overwritten with _FreePoison before destruction. */
92 IMG_BOOL bPoisonOnFree;
/* PDump allocation handle; released via PDumpPMRFree at finalise time. */
93 IMG_HANDLE hPDumpAllocInfo;
95 /* Modified by PMR lock/unlock */
/* Mapped scatter-gather table — valid only between lock and unlock. */
96 struct sg_table *psSgTable;
/* Array of per-page device physical addresses built from the sg table. */
97 IMG_DEV_PHYADDR *pasDevPhysAddr;
/* Number of entries in pasDevPhysAddr. */
98 IMG_UINT32 ui32PageCount;
101 /* Start size of the g_psDmaBufHash hash table */
102 #define DMA_BUF_HASH_SIZE 20
/*
 * NOTE(review): the #if arm preceding this #elif (presumably the LDM_PCI
 * device declaration) is not visible in this view of the file.
 */
107 #elif defined(LDM_PLATFORM)
108 struct platform_device
112 #if defined(SUPPORT_ION)
/* Maps struct dma_buf* -> PMR* so re-imports of the same buffer share a PMR. */
113 static HASH_TABLE *g_psDmaBufHash = IMG_NULL;
/* Number of live entries in g_psDmaBufHash; table is destroyed at zero. */
114 static IMG_UINT32 g_ui32HashRefCount = 0;
/* Some Android ION builds must use sg->length rather than sg_dma_len(). */
117 #if defined(PVR_ANDROID_ION_USE_SG_LENGTH)
118 #define pvr_sg_length(sg) ((sg)->length)
120 #define pvr_sg_length(sg) sg_dma_len(sg)
/* Byte patterns written over buffers on alloc/free when poisoning is enabled;
 * the *Size constants are the pattern lengths excluding the NUL terminator. */
123 static const IMG_CHAR _AllocPoison[] = "^PoIsOn";
124 static const IMG_UINT32 _AllocPoisonSize = 7;
125 static const IMG_CHAR _FreePoison[] = "<DEAD-BEEF>";
126 static const IMG_UINT32 _FreePoisonSize = 11;
/*
 * _Poison
 *
 * Fill a kernel-mapped buffer with a repeating poison pattern so stale
 * reads of freed/uninitialised memory are recognisable while debugging.
 *
 *   pvKernAddr    kernel virtual address of the buffer to fill
 *   uiBufferSize  number of bytes to fill
 *   pacPoisonData pattern to repeat across the buffer
 *   uiPoisonSize  length of the pattern in bytes
 *
 * NOTE(review): the body braces and the source-index increment/wrap-around
 * statements are not visible in this view of the file.
 */
128 static void _Poison(IMG_PVOID pvKernAddr,
129 IMG_DEVMEM_SIZE_T uiBufferSize,
130 const IMG_CHAR *pacPoisonData,
131 IMG_SIZE_T uiPoisonSize)
133 IMG_DEVMEM_SIZE_T uiDestByteIndex;
134 IMG_CHAR *pcDest = pvKernAddr;
135 IMG_UINT32 uiSrcByteIndex = 0;
/* Copy the pattern byte-by-byte, cycling through it for the whole buffer. */
137 for (uiDestByteIndex = 0; uiDestByteIndex < uiBufferSize; uiDestByteIndex++)
139 pcDest[uiDestByteIndex] = pacPoisonData[uiSrcByteIndex];
/* Restart the pattern once its end is reached (wrap logic elided here). */
141 if (uiSrcByteIndex == uiPoisonSize)
149 /*****************************************************************************
150 * PMR callback functions *
151 *****************************************************************************/
/*
 * PMRFinalizeDmaBuf
 *
 * PMR destructor callback. Releases the PDump allocation record, optionally
 * poisons the buffer contents, then invokes the creator-supplied destroy
 * callback (which releases the heap/attachment) and frees the private data.
 *
 * NOTE(review): several lines (braces, variable declarations for err/i/eError,
 * the final return) are not visible in this view of the file.
 */
153 static PVRSRV_ERROR PMRFinalizeDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
155 PMR_DMA_BUF_DATA *psPrivData = pvPriv;
156 struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf;
/* Drop the PDump record first so no further PDump refs exist. */
159 if (psPrivData->hPDumpAllocInfo)
161 PDumpPMRFree(psPrivData->hPDumpAllocInfo);
162 psPrivData->hPDumpAllocInfo = NULL;
/* Optionally overwrite the buffer so use-after-free is detectable. */
165 if (psPrivData->bPoisonOnFree)
167 IMG_PVOID pvKernAddr;
/* CPU access must be bracketed by begin/end around the kmap writes. */
170 err = dma_buf_begin_cpu_access(psDmaBuf, 0, psDmaBuf->size, DMA_FROM_DEVICE);
173 PVR_DPF((PVR_DBG_ERROR, "%s: Failed to begin cpu access for free poisoning", __func__));
174 PVR_ASSERT(IMG_FALSE);
/* Poison page by page via the dma-buf kmap interface. */
178 for (i = 0; i < psDmaBuf->size / PAGE_SIZE; i++)
180 pvKernAddr = dma_buf_kmap(psDmaBuf, i);
181 if (IS_ERR_OR_NULL(pvKernAddr))
183 PVR_DPF((PVR_DBG_ERROR, "%s: Failed to poison allocation before free", __func__));
184 PVR_ASSERT(IMG_FALSE);
185 goto exit_end_access;
188 _Poison(pvKernAddr, PAGE_SIZE, _FreePoison, _FreePoisonSize);
190 dma_buf_kunmap(psDmaBuf, i, pvKernAddr);
/* DMA_TO_DEVICE on end: flush the poison writes back out of the CPU caches. */
194 dma_buf_end_cpu_access(psDmaBuf, 0, psDmaBuf->size, DMA_TO_DEVICE);
/* Hand heap/attachment teardown back to whoever created the PMR. */
198 if (psPrivData->pfnDestroy)
200 eError = psPrivData->pfnDestroy(psPrivData->psPhysHeap, psPrivData->psAttachment);
201 if (eError != PVRSRV_OK)
207 OSFreeMem(psPrivData);
/*
 * PMRLockPhysAddressesDmaBuf
 *
 * PMR lock callback: maps the dma-buf attachment to obtain its sg table,
 * then builds a flat array of per-page device physical addresses.
 * Two passes over the sg list: pass 1 counts pages, pass 2 converts each
 * page's CPU physical address to a device physical address via the heap.
 *
 * NOTE(review): braces, the allocation-failure DPF, the success return and
 * the error-label lines are not visible in this view of the file; also the
 * uiLog2DevPageSize parameter's validation (if any) is elided.
 */
212 static PVRSRV_ERROR PMRLockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
213 IMG_UINT32 uiLog2DevPageSize)
215 PMR_DMA_BUF_DATA *psPrivData = pvPriv;
216 struct dma_buf_attachment *psAttachment = psPrivData->psAttachment;
217 IMG_DEV_PHYADDR *pasDevPhysAddr = NULL;
218 IMG_CPU_PHYADDR sCpuPhysAddr;
219 IMG_UINT32 ui32PageCount = 0;
220 struct scatterlist *sg;
221 struct sg_table *table;
/* Map the attachment for DMA; returns the scatter-gather table. */
225 table = dma_buf_map_attachment(psAttachment, DMA_BIDIRECTIONAL);
228 eError = PVRSRV_ERROR_INVALID_PARAMS;
233 * We do a two pass process, 1st workout how many pages there
234 * are, 2nd fill in the data.
/* Pass 1: total page count, rounding each sg segment up to a page. */
236 for_each_sg(table->sgl, sg, table->nents, i)
238 ui32PageCount += PAGE_ALIGN(pvr_sg_length(sg)) / PAGE_SIZE;
/* A zero-page buffer cannot be locked — reject it. */
241 if (WARN_ON(!ui32PageCount))
243 PVR_DPF((PVR_DBG_ERROR, "%s: Failed to lock dmabuf with no pages",
245 eError = PVRSRV_ERROR_INVALID_PARAMS;
246 goto fail_page_count;
249 pasDevPhysAddr = OSAllocMem(sizeof(*pasDevPhysAddr) * ui32PageCount);
252 eError = PVRSRV_ERROR_OUT_OF_MEMORY;
258 for_each_sg(table->sgl, sg, table->nents, i)
262 for (j = 0; j < pvr_sg_length(sg); j += PAGE_SIZE)
264 /* Pass 2: Get the page data */
265 sCpuPhysAddr.uiAddr = sg_phys(sg) + j;
/* Translate CPU physical -> device physical for this heap.
 * NOTE(review): the index expression at line 269 appears to reuse
 * ui32PageCount as a running index — the re-initialisation to 0 between
 * the passes is elided from this view; confirm against the full file. */
267 PhysHeapCpuPAddrToDevPAddr(psPrivData->psPhysHeap,
269 &pasDevPhysAddr[ui32PageCount],
/* Publish the results for PMRDevPhysAddrDmaBuf / unlock. */
275 psPrivData->pasDevPhysAddr = pasDevPhysAddr;
276 psPrivData->ui32PageCount = ui32PageCount;
277 psPrivData->psSgTable = table;
/* Error path: undo the attachment mapping. */
283 dma_buf_unmap_attachment(psAttachment, table, DMA_BIDIRECTIONAL);
286 PVR_ASSERT(eError!= PVRSRV_OK);
/*
 * PMRUnlockPhysAddressesDmaBuf
 *
 * PMR unlock callback: frees the device-physical-address array built at
 * lock time and unmaps the dma-buf attachment's sg table.
 * NOTE(review): braces and the final return are not visible in this view.
 */
290 static PVRSRV_ERROR PMRUnlockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
292 PMR_DMA_BUF_DATA *psPrivData = pvPriv;
293 struct dma_buf_attachment *psAttachment = psPrivData->psAttachment;
294 struct sg_table *psSgTable = psPrivData->psSgTable;
296 OSFreeMem(psPrivData->pasDevPhysAddr);
/* Reset lock-time state so a stale pointer can't be reused. */
298 psPrivData->pasDevPhysAddr = NULL;
299 psPrivData->ui32PageCount = 0;
301 dma_buf_unmap_attachment(psAttachment, psSgTable, DMA_BIDIRECTIONAL);
/*
 * PMRDevPhysAddrDmaBuf
 *
 * PMR address-translation callback: for each requested byte offset into the
 * PMR, returns the corresponding device physical address by indexing the
 * per-page table built at lock time and re-adding the in-page offset.
 * Assumes the PMR is locked (pasDevPhysAddr valid).
 * NOTE(review): braces, the idx declaration and the return are not visible
 * in this view of the file.
 */
306 static PVRSRV_ERROR PMRDevPhysAddrDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
307 IMG_UINT32 ui32NumOfPages,
308 IMG_DEVMEM_OFFSET_T *puiOffset,
310 IMG_DEV_PHYADDR *psDevPAddr)
312 PMR_DMA_BUF_DATA *psPrivData = pvPriv;
313 IMG_UINT32 ui32PageIndex;
316 for (idx=0; idx < ui32NumOfPages; idx++)
320 IMG_UINT32 ui32InPageOffset;
/* Split the offset into page index + offset within that page. */
322 ui32PageIndex = puiOffset[idx] >> PAGE_SHIFT;
323 ui32InPageOffset = puiOffset[idx] - ((IMG_DEVMEM_OFFSET_T)ui32PageIndex << PAGE_SHIFT);
325 PVR_ASSERT(ui32PageIndex < psPrivData->ui32PageCount);
326 PVR_ASSERT(ui32InPageOffset < PAGE_SIZE);
328 psDevPAddr[idx].uiAddr = psPrivData->pasDevPhysAddr[ui32PageIndex].uiAddr + ui32InPageOffset;
/*
 * PMRAcquireKernelMappingDataDmaBuf
 *
 * PMR kernel-mapping callback: brackets CPU access on the dma-buf and
 * vmaps the whole buffer, returning the address at the requested offset.
 * The raw vmap address is handed back via *phHandleOut so the release
 * callback can vunmap it.
 * NOTE(review): the return type, some parameters (size/offset/flags), the
 * uiOffset declaration and the success return are not visible in this view.
 */
336 PMRAcquireKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
339 void **ppvKernelAddressOut,
340 IMG_HANDLE *phHandleOut,
343 PMR_DMA_BUF_DATA *psPrivData = pvPriv;
344 struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf;
345 IMG_PVOID pvKernAddr;
/* Must precede any CPU-side access to the buffer's pages. */
349 err = dma_buf_begin_cpu_access(psDmaBuf, 0, psDmaBuf->size, DMA_BIDIRECTIONAL);
352 eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
/* Map the entire buffer into kernel virtual address space. */
356 pvKernAddr = dma_buf_vmap(psDmaBuf);
357 if (IS_ERR_OR_NULL(pvKernAddr))
359 eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
/* Caller gets offset address; handle keeps the base for vunmap. */
363 *ppvKernelAddressOut = pvKernAddr + uiOffset;
364 *phHandleOut = pvKernAddr;
/* Error path: balance the begin_cpu_access above. */
369 dma_buf_end_cpu_access(psDmaBuf, 0, psDmaBuf->size, DMA_BIDIRECTIONAL);
372 PVR_ASSERT(eError != PVRSRV_OK);
/*
 * PMRReleaseKernelMappingDataDmaBuf
 *
 * Counterpart to PMRAcquireKernelMappingDataDmaBuf: vunmaps the buffer
 * (hHandle is the base address returned by dma_buf_vmap) and ends the
 * CPU-access bracket opened at acquire time.
 * NOTE(review): the hHandle parameter line and braces are elided here.
 */
376 static void PMRReleaseKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
379 PMR_DMA_BUF_DATA *psPrivData = pvPriv;
380 struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf;
381 IMG_PVOID pvKernAddr = hHandle;
383 dma_buf_vunmap(psDmaBuf, pvKernAddr);
385 dma_buf_end_cpu_access(psDmaBuf, 0, psDmaBuf->size, DMA_BIDIRECTIONAL);
/*
 * Function table registered with PMRCreatePMR: wires the dma-buf specific
 * implementations into the generic PMR framework.
 * NOTE(review): the opening brace and closing "};" are elided in this view.
 */
388 static PMR_IMPL_FUNCTAB _sPMRDmaBufFuncTab =
390 .pfnLockPhysAddresses = PMRLockPhysAddressesDmaBuf,
391 .pfnUnlockPhysAddresses = PMRUnlockPhysAddressesDmaBuf,
392 .pfnDevPhysAddr = PMRDevPhysAddrDmaBuf,
393 .pfnAcquireKernelMappingData = PMRAcquireKernelMappingDataDmaBuf,
394 .pfnReleaseKernelMappingData = PMRReleaseKernelMappingDataDmaBuf,
395 .pfnFinalize = PMRFinalizeDmaBuf,
398 /*****************************************************************************
399 * Public facing interface *
400 *****************************************************************************/
/*
 * PhysmemCreateNewDmaBufBackedPMR
 *
 * Wraps an already-attached dma-buf in a new PMR. Decodes the allocation
 * flags (zero-on-alloc / poison-on-alloc / poison-on-free), optionally
 * zeroes or poisons the buffer contents page by page, then creates the PMR
 * with _sPMRDmaBufFuncTab as its implementation.
 *
 * psHeap       physical heap used for CPU->device address translation
 * psAttachment attachment to wrap; ownership passes to the PMR (released
 *              via pfnDestroy at finalise time)
 * pfnDestroy   destructor stored in the private data
 * uiFlags      PVRSRV_MEMALLOCFLAGS_T allocation flags
 *
 * NOTE(review): the return type line, the ppsPMRPtr out-parameter, several
 * braces/returns and most of the PMRCreatePMR argument list are not visible
 * in this view of the file.
 */
403 PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap,
404 struct dma_buf_attachment *psAttachment,
405 PFN_DESTROY_DMABUF_PMR pfnDestroy,
406 PVRSRV_MEMALLOCFLAGS_T uiFlags,
409 struct dma_buf *psDmaBuf = psAttachment->dmabuf;
410 PMR_DMA_BUF_DATA *psPrivData;
411 IMG_BOOL bMappingTable = IMG_TRUE;
412 PMR_FLAGS_T uiPMRFlags;
413 IMG_BOOL bZeroOnAlloc;
414 IMG_BOOL bPoisonOnAlloc;
415 IMG_BOOL bPoisonOnFree;
/* Decode the three content-initialisation flags from uiFlags. */
418 if (uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
420 bZeroOnAlloc = IMG_TRUE;
424 bZeroOnAlloc = IMG_FALSE;
427 if (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC)
429 bPoisonOnAlloc = IMG_TRUE;
433 bPoisonOnAlloc = IMG_FALSE;
436 if (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_FREE)
438 bPoisonOnFree = IMG_TRUE;
442 bPoisonOnFree = IMG_FALSE;
/* NOTE(review): condition tests bZeroOnAlloc && bPoisonOnFree but the
 * comment says Zero/Poison *on Alloc* are exclusive — looks like either
 * the test or the comment is wrong; confirm against the full file. */
445 if (bZeroOnAlloc && bPoisonOnFree)
447 /* Zero on Alloc and Poison on Alloc are mutually exclusive */
448 eError = PVRSRV_ERROR_INVALID_PARAMS;
452 psPrivData = OSAllocZMem(sizeof(*psPrivData));
453 if (psPrivData == NULL)
455 eError = PVRSRV_ERROR_OUT_OF_MEMORY;
456 goto fail_priv_alloc;
459 psPrivData->psPhysHeap = psHeap;
460 psPrivData->psAttachment = psAttachment;
461 psPrivData->pfnDestroy = pfnDestroy;
462 psPrivData->bPoisonOnFree = bPoisonOnFree;
/* Initialise buffer contents if requested, page by page via kmap. */
464 if (bZeroOnAlloc || bPoisonOnAlloc)
466 IMG_PVOID pvKernAddr;
469 err = dma_buf_begin_cpu_access(psDmaBuf,
475 eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
479 for (i = 0; i < psDmaBuf->size / PAGE_SIZE; i++)
481 pvKernAddr = dma_buf_kmap(psDmaBuf, i);
482 if (IS_ERR_OR_NULL(pvKernAddr))
484 PVR_DPF((PVR_DBG_ERROR,
485 "%s: Failed to map page for %s",
487 bZeroOnAlloc ? "zeroing" : "poisoning"));
488 eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
/* Failure inside the loop: balance begin_cpu_access before bailing. */
490 dma_buf_end_cpu_access(psDmaBuf,
500 memset(pvKernAddr, 0, PAGE_SIZE);
504 _Poison(pvKernAddr, PAGE_SIZE, _AllocPoison, _AllocPoisonSize);
507 dma_buf_kunmap(psDmaBuf, i, pvKernAddr);
510 dma_buf_end_cpu_access(psDmaBuf,
/* Narrow the 64-bit alloc flags into the PMR flag type. */
516 uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
519 * Check no significant bits were lost in cast due to different
520 * bit widths for flags
522 PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
/* Create the PMR itself; most arguments elided in this view. */
524 eError = PMRCreatePMR(psHeap,
536 &psPrivData->hPDumpAllocInfo,
538 if (eError != PVRSRV_OK)
540 PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create PMR", __func__));
541 goto fail_create_pmr;
/* Resource Information debug: record the PMR; failure is non-fatal. */
544 #if defined(PVR_RI_DEBUG)
545 eError = RIWritePMREntryKM(*ppsPMRPtr,
549 if (eError != PVRSRV_OK)
551 PVR_DPF((PVR_DBG_WARNING,
552 "%s: Failed to write PMR entry (%s)",
553 __func__, PVRSRVGetErrorStringKM(eError)));
/* Error path: free the private data allocated above. */
562 OSFreeMem(psPrivData);
566 PVR_ASSERT(eError != PVRSRV_OK);
570 #if defined(SUPPORT_ION)
/*
 * PhysmemDestroyDmaBuf
 *
 * pfnDestroy callback for PMRs created by PhysmemImportDmaBuf: removes the
 * buffer from the global hash table (destroying the table when its refcount
 * reaches zero), releases the phys heap, detaches and drops the dma-buf
 * reference taken at import time.
 * NOTE(review): braces and the return are not visible in this view.
 */
571 static PVRSRV_ERROR PhysmemDestroyDmaBuf(PHYS_HEAP *psHeap,
572 struct dma_buf_attachment *psAttachment)
574 struct dma_buf *psDmaBuf = psAttachment->dmabuf;
576 HASH_Remove(g_psDmaBufHash, (IMG_UINTPTR_T) psDmaBuf);
577 g_ui32HashRefCount--;
/* Last dma-buf PMR gone — tear down the hash table entirely. */
579 if (g_ui32HashRefCount == 0)
581 HASH_Delete(g_psDmaBufHash);
582 g_psDmaBufHash = IMG_NULL;
585 PhysHeapRelease(psHeap);
/* Undo dma_buf_attach + dma_buf_get from the import path. */
587 dma_buf_detach(psDmaBuf, psAttachment);
588 dma_buf_put(psDmaBuf);
/*
 * PhysmemImportDmaBuf
 *
 * Public entry point: imports a dma-buf (identified by a file descriptor)
 * as a PMR. If the same dma-buf was imported before, the existing PMR is
 * found in g_psDmaBufHash and reused; otherwise the buffer is attached,
 * a phys heap acquired, a new PMR created and recorded in the hash table.
 * On success *puiSize/*puiAlign report the buffer size and PAGE_SIZE.
 *
 * NOTE(review): the return type line, the fd and PMR out-parameters,
 * several braces/returns and the error-label lines are not visible in this
 * view of the file.
 */
594 PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
596 PVRSRV_MEMALLOCFLAGS_T uiFlags,
598 IMG_DEVMEM_SIZE_T *puiSize,
599 IMG_DEVMEM_ALIGN_T *puiAlign)
602 struct dma_buf_attachment *psAttachment;
603 struct dma_buf *psDmaBuf;
/* Early parameter-validation failure (condition elided in this view). */
609 eError = PVRSRV_ERROR_INVALID_PARAMS;
613 /* Get the buffer handle */
614 psDmaBuf = dma_buf_get(fd);
615 if (IS_ERR_OR_NULL(psDmaBuf))
617 PVR_DPF((PVR_DBG_ERROR, "%s: dma_buf_get failed", __func__));
618 eError = PVRSRV_ERROR_BAD_MAPPING;
619 goto fail_dma_buf_get;
624 /* We have a hash table so check if we've seen this dmabuf before */
625 psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (IMG_UINTPTR_T) psDmaBuf);
628 /* Reuse the PMR we already created */
632 *puiSize = psDmaBuf->size;
633 *puiAlign = PAGE_SIZE;
/* Reuse path holds its own reference; drop the one dma_buf_get took. */
635 dma_buf_put(psDmaBuf);
641 /* Attach a fake device to to the dmabuf */
642 psAttachment = dma_buf_attach(psDmaBuf, &gpsPVRLDMDev->dev);
643 if (IS_ERR_OR_NULL(psAttachment))
/* NOTE(review): message says dma_buf_get but this is the attach failure —
 * looks like a copy/paste error in the log text; confirm before changing. */
645 PVR_DPF((PVR_DBG_ERROR, "%s: dma_buf_get failed", __func__));
646 eError = PVRSRV_ERROR_BAD_MAPPING;
647 goto fail_dma_buf_attach;
651 * Get the physical heap for this PMR
654 * While we have no way to determine the type of the buffer
655 * we just assume that all dmabufs are from the same
658 eError = PhysHeapAcquire(IonPhysHeapID(), &psHeap);
659 if (eError != PVRSRV_OK)
661 PVR_DPF((PVR_DBG_ERROR, "%s: Failed PhysHeapAcquire", __func__));
665 eError = PhysmemCreateNewDmaBufBackedPMR(psHeap,
667 PhysmemDestroyDmaBuf,
670 if (eError != PVRSRV_OK)
672 goto fail_create_new_pmr;
678 * As different processes may import the same dmabuf we need to
679 * create a hash table so we don't generate a duplicate PMR but
680 * rather just take a reference on an existing one.
682 g_psDmaBufHash = HASH_Create(DMA_BUF_HASH_SIZE);
685 eError = PVRSRV_ERROR_OUT_OF_MEMORY;
686 goto fail_hash_create;
690 /* First time we've seen this dmabuf so store it in the hash table */
691 HASH_Insert(g_psDmaBufHash, (IMG_UINTPTR_T) psDmaBuf, (IMG_UINTPTR_T) psPMR);
692 g_ui32HashRefCount++;
695 *puiSize = psDmaBuf->size;
696 *puiAlign = PAGE_SIZE;
/* Error unwinding: release resources in reverse order of acquisition. */
704 PhysHeapRelease(psHeap);
707 dma_buf_detach(psDmaBuf, psAttachment);
710 dma_buf_put(psDmaBuf);
714 PVR_ASSERT(eError != PVRSRV_OK);
717 #endif /* defined(SUPPORT_ION) */
718 #endif /* !defined(SUPPORT_DRM) || defined(PVR_DRM_USE_PRIME) */