1 /*************************************************************************/ /*!
3 @Title dmabuf memory allocator
4 @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
5 @Description Part of the memory management. This module is responsible for
6 implementing the function callbacks for dmabuf memory.
7 @License Dual MIT/GPLv2
9 The contents of this file are subject to the MIT license as set out below.
11 Permission is hereby granted, free of charge, to any person obtaining a copy
12 of this software and associated documentation files (the "Software"), to deal
13 in the Software without restriction, including without limitation the rights
14 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15 copies of the Software, and to permit persons to whom the Software is
16 furnished to do so, subject to the following conditions:
18 The above copyright notice and this permission notice shall be included in
19 all copies or substantial portions of the Software.
21 Alternatively, the contents of this file may be used under the terms of
22 the GNU General Public License Version 2 ("GPL") in which case the provisions
23 of GPL are applicable instead of those above.
25 If you wish to allow use of your version of this file only under the terms of
26 GPL, and not to allow others to use your version of this file under the terms
27 of the MIT license, indicate your decision by deleting the provisions above
28 and replace them with the notice and other provisions required by GPL as set
29 out in the file called "GPL-COPYING" included in this distribution. If you do
30 not delete the provisions above, a recipient may use your version of this file
31 under the terms of either the MIT license or GPL.
33 This License is also included in this distribution in the file called "MIT-COPYING".
36 EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
37 PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
38 BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
39 PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
40 COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
41 IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
42 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
43 */ /**************************************************************************/
45 #include "img_types.h"
46 #include "pvr_debug.h"
47 #include "pvrsrv_error.h"
48 #include "pvrsrv_memallocflags.h"
53 #include "physmem_lma.h"
54 #include "pdump_physmem.h"
57 #include "physmem_dmabuf.h"
61 #if defined(PVR_RI_DEBUG)
62 #include "ri_server.h"
65 #include <linux/err.h>
66 #include <linux/slab.h>
67 #include <linux/dma-buf.h>
68 #include <linux/scatterlist.h>
71 #include <linux/pci.h>
72 #elif defined(LDM_PLATFORM)
73 #include <linux/platform_device.h>
75 #error Either LDM_PCI or LDM_PLATFORM must be defined
78 typedef struct _PMR_DMA_BUF_DATA_
80 struct dma_buf_attachment *psAttachment;
81 struct sg_table *psSgTable;
82 struct dma_buf *psDmaBuf;
84 IMG_DEVMEM_SIZE_T uiSize;
85 IMG_PVOID *pvKernAddr;
87 IMG_DEV_PHYADDR *pasDevPhysAddr;
88 IMG_UINT32 ui32PageCount;
89 PHYS_HEAP *psPhysHeap;
91 IMG_BOOL bPoisonOnFree;
92 IMG_BOOL bPDumpMalloced;
93 IMG_HANDLE hPDumpAllocInfo;
96 /* Start size of the g_psDmaBufHash hash table */
97 #define DMA_BUF_HASH_SIZE 20
102 #elif defined(LDM_PLATFORM)
103 struct platform_device
107 static HASH_TABLE *g_psDmaBufHash = IMG_NULL;
108 static IMG_UINT32 g_ui32HashRefCount = 0;
110 #if defined(PVR_ANDROID_ION_USE_SG_LENGTH)
111 #define pvr_sg_length(sg) ((sg)->length)
113 #define pvr_sg_length(sg) sg_dma_len(sg)
116 /*****************************************************************************
117 * DMA-BUF specific functions *
118 *****************************************************************************/
121 Obtain a list of physical pages from the dmabuf.
/* Map the dma-buf attachment for DMA and build a page-granular array of
 * device-physical addresses in psPrivData; also records the sg table and
 * total size on success.
 * NOTE(review): this copy of the file has lines dropped (braces, the
 * declarations of eError/i/j, the error-check ifs, goto labels and the
 * final return) — diff against the original source before building. */
123 static PVRSRV_ERROR DmaBufPhysAddrAcquire(PMR_DMA_BUF_DATA *psPrivData, int fd)
125 struct dma_buf_attachment *psAttachment = psPrivData->psAttachment;
126 IMG_DEV_PHYADDR *pasDevPhysAddr = NULL;
127 IMG_CPU_PHYADDR sCpuPhysAddr;
128 IMG_UINT32 ui32PageCount = 0;
129 struct scatterlist *sg;
130 struct sg_table *table;
/* Map the attachment for DMA to obtain the buffer's scatter-gather table. */
134 table = dma_buf_map_attachment(psAttachment, DMA_NONE);
/* Mapping-failure path (its surrounding if/goto is among the dropped lines). */
137 eError = PVRSRV_ERROR_INVALID_PARAMS;
142 We do a two pass process, 1st workout how many pages there
143 are, 2nd fill in the data.
/* Pass 1: count pages, rounding each sg segment up to whole pages. */
145 for_each_sg(table->sgl, sg, table->nents, i)
147 ui32PageCount += PAGE_ALIGN(pvr_sg_length(sg)) / PAGE_SIZE;
150 if (WARN_ON(!ui32PageCount))
152 PVR_DPF((PVR_DBG_ERROR, "%s: Failed to import dmabuf with no pages",
154 eError = PVRSRV_ERROR_INVALID_PARAMS;
/* One IMG_DEV_PHYADDR per page of the buffer. */
158 pasDevPhysAddr = OSAllocMem(sizeof(IMG_DEV_PHYADDR)*ui32PageCount);
161 eError = PVRSRV_ERROR_OUT_OF_MEMORY;
/* Pass 2: record the device address of every page of every segment.
 * NOTE(review): presumably ui32PageCount is reset to 0 before this loop
 * and re-incremented as the array index in the dropped lines — confirm
 * against the original source. */
167 for_each_sg(table->sgl, sg, table->nents, i)
171 for (j = 0; j < pvr_sg_length(sg); j += PAGE_SIZE)
173 /* Pass 2: Get the page data */
174 sCpuPhysAddr.uiAddr = sg_phys(sg);
176 pasDevPhysAddr[ui32PageCount] =
177 IonCPUPhysToDevPhys(sCpuPhysAddr, j);
/* Success: publish the results into the private data. */
182 psPrivData->pasDevPhysAddr = pasDevPhysAddr;
183 psPrivData->ui32PageCount = ui32PageCount;
184 psPrivData->uiSize = (IMG_DEVMEM_SIZE_T)ui32PageCount * PAGE_SIZE;
185 psPrivData->psSgTable = table;
/* Error exit (labels dropped): only reached with a failure code set. */
191 PVR_ASSERT(eError!= PVRSRV_OK);
195 static void DmaBufPhysAddrRelease(PMR_DMA_BUF_DATA *psPrivData)
197 struct dma_buf_attachment *psAttachment = psPrivData->psAttachment;
198 struct sg_table *psSgTable = psPrivData->psSgTable;
200 dma_buf_unmap_attachment(psAttachment, psSgTable, DMA_NONE);
202 OSFreeMem(psPrivData->pasDevPhysAddr);
205 static IMG_BOOL _DmaBufKeyCompare(IMG_SIZE_T uKeySize, void *pKey1, void *pKey2)
207 IMG_DEV_PHYADDR *psKey1 = pKey1;
208 IMG_DEV_PHYADDR *psKey2 = pKey2;
209 PVR_ASSERT(uKeySize == sizeof(IMG_DEV_PHYADDR));
211 return psKey1->uiAddr == psKey2->uiAddr;
214 /*****************************************************************************
215 * PMR callback functions *
216 *****************************************************************************/
219 static void _Poison(IMG_PVOID pvKernAddr, IMG_DEVMEM_SIZE_T uiBufferSize,
220 const IMG_CHAR *pacPoisonData, IMG_SIZE_T uiPoisonSize)
222 IMG_DEVMEM_SIZE_T uiDestByteIndex;
223 IMG_CHAR *pcDest = pvKernAddr;
224 IMG_UINT32 uiSrcByteIndex = 0;
226 for(uiDestByteIndex=0; uiDestByteIndex<uiBufferSize; uiDestByteIndex++)
228 pcDest[uiDestByteIndex] = pacPoisonData[uiSrcByteIndex];
230 if (uiSrcByteIndex == uiPoisonSize)
/* Byte patterns written over pages for poison-on-alloc / poison-on-free.
 * The *Size constants are the pattern lengths excluding the NUL terminator. */
237 static const IMG_CHAR _AllocPoison[] = "^PoIsOn";
238 static const IMG_UINT32 _AllocPoisonSize = 7;
239 static const IMG_CHAR _FreePoison[] = "<DEAD-BEEF>";
240 static const IMG_UINT32 _FreePoisonSize = 11;
/* PMR finalize callback: tears down a dma-buf PMR — removes it from the
 * global import hash, optionally poisons every page, then unmaps, detaches
 * and releases the dma-buf and the physical heap reference.
 * NOTE(review): this copy of the file has lines dropped (braces, the
 * declarations of err/i, and the final return) — diff against the
 * original source before building. */
242 static PVRSRV_ERROR PMRFinalizeDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
244 PMR_DMA_BUF_DATA *psPrivData = pvPriv;
/* Drop our entry from the import hash; destroy the hash itself once the
   last dma-buf PMR is gone. */
246 HASH_Remove_Extended(g_psDmaBufHash, &psPrivData->pasDevPhysAddr[0]);
247 g_ui32HashRefCount--;
248 if (g_ui32HashRefCount == 0)
250 HASH_Delete(g_psDmaBufHash);
251 g_psDmaBufHash = IMG_NULL;
/* Balance the PDump allocation made at import time. */
254 if (psPrivData->bPDumpMalloced)
256 PDumpPMRFree(psPrivData->hPDumpAllocInfo);
/* Poison-on-free: overwrite every page through a CPU mapping. */
259 if (psPrivData->bPoisonOnFree)
261 IMG_PVOID pvKernAddr;
264 err = dma_buf_begin_cpu_access(psPrivData->psDmaBuf, 0,
265 psPrivData->uiSize, DMA_NONE);
268 PVR_DPF((PVR_DBG_ERROR, "%s: Failed to begin cpu access", __func__));
269 PVR_ASSERT(IMG_FALSE);
/* kmap, poison and kunmap one page at a time. */
272 for (i = 0; i < psPrivData->uiSize / PAGE_SIZE; i++)
274 pvKernAddr = dma_buf_kmap(psPrivData->psDmaBuf, i);
276 if (IS_ERR_OR_NULL(pvKernAddr))
278 PVR_DPF((PVR_DBG_ERROR, "%s: Failed to poison allocation before free", __func__));
279 PVR_ASSERT(IMG_FALSE);
282 _Poison(pvKernAddr, PAGE_SIZE, _FreePoison, _FreePoisonSize);
284 dma_buf_kunmap(psPrivData->psDmaBuf, i, pvKernAddr);
287 dma_buf_end_cpu_access(psPrivData->psDmaBuf, 0,
288 psPrivData->uiSize, DMA_NONE);
/* Release resources in reverse order of acquisition in
   PhysmemImportDmaBuf(). */
291 DmaBufPhysAddrRelease(psPrivData);
292 dma_buf_detach(psPrivData->psDmaBuf, psPrivData->psAttachment);
293 dma_buf_put(psPrivData->psDmaBuf);
294 PhysHeapRelease(psPrivData->psPhysHeap);
295 OSFreeMem(psPrivData);
301 Lock and unlock function for physical address
302 don't do anything for as we acquire the physical
303 address at create time.
305 static PVRSRV_ERROR PMRLockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
306 IMG_UINT32 uiLog2DevPageSize)
308 PVR_UNREFERENCED_PARAMETER(pvPriv);
309 PVR_UNREFERENCED_PARAMETER(uiLog2DevPageSize);
315 static PVRSRV_ERROR PMRUnlockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
317 PVR_UNREFERENCED_PARAMETER(pvPriv);
322 static PVRSRV_ERROR PMRDevPhysAddrDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
323 IMG_DEVMEM_OFFSET_T uiOffset,
324 IMG_DEV_PHYADDR *psDevPAddr)
326 PMR_DMA_BUF_DATA *psPrivData = pvPriv;
327 IMG_UINT32 ui32PageCount;
328 IMG_UINT32 ui32PageIndex;
329 IMG_UINT32 ui32InPageOffset;
331 ui32PageCount = psPrivData->ui32PageCount;
333 ui32PageIndex = uiOffset >> PAGE_SHIFT;
334 ui32InPageOffset = uiOffset - ((IMG_DEVMEM_OFFSET_T)ui32PageIndex << PAGE_SHIFT);
335 PVR_ASSERT(ui32PageIndex < ui32PageCount);
336 PVR_ASSERT(ui32InPageOffset < PAGE_SIZE);
338 psDevPAddr->uiAddr = psPrivData->pasDevPhysAddr[ui32PageIndex].uiAddr + ui32InPageOffset;
/* PMR callback: map the whole dma-buf into kernel virtual address space
 * with dma_buf_vmap() and return a pointer at uiOffset into it. Only one
 * outstanding mapping is supported at a time.
 * NOTE(review): this copy of the file has lines dropped (the return type,
 * the uiOffset/uiSize/ulFlags parameters, braces, the declarations of
 * err/eError, goto labels and the returns) — diff against the original
 * source before building. */
344 PMRAcquireKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
347 void **ppvKernelAddressOut,
348 IMG_HANDLE *phHandleOut,
351 PMR_DMA_BUF_DATA *psPrivData = pvPriv;
352 IMG_PVOID pvKernAddr;
/* A previous mapping must have been released first. */
356 PVR_ASSERT(psPrivData->pvKernAddr == IMG_NULL);
/* Open a CPU access window over the whole buffer... */
358 err = dma_buf_begin_cpu_access(psPrivData->psDmaBuf, 0,
359 psPrivData->uiSize, DMA_NONE);
362 eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
/* ...then create the kernel virtual mapping. */
366 pvKernAddr = dma_buf_vmap(psPrivData->psDmaBuf);
367 if (IS_ERR_OR_NULL(pvKernAddr))
369 eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
/* Success: hand back offset address, remember the base for release. */
373 *ppvKernelAddressOut = pvKernAddr + uiOffset;
374 psPrivData->pvKernAddr = pvKernAddr;
/* Error path: undo begin_cpu_access when vmap failed. */
378 dma_buf_end_cpu_access(psPrivData->psDmaBuf, 0,
379 psPrivData->uiSize, DMA_NONE);
381 PVR_ASSERT(eError != PVRSRV_OK);
385 static void PMRReleaseKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
388 PMR_DMA_BUF_DATA *psPrivData = pvPriv;
390 PVR_UNREFERENCED_PARAMETER(hHandle);
392 dma_buf_vunmap(psPrivData->psDmaBuf, psPrivData->pvKernAddr);
393 psPrivData->pvKernAddr = IMG_NULL;
395 dma_buf_end_cpu_access(psPrivData->psDmaBuf, 0,
396 psPrivData->uiSize, DMA_NONE);
399 static PMR_IMPL_FUNCTAB _sPMRDmaBufFuncTab =
401 .pfnLockPhysAddresses = PMRLockPhysAddressesDmaBuf,
402 .pfnUnlockPhysAddresses = PMRUnlockPhysAddressesDmaBuf,
403 .pfnDevPhysAddr = PMRDevPhysAddrDmaBuf,
404 .pfnAcquireKernelMappingData = PMRAcquireKernelMappingDataDmaBuf,
405 .pfnReleaseKernelMappingData = PMRReleaseKernelMappingDataDmaBuf,
406 .pfnFinalize = PMRFinalizeDmaBuf,
409 /*****************************************************************************
410 * Public facing interface *
411 *****************************************************************************/
/* Public entry point: import the dma-buf referred to by fd as a PMR.
 * Validates the allocation flags, acquires the physical heap and dma-buf,
 * builds the physical address list, de-duplicates against the global hash
 * (returning a reference to an existing PMR for an already-imported
 * buffer), optionally zeroes/poisons the pages, then creates the PMR and
 * records it in the hash.
 * NOTE(review): this copy of the file has many lines dropped (the return
 * type, the fd/ppsPMRPtr parameters, braces, else branches, the bZero
 * assignments, goto labels, the PMRCreatePMR argument list and the
 * returns) — diff against the original source before building. */
414 PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
416 PVRSRV_MEMALLOCFLAGS_T uiFlags,
418 IMG_DEVMEM_SIZE_T *puiSize,
419 IMG_DEVMEM_ALIGN_T *puiAlign)
421 PMR_DMA_BUF_DATA *psPrivData = IMG_NULL;
422 IMG_HANDLE hPDumpAllocInfo = IMG_NULL;
423 IMG_BOOL bMappingTable = IMG_TRUE;
424 IMG_BOOL bPoisonOnAlloc;
425 IMG_BOOL bPoisonOnFree;
426 PMR_FLAGS_T uiPMRFlags;
427 PMR *psPMR = IMG_NULL;
/* Decode the per-allocation behavior flags.
   NOTE(review): the bZero assignments are among the dropped lines. */
431 if (uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
440 if (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC)
442 bPoisonOnAlloc = IMG_TRUE;
446 bPoisonOnAlloc = IMG_FALSE;
449 if (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_FREE)
451 bPoisonOnFree = IMG_TRUE;
455 bPoisonOnFree = IMG_FALSE;
458 if ((uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) &&
459 (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC))
461 /* Zero on Alloc and Poison on Alloc are mutually exclusive */
462 eError = PVRSRV_ERROR_INVALID_PARAMS;
468 eError = PVRSRV_ERROR_INVALID_PARAMS;
472 psPrivData = OSAllocMem(sizeof(*psPrivData));
473 if (psPrivData == NULL)
475 eError = PVRSRV_ERROR_OUT_OF_MEMORY;
480 Get the physical heap for this PMR
483 While we have no way to determine the type of the buffer
484 we just assume that all dmabufs are from the same
487 eError = PhysHeapAcquire(IonPhysHeapID(), &psPrivData->psPhysHeap);
488 if (eError != PVRSRV_OK)
490 PVR_DPF((PVR_DBG_ERROR, "%s: Failed PhysHeapAcquire", __func__));
494 /* Get the buffer handle */
495 psPrivData->psDmaBuf = dma_buf_get(fd);
497 if (IS_ERR_OR_NULL(psPrivData->psDmaBuf))
499 PVR_DPF((PVR_DBG_ERROR, "%s: dma_buf_get failed", __func__));
500 eError = PVRSRV_ERROR_BAD_MAPPING;
501 goto fail_dma_buf_get;
504 /* Attach a fake device to to the dmabuf */
505 psPrivData->psAttachment = dma_buf_attach(psPrivData->psDmaBuf, &gpsPVRLDMDev->dev);
/* NOTE(review): this error message says dma_buf_get but reports a
   dma_buf_attach failure — likely a copy/paste slip in the original. */
507 if (IS_ERR_OR_NULL(psPrivData->psAttachment))
509 PVR_DPF((PVR_DBG_ERROR, "%s: dma_buf_get failed", __func__));
510 eError = PVRSRV_ERROR_BAD_MAPPING;
517 We could defer the import until lock address time but we
518 do it here as then we can detect any errors at import time.
519 Also we need to know the dmabuf size here and there seems
520 to be no other way to find that other then map the buffer for dma.
522 eError = DmaBufPhysAddrAcquire(psPrivData, fd);
523 if (eError != PVRSRV_OK)
525 PVR_DPF((PVR_DBG_ERROR, "%s: DmaBufPhysAddrAcquire failed", __func__));
/* Lazily create the global hash used to de-duplicate imports. */
529 if (g_psDmaBufHash == IMG_NULL)
532 As different processes may import the same dmabuf we need to
533 create a hash table so we don't generate a duplicate PMR but
534 rather just take a reference on an existing one.
536 g_psDmaBufHash = HASH_Create_Extended(DMA_BUF_HASH_SIZE, sizeof(psPrivData->pasDevPhysAddr[0]), HASH_Func_Default, _DmaBufKeyCompare);
537 if (g_psDmaBufHash == IMG_NULL)
539 eError = PVRSRV_ERROR_OUT_OF_MEMORY;
545 We have a hash table so check if have already seen this
/* Keyed on the device address of the buffer's first page. */
548 psPMR = (PMR *) HASH_Retrieve_Extended(g_psDmaBufHash, &psPrivData->pasDevPhysAddr[0]);
549 if (psPMR != IMG_NULL)
552 We already know about this dmabuf but we had to do a bunch
553 for work to determine that so here we have to undo it
555 DmaBufPhysAddrRelease(psPrivData);
556 dma_buf_detach(psPrivData->psDmaBuf, psPrivData->psAttachment);
557 dma_buf_put(psPrivData->psDmaBuf);
558 PhysHeapRelease(psPrivData->psPhysHeap);
559 OSFreeMem(psPrivData);
561 /* Reuse the PMR we already created */
/* Point at the existing PMR's private data for the size/align outputs. */
565 psPrivData = PMRGetPrivateDataHack(psPMR, &_sPMRDmaBufFuncTab);
566 *puiSize = psPrivData->uiSize;
567 *puiAlign = PAGE_SIZE;
/* First import of this buffer: zero or poison each page if requested. */
572 if (bZero || bPoisonOnAlloc)
574 IMG_PVOID pvKernAddr;
577 err = dma_buf_begin_cpu_access(psPrivData->psDmaBuf, 0,
578 psPrivData->uiSize, DMA_NONE);
581 eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
585 for (i = 0; i < psPrivData->uiSize / PAGE_SIZE; i++)
587 pvKernAddr = dma_buf_kmap(psPrivData->psDmaBuf, i);
589 if (IS_ERR_OR_NULL(pvKernAddr))
/* NOTE(review): message says "before free" on the alloc path — likely
   copied from the free path in the original. */
591 PVR_DPF((PVR_DBG_ERROR, "%s: Failed to poison allocation before free", __func__));
592 eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
598 memset(pvKernAddr, 0, PAGE_SIZE);
602 _Poison(pvKernAddr, PAGE_SIZE, _AllocPoison, _AllocPoisonSize);
605 dma_buf_kunmap(psPrivData->psDmaBuf, i, pvKernAddr);
608 dma_buf_end_cpu_access(psPrivData->psDmaBuf, 0,
609 psPrivData->uiSize, DMA_NONE);
612 psPrivData->bPoisonOnFree = bPoisonOnFree;
/* Narrow the allocation flags to PMR flags. */
614 uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
615 /* check no significant bits were lost in cast due to different
616 bit widths for flags */
617 PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
/* Create the PMR (argument list truncated in this copy of the file). */
619 eError = PMRCreatePMR(psPrivData->psPhysHeap,
633 if (eError != PVRSRV_OK)
635 PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create PMR", __func__));
639 #if defined(PVR_RI_DEBUG)
641 eError = RIWritePMREntryKM (psPMR,
648 psPrivData->hPDumpAllocInfo = hPDumpAllocInfo;
649 psPrivData->bPDumpMalloced = IMG_TRUE;
651 /* First time we've seen this dmabuf so store it in the hash table */
652 HASH_Insert_Extended(g_psDmaBufHash, &psPrivData->pasDevPhysAddr[0], (IMG_UINTPTR_T) psPMR);
653 g_ui32HashRefCount++;
656 *puiSize = psPrivData->uiSize;
657 *puiAlign = PAGE_SIZE;
/* Error unwind ladder (labels dropped): undo in reverse order. */
662 dma_buf_end_cpu_access(psPrivData->psDmaBuf, 0,
663 psPrivData->uiSize, DMA_NONE);
665 DmaBufPhysAddrRelease(psPrivData);
667 dma_buf_detach(psPrivData->psDmaBuf, psPrivData->psAttachment);
669 dma_buf_put(psPrivData->psDmaBuf);
671 PhysHeapRelease(psPrivData->psPhysHeap);
673 OSFreeMem(psPrivData);
677 PVR_ASSERT(eError != PVRSRV_OK);