1 /*************************************************************************/ /*!
3 @Title Device Memory Management core internal
4 @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
5 @Description Services internal interface to core device memory management
6 functions that are shared between client and server code.
7 @License Dual MIT/GPLv2
9 The contents of this file are subject to the MIT license as set out below.
11 Permission is hereby granted, free of charge, to any person obtaining a copy
12 of this software and associated documentation files (the "Software"), to deal
13 in the Software without restriction, including without limitation the rights
14 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15 copies of the Software, and to permit persons to whom the Software is
16 furnished to do so, subject to the following conditions:
18 The above copyright notice and this permission notice shall be included in
19 all copies or substantial portions of the Software.
21 Alternatively, the contents of this file may be used under the terms of
22 the GNU General Public License Version 2 ("GPL") in which case the provisions
23 of GPL are applicable instead of those above.
25 If you wish to allow use of your version of this file only under the terms of
26 GPL, and not to allow others to use your version of this file under the terms
27 of the MIT license, indicate your decision by deleting the provisions above
28 and replace them with the notice and other provisions required by GPL as set
29 out in the file called "GPL-COPYING" included in this distribution. If you do
30 not delete the provisions above, a recipient may use your version of this file
31 under the terms of either the MIT license or GPL.
33 This License is also included in this distribution in the file called
36 EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
37 PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
38 BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
39 PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
40 COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
41 IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
42 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
43 */ /**************************************************************************/
45 #ifndef SRVCLIENT_DEVICEMEM_H
46 #define SRVCLIENT_DEVICEMEM_H
48 /********************************************************************************
50 * +------------+ +------------+ +--------------+ +--------------+ *
51 * | a sub- | | a sub- | | an | | allocation | *
52 * | allocation | | allocation | | allocation | | also mapped | *
53 * | | | | | in proc 1 | | into proc 2 | *
54 * +------------+ +------------+ +--------------+ +--------------+ *
56 * +--------------+ +--------------+ +--------------+ *
57 * | page gran- | | page gran- | | page gran- | *
58 * | ular mapping | | ular mapping | | ular mapping | *
59 * +--------------+ +--------------+ +--------------+ *
63 * +--------------+ +--------------+ *
65 * | A "P.M.R." | | A "P.M.R." | *
67 * +--------------+ +--------------+ *
69 ********************************************************************************/
72 All device memory allocations are ultimately a view upon (not
73 necessarily the whole of) a "PMR".
75 A PMR is a "Physical Memory Resource", which may be a
76 "pre-faulted" lump of physical memory, or it may be a
77 representation of some physical memory that will be instantiated
PMRs always represent multiples of some power-of-2 "contiguity"
81 promised by the PMR, which will allow them to be mapped in whole
82 pages into the device MMU. As memory allocations may be smaller
83 than a page, these mappings may be suballocated and thus shared
84 between multiple allocations in one process. A PMR may also be
85 mapped simultaneously into multiple device memory contexts
86 (cross-process scenario), however, for security reasons, it is not
87 legal to share a PMR "both ways" at once, that is, mapped into
88 multiple processes and divided up amongst several suballocations.
90 This PMR terminology is introduced here for background
91 information, but is generally of little concern to the caller of
92 this API. This API handles suballocations and mappings, and the
93 caller thus deals primarily with MEMORY DESCRIPTORS representing
94 an allocation or suballocation, HEAPS representing ranges of
95 virtual addresses in a CONTEXT.
99 |<---------------------------context------------------------------>|
100 |<-------heap------->| |<-------heap------->|<-------heap------->|
101 |<-alloc->| | |<-alloc->|<-alloc->|| |<-alloc->| |
104 #include "img_types.h"
105 #include "devicemem_typedefs.h"
106 #include "pdumpdefs.h"
107 #include "pvrsrv_error.h"
108 #include "pvrsrv_memallocflags.h"
112 /* Use GET and SET function to access this */
113 IMG_INTERNAL extern IMG_UINT32 g_uiLog2PageSize;
/* Read the current log2 device page size.  Use GET/SET rather than
 * touching g_uiLog2PageSize directly. */
#define GET_LOG2_PAGESIZE() ( (const IMG_UINT32) g_uiLog2PageSize )

/* Set the log2 device page size.  Legal values are 12..21 (4KB..2MB),
 * enforced by the assert.
 * The body is wrapped in do { } while (0) so the macro behaves as a
 * single statement (safe in an unbraced if/else followed by ';'), and
 * the argument is parenthesised at each use to avoid operator-precedence
 * surprises when an expression is passed in. */
#define SET_LOG2_PAGESIZE(ui32Log2PageSize) \
	do { \
		PVR_ASSERT( ((ui32Log2PageSize) > 11) && ((ui32Log2PageSize) < 22) ); \
		g_uiLog2PageSize = (IMG_UINT32) (ui32Log2PageSize); \
	} while (0)
122 typedef IMG_UINT32 DEVMEM_HEAPCFGID;
123 #define DEVMEM_HEAPCFG_FORCLIENTS 0
124 #define DEVMEM_HEAPCFG_META 1
127 In order to call the server side functions, we need a bridge handle.
128 We abstract that here, as we may wish to change its form.
131 typedef IMG_HANDLE DEVMEM_BRIDGE_HANDLE;
134 * DevmemCreateContext()
136 * Create a device memory context
138 * This must be called before any heap is created in this context
140 * Caller to provide bridge handle which will be squirreled away
141 * internally and used for all future operations on items from this
142 * memory context. Caller also to provide devicenode handle, as this
143 * is used for MMU configuration and also to determine the heap
144 * configuration for the auto-instantiated heaps.
146 * Note that when compiled in services/server, the hBridge is not used
147 * and is thrown away by the "fake" direct bridge. (This may change.
148 * It is recommended that IMG_NULL be passed for the handle for now)
150 * hDeviceNode and uiHeapBlueprintID shall together dictate which
151 * heap-config to use.
153 * This will cause the server side counterpart to be created also.
155 * If you call DevmemCreateContext() (and the call succeeds) you
 * are promising that you will later call DevmemDestroyContext(),
157 * except for abnormal process termination in which case it is
158 * expected it will be destroyed as part of handle clean up.
160 * Caller to provide storage for the pointer to the NEWDEVMEM_CONTEXT
161 * object thusly created.
164 DevmemCreateContext(DEVMEM_BRIDGE_HANDLE hBridge,
165 IMG_HANDLE hDeviceNode,
166 DEVMEM_HEAPCFGID uiHeapBlueprintID,
167 DEVMEM_CONTEXT **ppsCtxPtr);
170 * DevmemAcquireDevPrivData()
172 * Acquire the device private data for this memory context
175 DevmemAcquireDevPrivData(DEVMEM_CONTEXT *psCtx,
176 IMG_HANDLE *hPrivData);
179 * DevmemReleaseDevPrivData()
181 * Release the device private data for this memory context
184 DevmemReleaseDevPrivData(DEVMEM_CONTEXT *psCtx);
187 * DevmemDestroyContext()
189 * Undoes that done by DevmemCreateContext()
192 DevmemDestroyContext(DEVMEM_CONTEXT *psCtx);
197 * Create a heap in the given context.
199 * N.B. Not intended to be called directly, though it can be.
200 * Normally, heaps are instantiated at context creation time according
201 * to the specified blueprint. See DevmemCreateContext() for details.
203 * This will cause MMU code to set up data structures for the heap,
204 * but may not cause page tables to be modified until allocations are
205 * made from the heap.
207 * The "Quantum" is both the device MMU page size to be configured for
208 * this heap, and the unit multiples of which "quantized" allocations
209 * are made (allocations smaller than this, known as "suballocations"
210 * will be made from a "sub alloc RA" and will "import" chunks
211 * according to this quantum)
213 * Where imported PMRs (or, for example, PMRs created by device class
214 * buffers) are mapped into this heap, it is important that the
215 * physical contiguity guarantee offered by the PMR is greater than or
216 * equal to the quantum size specified here, otherwise the attempt to
217 * map it will fail. "Normal" allocations via Devmem_Allocate
218 * shall automatically meet this requirement, as each "import" will
219 * trigger the creation of a PMR with the desired contiguity. The
220 * supported quantum sizes in that case shall be dictated by the OS
221 * specific implementation of PhysmemNewOSRamBackedPMR() (see)
224 DevmemCreateHeap(DEVMEM_CONTEXT *psCtxPtr,
225 /* base and length of heap */
226 IMG_DEV_VIRTADDR sBaseAddress,
227 IMG_DEVMEM_SIZE_T uiLength,
228 /* log2 of allocation quantum, i.e. "page" size.
229 All allocations (that go to server side) are
230 multiples of this. We use a client-side RA to
231 make sub-allocations from this */
232 IMG_UINT32 ui32Log2Quantum,
233 /* The minimum import alignment for this heap */
234 IMG_UINT32 ui32Log2ImportAlignment,
235 /* Name of heap for debug */
236 /* N.B. Okay to exist on caller's stack - this
237 func takes a copy if it needs it. */
238 const IMG_CHAR *pszName,
239 DEVMEM_HEAPCFGID uiHeapBlueprintID,
240 DEVMEM_HEAP **ppsHeapPtr);
242 * DevmemDestroyHeap()
244 * Reverses DevmemCreateHeap()
246 * N.B. All allocations must have been freed and all mappings must
247 * have been unmapped before invoking this call
250 DevmemDestroyHeap(DEVMEM_HEAP *psHeap);
253 * DevmemExportalignAdjustSizeAndAlign()
254 * Compute the Size and Align passed to avoid suballocations (used when allocation with PVRSRV_MEMALLOCFLAG_EXPORTALIGN)
256 IMG_INTERNAL IMG_VOID
257 DevmemExportalignAdjustSizeAndAlign(DEVMEM_HEAP *psHeap, IMG_DEVMEM_SIZE_T *puiSize, IMG_DEVMEM_ALIGN_T *puiAlign);
262 * Makes an allocation (possibly a "suballocation", as described
263 * below) of device virtual memory from this heap.
265 * The size and alignment of the allocation will be honoured by the RA
266 * that allocates the "suballocation". The resulting allocation will
267 * be mapped into GPU virtual memory and the physical memory to back
268 * it will exist, by the time this call successfully completes.
270 * The size must be a positive integer multiple of the alignment.
 * (i.e. the alignment specifies the alignment of both the start and
272 * the end of the resulting allocation.)
274 * Allocations made via this API are routed though a "suballocation
275 * RA" which is responsible for ensuring that small allocations can be
276 * made without wasting physical memory in the server. Furthermore,
277 * such suballocations can be made entirely client side without
278 * needing to go to the server unless the allocation spills into a new
281 * Such suballocations cause many allocations to share the same "PMR".
282 * This happens only when the flags match exactly.
286 PVRSRV_ERROR DevmemAllocate(DEVMEM_HEAP *psHeap,
287 IMG_DEVMEM_SIZE_T uiSize,
288 IMG_DEVMEM_ALIGN_T uiAlign,
289 DEVMEM_FLAGS_T uiFlags,
290 const IMG_PCHAR pszText,
291 DEVMEM_MEMDESC **ppsMemDescPtr);
294 DevmemAllocateExportable(IMG_HANDLE hBridge,
295 IMG_HANDLE hDeviceNode,
296 IMG_DEVMEM_SIZE_T uiSize,
297 IMG_DEVMEM_ALIGN_T uiAlign,
298 DEVMEM_FLAGS_T uiFlags,
299 const IMG_PCHAR pszText,
300 DEVMEM_MEMDESC **ppsMemDescPtr);
303 DevmemAllocateSparse(IMG_HANDLE hBridge,
304 IMG_HANDLE hDeviceNode,
305 IMG_DEVMEM_SIZE_T uiSize,
306 IMG_DEVMEM_SIZE_T uiChunkSize,
307 IMG_UINT32 ui32NumPhysChunks,
308 IMG_UINT32 ui32NumVirtChunks,
309 IMG_BOOL *pabMappingTable,
310 IMG_DEVMEM_ALIGN_T uiAlign,
311 DEVMEM_FLAGS_T uiFlags,
312 const IMG_PCHAR pszText,
313 DEVMEM_MEMDESC **ppsMemDescPtr);
318 * Reverses that done by DevmemAllocate() N.B. The underlying
319 * mapping and server side allocation _may_ not be torn down, for
320 * example, if the allocation has been exported, or if multiple
321 * allocations were suballocated from the same mapping, but this is
322 * properly refcounted, so the caller does not have to care.
326 DevmemFree(DEVMEM_MEMDESC *psMemDesc);
Map an allocation to the device it was allocated from.
332 This function _must_ be called before any call to
333 DevmemAcquireDevVirtAddr is made as it binds the allocation
335 DevmemReleaseDevVirtAddr is used to release the reference
336 to the device mapping this function created, but it doesn't
337 mean that the memory will actually be unmapped from the
338 device as other references to the mapping obtained via
339 DevmemAcquireDevVirtAddr could still be active.
341 PVRSRV_ERROR DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc,
343 IMG_DEV_VIRTADDR *psDevVirtAddr);
346 DevmemAcquireDevVirtAddr
348 Acquire the MemDesc's device virtual address.
349 This function _must_ be called after DevmemMapToDevice
and is expected to be used by functions which didn't allocate
the MemDesc but need to know its address
353 PVRSRV_ERROR DevmemAcquireDevVirtAddr(DEVMEM_MEMDESC *psMemDesc,
354 IMG_DEV_VIRTADDR *psDevVirtAddrRet);
356 * DevmemReleaseDevVirtAddr()
358 * give up the licence to use the device virtual address that was
359 * acquired by "Acquire" or "MapToDevice"
362 DevmemReleaseDevVirtAddr(DEVMEM_MEMDESC *psMemDesc);
365 * DevmemAcquireCpuVirtAddr()
367 * Acquires a license to use the cpu virtual address of this mapping.
368 * Note that the memory may not have been mapped into cpu virtual
369 * memory prior to this call. On first "acquire" the memory will be
370 * mapped in (if it wasn't statically mapped in) and on last put it
371 * _may_ become unmapped. Later calling "Acquire" again, _may_ cause
372 * the memory to be mapped at a different address.
374 PVRSRV_ERROR DevmemAcquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
375 IMG_VOID **ppvCpuVirtAddr);
 * DevmemReleaseCpuVirtAddr()
379 * give up the licence to use the cpu virtual address that was granted
380 * with the "Get" call.
383 DevmemReleaseCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc);
388 * Given a memory allocation allocated with DevmemAllocateExportable()
389 * create a "cookie" that can be passed intact by the caller's own choice
390 * of secure IPC to another process and used as the argument to "map"
391 * to map this memory into a heap in the target processes. N.B. This can
392 * also be used to map into multiple heaps in one process, though that's not
 * Note, the caller must later call DevmemUnexport() before freeing the memory.
398 PVRSRV_ERROR DevmemExport(DEVMEM_MEMDESC *psMemDesc,
399 DEVMEM_EXPORTCOOKIE *psExportCookie);
402 IMG_VOID DevmemUnexport(DEVMEM_MEMDESC *psMemDesc,
403 DEVMEM_EXPORTCOOKIE *psExportCookie);
406 DevmemImport(IMG_HANDLE hBridge,
407 DEVMEM_EXPORTCOOKIE *psCookie,
408 DEVMEM_FLAGS_T uiFlags,
409 DEVMEM_MEMDESC **ppsMemDescPtr);
412 * DevmemIsValidExportCookie()
413 * Check whether the Export Cookie contains a valid export */
415 DevmemIsValidExportCookie(DEVMEM_EXPORTCOOKIE *psExportCookie);
418 * DevmemMakeServerExportClientExport()
420 * This is a "special case" function for making a server export cookie
421 * which went through the direct bridge into an export cookie that can
422 * be passed through the client bridge.
425 DevmemMakeServerExportClientExport(DEVMEM_BRIDGE_HANDLE hBridge,
426 DEVMEM_SERVER_EXPORTCOOKIE hServerExportCookie,
427 DEVMEM_EXPORTCOOKIE *psExportCookie);
430 * DevmemUnmakeServerExportClientExport()
432 * Free any resource associated with the Make operation
435 DevmemUnmakeServerExportClientExport(DEVMEM_BRIDGE_HANDLE hBridge,
436 DEVMEM_EXPORTCOOKIE *psExportCookie);
440 * The following set of functions is specific to the heap "blueprint"
441 * stuff, for automatic creation of heaps when a context is created
446 /* Devmem_HeapConfigCount: returns the number of heap configs that
447 this device has. Note that there is no acquire/release semantics
448 required, as this data is guaranteed to be constant for the
449 lifetime of the device node */
451 DevmemHeapConfigCount(DEVMEM_BRIDGE_HANDLE hBridge,
452 IMG_HANDLE hDeviceNode,
453 IMG_UINT32 *puiNumHeapConfigsOut);
455 /* Devmem_HeapCount: returns the number of heaps that a given heap
456 config on this device has. Note that there is no acquire/release
457 semantics required, as this data is guaranteed to be constant for
458 the lifetime of the device node */
460 DevmemHeapCount(DEVMEM_BRIDGE_HANDLE hBridge,
461 IMG_HANDLE hDeviceNode,
462 IMG_UINT32 uiHeapConfigIndex,
463 IMG_UINT32 *puiNumHeapsOut);
464 /* Devmem_HeapConfigName: return the name of the given heap config.
465 The caller is to provide the storage for the returned string and
466 indicate the number of bytes (including null terminator) for such
467 string in the BufSz arg. Note that there is no acquire/release
468 semantics required, as this data is guaranteed to be constant for
469 the lifetime of the device node.
472 DevmemHeapConfigName(DEVMEM_BRIDGE_HANDLE hBridge,
473 IMG_HANDLE hDeviceNode,
474 IMG_UINT32 uiHeapConfigIndex,
475 IMG_CHAR *pszConfigNameOut,
476 IMG_UINT32 uiConfigNameBufSz);
478 /* Devmem_HeapDetails: fetches all the metadata that is recorded in
479 this heap "blueprint". Namely: heap name (caller to provide
480 storage, and indicate buffer size (including null terminator) in
481 BufSz arg), device virtual address and length, log2 of data page
482 size (will be one of 12, 14, 16, 18, 20, 21, at time of writing).
483 Note that there is no acquire/release semantics required, as this
484 data is guaranteed to be constant for the lifetime of the device
487 DevmemHeapDetails(DEVMEM_BRIDGE_HANDLE hBridge,
488 IMG_HANDLE hDeviceNode,
489 IMG_UINT32 uiHeapConfigIndex,
490 IMG_UINT32 uiHeapIndex,
491 IMG_CHAR *pszHeapNameOut,
492 IMG_UINT32 uiHeapNameBufSz,
493 IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
494 IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
495 IMG_UINT32 *puiLog2DataPageSize,
496 IMG_UINT32 *puiLog2ImportAlignmentOut);
499 * Devmem_FindHeapByName()
501 * returns the heap handle for the named _automagic_ heap in this
502 * context. "automagic" heaps are those that are born with the
503 * context from a blueprint
506 DevmemFindHeapByName(const DEVMEM_CONTEXT *psCtx,
507 const IMG_CHAR *pszHeapName,
508 DEVMEM_HEAP **ppsHeapRet);
511 * DevmemGetHeapBaseDevVAddr()
513 * returns the device virtual address of the base of the heap.
517 DevmemGetHeapBaseDevVAddr(DEVMEM_HEAP *psHeap,
518 IMG_DEV_VIRTADDR *pDevVAddr);
521 DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
522 IMG_HANDLE *phImport);
525 DevmemGetImportUID(DEVMEM_MEMDESC *psMemDesc,
526 IMG_UINT64 *pui64UID);
529 DevmemGetReservation(DEVMEM_MEMDESC *psMemDesc,
530 IMG_HANDLE *hReservation);
532 IMG_INTERNAL PVRSRV_ERROR
533 DevmemGetPMRData(DEVMEM_MEMDESC *psMemDesc,
535 IMG_DEVMEM_OFFSET_T *puiPMROffset);
537 IMG_INTERNAL PVRSRV_ERROR
538 DevmemGetFlags(DEVMEM_MEMDESC *psMemDesc,
539 DEVMEM_FLAGS_T *puiFlags);
542 DevmemLocalImport(IMG_HANDLE hBridge,
543 IMG_HANDLE hExtHandle,
544 DEVMEM_FLAGS_T uiFlags,
545 DEVMEM_MEMDESC **ppsMemDescPtr,
546 IMG_DEVMEM_SIZE_T *puiSizePtr);
548 IMG_INTERNAL PVRSRV_ERROR
549 DevmemIsDevVirtAddrValid(DEVMEM_CONTEXT *psContext,
550 IMG_DEV_VIRTADDR sDevVAddr);
552 /* DevmemGetHeapLog2ImportAlignment()
554 * Get the import alignment used for a certain heap.
557 DevmemGetHeapLog2ImportAlignment(DEVMEM_HEAP *psHeap);
#endif /* #ifndef SRVCLIENT_DEVICEMEM_H */