1 /**************************************************************************/ /*!
3 @Title Device Memory Management
4 @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
5 @Description Header file for server side component of device memory management
6 @License Dual MIT/GPLv2
8 The contents of this file are subject to the MIT license as set out below.
10 Permission is hereby granted, free of charge, to any person obtaining a copy
11 of this software and associated documentation files (the "Software"), to deal
12 in the Software without restriction, including without limitation the rights
13 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14 copies of the Software, and to permit persons to whom the Software is
15 furnished to do so, subject to the following conditions:
17 The above copyright notice and this permission notice shall be included in
18 all copies or substantial portions of the Software.
20 Alternatively, the contents of this file may be used under the terms of
21 the GNU General Public License Version 2 ("GPL") in which case the provisions
22 of GPL are applicable instead of those above.
24 If you wish to allow use of your version of this file only under the terms of
25 GPL, and not to allow others to use your version of this file under the terms
26 of the MIT license, indicate your decision by deleting the provisions above
27 and replace them with the notice and other provisions required by GPL as set
28 out in the file called "GPL-COPYING" included in this distribution. If you do
29 not delete the provisions above, a recipient may use your version of this file
30 under the terms of either the MIT license or GPL.
This License is also included in this distribution in the file called "MIT-COPYING".
35 EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
36 PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
37 BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
38 PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
39 COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
40 IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
41 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42 */ /***************************************************************************/
44 #ifndef __DEVICEMEM_SERVER_H__
45 #define __DEVICEMEM_SERVER_H__
47 #include "device.h" /* For device node */
48 #include "img_types.h"
49 #include "pvr_debug.h"
50 #include "pvrsrv_error.h"
/* Opaque server-side handle types. Only the typedef names are exposed
 * here; the struct definitions live in the devicemem server
 * implementation, so clients cannot dereference them. */
typedef struct _DEVMEMINT_CTX_ DEVMEMINT_CTX;
typedef struct _DEVMEMINT_CTX_EXPORT_ DEVMEMINT_CTX_EXPORT;
typedef struct _DEVMEMINT_HEAP_ DEVMEMINT_HEAP;
/* FIXME: can we unify RESERVATION and MAPPING to save data structures? */
typedef struct _DEVMEMINT_RESERVATION_ DEVMEMINT_RESERVATION;
typedef struct _DEVMEMINT_MAPPING_ DEVMEMINT_MAPPING;
62 * DevmemServerGetImportHandle()
64 * For given exportable memory descriptor returns PMR handle
68 DevmemServerGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
69 IMG_HANDLE *phImport);
72 * DevmemServerGetHeapHandle()
74 * For given reservation returns the Heap handle
78 DevmemServerGetHeapHandle(DEVMEMINT_RESERVATION *psReservation,
82 * DevmemIntCtxCreate()
84 * Create a Server-side Device Memory Context. This is usually the
85 * counterpart of the client side memory context, and indeed is
86 * usually created at the same time.
88 * You must have one of these before creating any heaps.
90 * All heaps must have been destroyed before calling
91 * DevmemIntCtxDestroy()
93 * If you call DevmemIntCtxCreate() (and it succeeds) you are promising
94 * to later call DevmemIntCtxDestroy()
96 * Note that this call will cause the device MMU code to do some work
97 * for creating the device memory context, but it does not guarantee
98 * that a page catalogue will have been created, as this may be
99 * deferred until first allocation.
101 * Caller to provide storage for a pointer to the DEVMEM_CTX object
102 * that will be created by this call.
106 PVRSRV_DEVICE_NODE *psDeviceNode,
107 /* devnode / perproc etc */
109 DEVMEMINT_CTX **ppsDevmemCtxPtr,
110 IMG_HANDLE *hPrivData
113 * DevmemIntCtxDestroy()
115 * Undoes a prior DevmemIntCtxCreate or DevmemIntCtxImport.
119 DEVMEMINT_CTX *psDevmemCtx
123 * DevmemIntCtxExport()
125 * Export a device memory context created with DevmemIntCtxCreate to another
130 DevmemIntCtxExport(DEVMEMINT_CTX *psDevmemCtx,
131 DEVMEMINT_CTX_EXPORT **ppsExport);
134 * DevmemIntCtxUnexport
136 * Unexport an exported a device memory context.
139 DevmemIntCtxUnexport(DEVMEMINT_CTX_EXPORT *psExport);
144 * Import an exported a device memory context.
147 DevmemIntCtxImport(DEVMEMINT_CTX_EXPORT *psExport,
148 DEVMEMINT_CTX **ppsDevmemCtxPtr,
149 IMG_HANDLE *hPrivData);
152 * DevmemIntHeapCreate()
154 * Creates a new heap in this device memory context. This will cause
155 * a call into the MMU code to allocate various data structures for
156 * managing this heap. It will not necessarily cause any page tables
157 * to be set up, as this can be deferred until first allocation.
158 * (i.e. we shouldn't care - it's up to the MMU code)
160 * Note that the data page size must be specified (as log 2). The
161 * data page size as specified here will be communicated to the mmu
162 * module, and thus may determine the page size configured in page
163 * directory entries for subsequent allocations from this heap. It is
164 * essential that the page size here is less than or equal to the
165 * "minimum contiguity guarantee" of any PMR that you subsequently
166 * attempt to map to this heap.
168 * If you call DevmemIntHeapCreate() (and the call succeeds) you are
169 * promising that you shall subsequently call DevmemIntHeapDestroy()
171 * Caller to provide storage for a pointer to the DEVMEM_HEAP object
172 * that will be created by this call.
176 DEVMEMINT_CTX *psDevmemCtx,
177 IMG_DEV_VIRTADDR sHeapBaseAddr,
178 IMG_DEVMEM_SIZE_T uiHeapLength,
179 IMG_UINT32 uiLog2DataPageSize,
180 DEVMEMINT_HEAP **ppsDevmemHeapPtr
183 * DevmemIntHeapDestroy()
185 * Destroys a heap previously created with DevmemIntHeapCreate()
187 * All allocations from his heap must have been freed before this
191 DevmemIntHeapDestroy(
192 DEVMEMINT_HEAP *psDevmemHeap
198 * Maps the given PMR to the virtual range previously allocated with
199 * DevmemIntReserveRange()
201 * If appropriate, the PMR must have had its physical backing
202 * committed, as this call will call into the MMU code to set up the
203 * page tables for this allocation, which shall in turn request the
204 * physical addresses from the PMR. Alternatively, the PMR
205 * implementation can choose to do so off the back of the "lock"
206 * callback, which it will receive as a result (indirectly) of this
209 * This function makes no promise w.r.t. the circumstances that it can
210 * be called, and these would be "inherited" from the implementation
211 * of the PMR. For example if the PMR "lock" callback causes pages to
212 * be pinned at that time (which may cause scheduling or disk I/O
213 * etc.) then it would not be legal to "Map" the PMR in a context
214 * where scheduling events are disallowed.
216 * If you call DevmemIntMapPMR() (and the call succeeds) then you are
217 * promising that you shall later call DevmemIntUnmapPMR()
220 DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap,
221 DEVMEMINT_RESERVATION *psReservation,
223 PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
224 DEVMEMINT_MAPPING **ppsMappingPtr);
226 * DevmemIntUnmapPMR()
228 * Reverses the mapping caused by DevmemIntMapPMR()
231 DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping);
234 * DevmemIntReserveRange()
236 * Indicates that the specified range should be reserved from the
239 * In turn causes the page tables to be allocated to cover the
242 * If you call DevmemIntReserveRange() (and the call succeeds) then you
243 * are promising that you shall later call DevmemIntUnreserveRange()
246 DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap,
247 IMG_DEV_VIRTADDR sAllocationDevVAddr,
248 IMG_DEVMEM_SIZE_T uiAllocationSize,
249 DEVMEMINT_RESERVATION **ppsReservationPtr);
251 * DevmemIntUnreserveRange()
253 * Undoes the state change caused by DevmemIntReserveRage()
256 DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psDevmemReservation);
259 * SLCFlushInvalRequest()
261 * Schedules an SLC Flush & Invalidate on the firmware if required.
262 * If the request is performed depends on the caching attributes
263 * of the allocation and hence depends on the underlying PMR
266 DevmemSLCFlushInvalRequest(PVRSRV_DEVICE_NODE *psDeviceNode, PMR *psPmr);
269 DevmemIntIsVDevAddrValid(DEVMEMINT_CTX *psDevMemContext,
270 IMG_DEV_VIRTADDR sDevAddr);
274 * DevmemIntPDumpSaveToFileVirtual()
276 * Writes out PDump "SAB" commands with the data found in memory at
277 * the given virtual address.
279 /* FIXME: uiArraySize shouldn't be here, and is an
280 artefact of the bridging */
282 DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
283 IMG_DEV_VIRTADDR sDevAddrStart,
284 IMG_DEVMEM_SIZE_T uiSize,
285 IMG_UINT32 uiArraySize,
286 const IMG_CHAR *pszFilename,
287 IMG_UINT32 ui32FileOffset,
288 IMG_UINT32 ui32PDumpFlags);
291 DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext);
294 DevmemIntPDumpBitmap(PVRSRV_DEVICE_NODE *psDeviceNode,
295 IMG_CHAR *pszFileName,
296 IMG_UINT32 ui32FileOffset,
297 IMG_UINT32 ui32Width,
298 IMG_UINT32 ui32Height,
299 IMG_UINT32 ui32StrideInBytes,
300 IMG_DEV_VIRTADDR sDevBaseAddr,
301 DEVMEMINT_CTX *psDevMemContext,
303 PDUMP_PIXEL_FORMAT ePixelFormat,
304 IMG_UINT32 ui32AddrMode,
305 IMG_UINT32 ui32PDumpFlags);
308 #ifdef INLINE_IS_PRAGMA
309 #pragma inline(PVRSRVSyncPrimPDumpPolKM)
311 static INLINE PVRSRV_ERROR
312 DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
313 IMG_DEV_VIRTADDR sDevAddrStart,
314 IMG_DEVMEM_SIZE_T uiSize,
315 IMG_UINT32 uiArraySize,
316 const IMG_CHAR *pszFilename,
317 IMG_UINT32 ui32FileOffset,
318 IMG_UINT32 ui32PDumpFlags)
320 PVR_UNREFERENCED_PARAMETER(psDevmemCtx);
321 PVR_UNREFERENCED_PARAMETER(sDevAddrStart);
322 PVR_UNREFERENCED_PARAMETER(uiSize);
323 PVR_UNREFERENCED_PARAMETER(uiArraySize);
324 PVR_UNREFERENCED_PARAMETER(pszFilename);
325 PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
326 PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
330 #ifdef INLINE_IS_PRAGMA
331 #pragma inline(PVRSRVSyncPrimPDumpPolKM)
333 static INLINE PVRSRV_ERROR
334 DevmemIntPDumpBitmap(PVRSRV_DEVICE_NODE *psDeviceNode,
335 IMG_CHAR *pszFileName,
336 IMG_UINT32 ui32FileOffset,
337 IMG_UINT32 ui32Width,
338 IMG_UINT32 ui32Height,
339 IMG_UINT32 ui32StrideInBytes,
340 IMG_DEV_VIRTADDR sDevBaseAddr,
341 DEVMEMINT_CTX *psDevMemContext,
343 PDUMP_PIXEL_FORMAT ePixelFormat,
344 IMG_UINT32 ui32AddrMode,
345 IMG_UINT32 ui32PDumpFlags)
347 PVR_UNREFERENCED_PARAMETER(psDeviceNode);
348 PVR_UNREFERENCED_PARAMETER(pszFileName);
349 PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
350 PVR_UNREFERENCED_PARAMETER(ui32Width);
351 PVR_UNREFERENCED_PARAMETER(ui32Height);
352 PVR_UNREFERENCED_PARAMETER(ui32StrideInBytes);
353 PVR_UNREFERENCED_PARAMETER(sDevBaseAddr);
354 PVR_UNREFERENCED_PARAMETER(psDevMemContext);
355 PVR_UNREFERENCED_PARAMETER(ui32Size);
356 PVR_UNREFERENCED_PARAMETER(ePixelFormat);
357 PVR_UNREFERENCED_PARAMETER(ui32AddrMode);
358 PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
362 #endif /* ifndef __DEVICEMEM_SERVER_H__ */