1 /*************************************************************************/ /*!
3 @Title Device Memory Management
4 @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
5 @Description This file defines flags used on memory allocations and mappings
6 These flags are relevant throughout the memory management
7 software stack and are specified by users of services and
8 understood by all levels of the memory management in both
10 @License Dual MIT/GPLv2
12 The contents of this file are subject to the MIT license as set out below.
14 Permission is hereby granted, free of charge, to any person obtaining a copy
15 of this software and associated documentation files (the "Software"), to deal
16 in the Software without restriction, including without limitation the rights
17 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
18 copies of the Software, and to permit persons to whom the Software is
19 furnished to do so, subject to the following conditions:
21 The above copyright notice and this permission notice shall be included in
22 all copies or substantial portions of the Software.
24 Alternatively, the contents of this file may be used under the terms of
25 the GNU General Public License Version 2 ("GPL") in which case the provisions
26 of GPL are applicable instead of those above.
28 If you wish to allow use of your version of this file only under the terms of
29 GPL, and not to allow others to use your version of this file under the terms
30 of the MIT license, indicate your decision by deleting the provisions above
31 and replace them with the notice and other provisions required by GPL as set
32 out in the file called "GPL-COPYING" included in this distribution. If you do
33 not delete the provisions above, a recipient may use your version of this file
34 under the terms of either the MIT license or GPL.
36 This License is also included in this distribution in the file called
39 EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
40 PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
41 BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
42 PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
43 COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
44 IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
45 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
46 */ /**************************************************************************/
48 #ifndef PVRSRV_MEMALLOCFLAGS_H
49 #define PVRSRV_MEMALLOCFLAGS_H
51 #include "img_types.h"
52 #if defined(SUPPORT_RGX)
53 #include "rgx_memallocflags.h"
55 typedef IMG_UINT32 PVRSRV_MEMALLOCFLAGS_T;
/*
 * **********************************************************
 * *                                                        *
 * *                 MEMORY ACCESS FLAGS                    *
 * *                                                        *
 * **********************************************************
 */

/*!
 * PVRSRV_MEMALLOCFLAG_GPU_READABLE
 *
 * This flag affects the device MMU protection flags, and specifies
 * that the memory may be read by the GPU.
 *
 * Typically all device memory allocations would specify this flag.
 *
 * At the moment, memory allocations without this flag are not supported.
 *
 * This flag will live with the PMR, thus subsequent mappings would
 * carry the same permission.
 *
 * This is a dual purpose flag.  It specifies that memory is permitted
 * to be read by the GPU, and also requests that the allocation is
 * mapped into the GPU as a readable mapping:
 *
 * - When used as an argument on PMR creation: it specifies
 *   that GPU readable mappings will be _permitted_
 * - When used as an argument to a "map" function: it specifies
 *   that a GPU readable mapping is _desired_
 * - When used as an argument to "AllocDeviceMem": it specifies
 *   that the PMR will be created with permission to be mapped
 *   with a GPU readable mapping, _and_ that this PMR will be
 *   mapped with a GPU readable mapping.
 *
 * This distinction becomes important when (a) we export allocations;
 * and (b) when we separate the creation of the PMR from the mapping.
 */
#define PVRSRV_MEMALLOCFLAG_GPU_READABLE (1U<<0)
/* NOTE: all CHECK macros parenthesise the flags argument so that
   expression arguments (e.g. "uiA | uiB") expand safely. */
#define PVRSRV_CHECK_GPU_READABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READABLE) != 0U)

/*!
 * PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE
 *
 * This flag affects the device MMU protection flags, and specifies
 * that the memory may be written by the GPU.
 *
 * Using this flag on an allocation signifies that the allocation is
 * intended to be written by the GPU.  Omitting this flag causes a
 * read-only GPU mapping.
 *
 * This flag will live with the PMR, thus subsequent mappings would
 * carry the same permission.
 *
 * This is a dual purpose flag.  It specifies that memory is permitted
 * to be written by the GPU, and also requests that the allocation is
 * mapped into the GPU as a writeable mapping (see the note on
 * PVRSRV_MEMALLOCFLAG_GPU_READABLE about permission vs. mapping mode).
 *
 * N.B.  This flag has no relevance to the CPU's MMU mapping, if any,
 * and would therefore not enforce read-only mapping on the CPU.
 */
#define PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE (1U<<1) /*!< mapped as writeable to the GPU */
#define PVRSRV_CHECK_GPU_WRITEABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE) != 0U)

#define PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED (1U<<2) /*!< can be mapped as GPU readable in another GPU memory context */
#define PVRSRV_CHECK_GPU_READ_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED) != 0U)

#define PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED (1U<<3) /*!< can be mapped as GPU writeable in another GPU memory context */
#define PVRSRV_CHECK_GPU_WRITE_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED) != 0U)

#define PVRSRV_MEMALLOCFLAG_CPU_READABLE (1U<<4) /*!< mapped as readable to the CPU */
#define PVRSRV_CHECK_CPU_READABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READABLE) != 0U)

#define PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE (1U<<5) /*!< mapped as writeable to the CPU */
#define PVRSRV_CHECK_CPU_WRITEABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE) != 0U)

#define PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED (1U<<6) /*!< can be mapped as CPU readable in another CPU memory context */
#define PVRSRV_CHECK_CPU_READ_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED) != 0U)

#define PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED (1U<<7) /*!< can be mapped as CPU writeable in another CPU memory context */
#define PVRSRV_CHECK_CPU_WRITE_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED) != 0U)
/*
 * **********************************************************
 * *                                                        *
 * *                 CACHE CONTROL FLAGS                    *
 * *                                                        *
 * **********************************************************
 */

/*
	The following defines are used to control the GPU cache bit field.
	The defines are mutually exclusive *values* of a 3-bit field at bit
	position 8, not independent bits, so a given mode must be tested by
	comparing the whole field for equality, never with a bitwise AND.

	A helper macro, PVRSRV_GPU_CACHE_MODE, is provided to obtain just
	the GPU cache bit field from the flags.  This should be used
	whenever the GPU cache mode needs to be determined.
*/

/*! GPU cache-mode field: 3 bits at bit position 8.  Three bits are
    reserved to allow for future cache modes (values 5 and 6 unused). */
#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK (7U<<8)
/*! Extracts the GPU cache-mode field from a set of allocation flags. */
#define PVRSRV_GPU_CACHE_MODE(uiFlags) ((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK)

/*!
	GPU domain.  Request uncached memory.  This means that any writes to
	memory allocated with this flag are written straight to memory and
	thus are coherent for any device in the system.
*/
#define PVRSRV_MEMALLOCFLAG_GPU_UNCACHED (0U<<8)
/* The mode value is 0, so a bitwise AND can never detect it; the whole
   field is compared instead. */
#define PVRSRV_CHECK_GPU_UNCACHED(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_UNCACHED)

/*!
	GPU domain.  Use write combiner (if supported) to combine sequential
	writes together to reduce memory access by doing burst writes.
*/
#define PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE (1U<<8)
#define PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE)

/*!
	GPU domain.  This flag affects the device MMU protection flags.

	This flag ensures that the GPU and the CPU will always be coherent.
	This is done either by snooping each other's caches or, if this is
	not supported, by making the allocation uncached.  Please note that
	this will _not_ guarantee coherency with memory, so if this memory
	is accessed by another device (e.g. display controller) a flush will
	be required.
*/
#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT (2U<<8)
#define PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT)

/*!
	GPU domain.  Request cached memory, but not coherent (i.e. no cache
	snooping).  This means that if the allocation needs to transition
	from one device to another, services has to be informed so it can
	flush/invalidate the appropriate caches.

	Note: We reserve 3 bits in the CPU/GPU cache mode to allow for
	future expansion.
*/
#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT (3U<<8)
#define PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT)

/*!
	Request cached coherent memory.  This is like
	PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT but doesn't fall back on
	uncached memory if the system doesn't support cache-snooping,
	but rather returns an error.
*/
#define PVRSRV_MEMALLOCFLAG_GPU_CACHED_CACHE_COHERENT (4U<<8)
#define PVRSRV_CHECK_GPU_CACHED_CACHE_COHERENT(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHED_CACHE_COHERENT)

/*!
	This flag is for internal use only and is used to indicate that the
	underlying allocation should be cached on the GPU after all the
	snooping and coherency checks have been done.
*/
#define PVRSRV_MEMALLOCFLAG_GPU_CACHED (7U<<8)
#define PVRSRV_CHECK_GPU_CACHED(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHED)
/*
	The following defines are used to control the CPU cache bit field.
	The defines are mutually exclusive *values* of a 3-bit field at bit
	position 11, not independent bits, so a given mode must be tested by
	comparing the whole field for equality, never with a bitwise AND.

	A helper macro, PVRSRV_CPU_CACHE_MODE, is provided to obtain just
	the CPU cache bit field from the flags.  This should be used
	whenever the CPU cache mode needs to be determined.
*/

/*! CPU cache-mode field: 3 bits at bit position 11.  Three bits are
    reserved to allow for future cache modes (values 5 and 6 unused). */
#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK (7U<<11)
/*! Extracts the CPU cache-mode field from a set of allocation flags. */
#define PVRSRV_CPU_CACHE_MODE(uiFlags) ((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)

/*!
	CPU domain.  Request uncached memory.  This means that any writes to
	memory allocated with this flag are written straight to memory and
	thus are coherent for any device in the system.
*/
#define PVRSRV_MEMALLOCFLAG_CPU_UNCACHED (0U<<11)
/* The mode value is 0, so a bitwise AND can never detect it; the whole
   field is compared instead. */
#define PVRSRV_CHECK_CPU_UNCACHED(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_UNCACHED)

/*!
	CPU domain.  Use write combiner (if supported) to combine sequential
	writes together to reduce memory access by doing burst writes.
*/
#define PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE (1U<<11)
#define PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE)

/*!
	CPU domain.  This flag affects the device MMU protection flags.

	This flag ensures that the GPU and the CPU will always be coherent.
	This is done either by snooping each other's caches or, if this is
	not supported, by making the allocation uncached.  Please note that
	this will _not_ guarantee coherency with memory, so if this memory
	is accessed by another device (e.g. display controller) a flush will
	be required.
*/
#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT (2U<<11)
#define PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT)

/*!
	CPU domain.  Request cached memory, but not coherent (i.e. no cache
	snooping).  This means that if the allocation needs to transition
	from one device to another, services has to be informed so it can
	flush/invalidate the appropriate caches.

	Note: We reserve 3 bits in the CPU/GPU cache mode to allow for
	future expansion.
*/
#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT (3U<<11)
#define PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT)

/*!
	Request cached coherent memory.  This is like
	PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT but doesn't fall back on
	uncached memory if the system doesn't support cache-snooping,
	but rather returns an error.
*/
#define PVRSRV_MEMALLOCFLAG_CPU_CACHED_CACHE_COHERENT (4U<<11)
#define PVRSRV_CHECK_CPU_CACHED_CACHE_COHERENT(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHED_CACHE_COHERENT)

/*!
	This flag is for internal use only and is used to indicate that the
	underlying allocation should be cached on the CPU after all the
	snooping and coherency checks have been done.
*/
#define PVRSRV_MEMALLOCFLAG_CPU_CACHED (7U<<11)
#define PVRSRV_CHECK_CPU_CACHED(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHED)
/* Helper flags for usual cases.  Each combined flag sets the same cache
   mode in both the GPU and the CPU fields, so the corresponding check
   tests both fields for equality.  (In particular, the UNCACHED value
   is 0, so a bitwise AND test could never detect it.) */

#define PVRSRV_MEMALLOCFLAG_UNCACHED (PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED) /*!< Memory will be uncached */
#define PVRSRV_CHECK_UNCACHED(uiFlags) \
	((PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_UNCACHED) && \
	 (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_UNCACHED))

#define PVRSRV_MEMALLOCFLAG_WRITE_COMBINE (PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE | PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE) /*!< Memory will be write-combined */
#define PVRSRV_CHECK_WRITE_COMBINE(uiFlags) \
	((PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE) && \
	 (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE))

#define PVRSRV_MEMALLOCFLAG_CACHE_COHERENT (PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) /*!< Memory will be cache-coherent */
#define PVRSRV_CHECK_CACHE_COHERENT(uiFlags) \
	((PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) && \
	 (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT))

#define PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT (PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT) /*!< Memory will be cache-incoherent */
#define PVRSRV_CHECK_CACHE_INCOHERENT(uiFlags) \
	((PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT) && \
	 (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT))
/*!
	CPU MMU Flags mask -- intended for use internal to services only
 */
#define PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK  (PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
                                                PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
                                                PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)

/*!
	MMU Flags mask -- intended for use internal to services only - used
	for partitioning the flags bits and determining which flags to pass
	down to the device MMU code
 */
#define PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK  (PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
                                                PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
                                                PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK)
/*!
	PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE

	Indicates that the PMR created due to this allocation will support
	in-kernel CPU mappings.  Only privileged processes may use this
	flag as it may cause wastage of precious kernel virtual memory on
	some platforms.
 */
#define PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE (1U<<14)
#define PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE) != 0U)
/*
 * **********************************************************
 * *                                                        *
 * *                  ALLOC MEMORY FLAGS                    *
 * *                                                        *
 * **********************************************************
 */

/*! Don't obtain OS pages at allocation time; physical backing is
    obtained later, on demand (see PVRSRV_CHECK_ON_DEMAND). */
#define PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC (1U<<15)
#define PVRSRV_CHECK_ON_DEMAND(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) != 0U)

/*!
	PVRSRV_MEMALLOCFLAG_CPU_LOCAL

	Indicates that the allocation will primarily be accessed by the
	CPU, so a UMA allocation (if available) is preferable.  If not set,
	the allocation will primarily be accessed by the GPU, so a LMA
	allocation (if available) is preferable.
 */
#define PVRSRV_MEMALLOCFLAG_CPU_LOCAL (1U<<16)
#define PVRSRV_CHECK_CPU_LOCAL(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_LOCAL) != 0U)
/*
 * **********************************************************
 * *                                                        *
 * *         MEMORY ZEROING AND POISONING FLAGS             *
 * *                                                        *
 * **********************************************************
 *
 * Zero / Poison, on alloc/free
 *
 * We think the following usecases are required:
 *
 *  - don't poison or zero on alloc or free
 *    (normal operation, also most efficient)
 *  - poison on alloc
 *    (for helping to highlight bugs)
 *  - poison on alloc and free
 *    (for helping to highlight bugs)
 *  - zero on alloc
 *    (avoid highlighting security issues in other uses of memory)
 *  - zero on alloc and poison on free
 *    (avoid highlighting security issues in other uses of memory,
 *    while helping to highlight a subset of bugs e.g. memory
 *    reuse after free)
 *
 * Since there are more than 4, we can't encode this in just two bits,
 * so we might as well have a separate flag for each of the three
 * operations.
 */

/*!
	PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC
	Ensures that the memory allocated is initialised with zeroes.
 */
#define PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC (1U<<31)
#define PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) != 0U)

/*!
	PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC

	Scribbles over the allocated memory with a poison value.

	Not compatible with ZERO_ON_ALLOC.

	Poisoning is very deliberately _not_ reflected in PDump as we want
	a simulation to cry loudly if the initialised data propagates to
	somewhere it should not.
 */
#define PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC (1U<<30)
#define PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC) != 0U)

/*!
	PVRSRV_MEMALLOCFLAG_POISON_ON_FREE

	Causes memory to be trashed when freed, as a lazy man's security
	measure.
 */
#define PVRSRV_MEMALLOCFLAG_POISON_ON_FREE (1U<<29)
#define PVRSRV_CHECK_POISON_ON_FREE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_FREE) != 0U)
/*
 * **********************************************************
 * *                                                        *
 * *              Device specific MMU flags                 *
 * *                                                        *
 * **********************************************************
 *
 * Some services controlled devices have device specific control bits
 * in their page table entries; we need to allow these flags to be
 * passed down the memory management layers so the user can control
 * these bits.
 */

/*! Bit position of the device-specific flags field (bits 24..27). */
#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET		24
#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK		0x0f000000UL
/*! Places the low 4 bits of n into the device-specific flags field;
    any higher bits of n are discarded by the mask. */
#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(n)	\
			(((n) << PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) & \
			PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK)
/*!
	PMR flags mask -- for internal services use only.  This is the set
	of flags that will be passed down and stored with the PMR; this
	also includes the MMU flags which the PMR has to pass down to the
	device MMU code at map time.
 */
#define PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK  (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
                                            PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
                                            PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
                                            PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
                                            PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \
                                            PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
                                            PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
                                            PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
                                            PVRSRV_MEMALLOCFLAG_CPU_LOCAL)
/* Compile-time sanity check: the GPU MMU mask must be a subset of the
   PMR mask (the visible conditional lacked its closing #endif). */
#if ((~(PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK) & PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK) != 0)
#error PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK is not a subset of PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK
#endif
/*!
	RA differentiation mask

	For use internal to services.

	This is the set of flag bits that are able to determine whether a
	pair of allocations are permitted to live in the same page table.
	Allocations whose flags differ in any of these places would be
	allocated from separate RA Imports and therefore would never
	coexist in the same page.

	NOTE: PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK already contains most of
	the individually listed flags; they are kept listed explicitly to
	make the differentiation criteria self-documenting.
 */
#define PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK (PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
                                                      PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
                                                      PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
                                                      PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
                                                      PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK | \
                                                      PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK | \
                                                      PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \
                                                      PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK | \
                                                      PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
/* Compile-time sanity check: the PMR mask must be a subset of the RA
   differentiation mask (the visible conditional lacked its closing
   #endif). */
#if ((~(PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK) & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK) != 0)
#error PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK is not a subset of PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK
#endif
/*!
	Flags that affect _allocation_ (every defined flag bit).
 */
#define PVRSRV_MEMALLOCFLAGS_PERALLOCFLAGSMASK (0xFFFFFFFFU)
/*!
	Flags that affect _mapping_ -- the subset of flags that are
	meaningful when an already-created PMR is mapped.
 */
#define PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK   (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
                                                    PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
                                                    PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
                                                    PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
                                                    PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
                                                    PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK | \
                                                    PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK | \
                                                    PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
/* Compile-time sanity check: the per-mapping mask must be a subset of
   the RA differentiation mask (the visible conditional lacked its
   closing #endif). */
#if ((~(PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK) & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK) != 0)
#error PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK is not a subset of PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK
#endif
527 #endif /* #ifndef PVRSRV_MEMALLOCFLAGS_H */