1 #ifndef _INTEL_RINGBUFFER_H_
2 #define _INTEL_RINGBUFFER_H_
/*
 * Hardware status page: a page of memory the GPU writes status
 * reports (e.g. sequence numbers) into, read by the CPU via
 * intel_read_status_page().
 * NOTE(review): struct body is truncated in this view -- the
 * remaining fields (page address, gfx address) are not shown.
 */
4 struct intel_hw_status_page {
7 struct drm_gem_object *obj; /* GEM object backing the status page */
/*
 * Per-ring MMIO register accessors.  Each ring's control registers
 * (TAIL/START/HEAD/CTL) live at fixed offsets from the ring's
 * mmio_base, so these wrap I915_READ/I915_WRITE with the matching
 * RING_* offset macro.  They expect `dev_priv` in scope, as the
 * underlying I915_READ/I915_WRITE macros do.
 */
10 #define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
11 #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
12 #define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
13 #define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
14 #define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
15 #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
16 #define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
17 #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
/* Forward declaration: only a pointer to this is used below. */
19 struct drm_i915_gem_execbuffer2;
/*
 * Per-engine ring buffer state: one instance per hardware command
 * streamer (render, BSD, BLT).  Bundles the ring's backing storage,
 * its hardware status page, IRQ/seqno bookkeeping, the per-engine
 * operation vtable, and the GEM object tracking lists.
 * NOTE(review): struct body is truncated in this view -- several
 * fields, parts of the function-pointer signatures, comment
 * delimiters, and the closing brace are missing.
 */
20 struct intel_ring_buffer {
30 struct drm_device *dev;
31 struct drm_gem_object *gem_object; /* GEM object backing the ring itself */
37 struct intel_hw_status_page status_page;
39 u32 irq_gem_seqno; /* last seq seen at irq time */
40 u32 waiting_gem_seqno; /* seqno a waiter is currently blocked on */
41 int user_irq_refcount; /* nested get/put count for the user IRQ */
/*
 * Per-engine operation vtable: each ring type supplies its own
 * implementations of IRQ enable/disable, init, tail write, cache
 * flush, request emission, seqno readback, batch dispatch, cleanup.
 */
42 void (*user_irq_get)(struct drm_device *dev,
43 struct intel_ring_buffer *ring);
44 void (*user_irq_put)(struct drm_device *dev,
45 struct intel_ring_buffer *ring);
47 int (*init)(struct drm_device *dev,
48 struct intel_ring_buffer *ring);
/* NOTE(review): trailing parameter line(s) of write_tail truncated */
50 void (*write_tail)(struct drm_device *dev,
51 struct intel_ring_buffer *ring,
/* NOTE(review): trailing parameter line(s) of flush truncated */
53 void (*flush)(struct drm_device *dev,
54 struct intel_ring_buffer *ring,
55 u32 invalidate_domains,
/* NOTE(review): trailing parameter line(s) of add_request truncated */
57 u32 (*add_request)(struct drm_device *dev,
58 struct intel_ring_buffer *ring,
60 u32 (*get_seqno)(struct drm_device *dev,
61 struct intel_ring_buffer *ring);
62 int (*dispatch_gem_execbuffer)(struct drm_device *dev,
63 struct intel_ring_buffer *ring,
64 struct drm_i915_gem_execbuffer2 *exec,
65 struct drm_clip_rect *cliprects,
66 uint64_t exec_offset);
67 void (*cleanup)(struct intel_ring_buffer *ring);
/* NOTE(review): the comment delimiters of the list docs below were lost */
70 * List of objects currently involved in rendering from the
73 * Includes buffers having the contents of their GPU caches
74 * flushed, not necessarily primitives. last_rendering_seqno
75 * represents when the rendering involved will be completed.
77 * A reference is held on the buffer while on this list.
79 struct list_head active_list;
82 * List of breadcrumbs associated with GPU requests currently
85 struct list_head request_list;
88 * List of objects currently pending a GPU write flush.
90 * All elements on this list will belong to either the
91 * active_list or flushing_list, last_rendering_seqno can
92 * be used to differentiate between the two elements.
94 struct list_head gpu_write_list;
97 * Do we have some not yet emitted requests outstanding?
99 bool outstanding_lazy_request;
101 wait_queue_head_t irq_queue; /* waiters blocked on ring activity */
/*
 * Read one dword from the ring's hardware status page by register
 * index (page_addr is treated as an array of u32).
 * NOTE(review): the `static inline u32` return-type line, the `reg`
 * parameter line, and the function body are truncated in this view.
 */
108 intel_read_status_page(struct intel_ring_buffer *ring,
111 u32 *regs = ring->status_page.page_addr;
/* Generic ring setup/teardown shared by all engine types. */
115 int intel_init_ring_buffer(struct drm_device *dev,
116 struct intel_ring_buffer *ring);
117 void intel_cleanup_ring_buffer(struct drm_device *dev,
118 struct intel_ring_buffer *ring);
/* Block until at least n bytes of ring space are free. */
119 int intel_wait_ring_buffer(struct drm_device *dev,
120 struct intel_ring_buffer *ring, int n);
/* Reserve n bytes of ring space before a run of intel_ring_emit(). */
121 void intel_ring_begin(struct drm_device *dev,
122 struct intel_ring_buffer *ring, int n);
/*
 * Write one dword into the ring at the current tail position.
 * NOTE(review): the `data` parameter line and the rest of the body
 * (tail advance) are truncated in this view.
 */
124 static inline void intel_ring_emit(struct drm_device *dev,
125 struct intel_ring_buffer *ring,
128 unsigned int *virt = ring->virtual_start + ring->tail;
/* Publish emitted commands to the GPU by updating the ring tail. */
133 void intel_ring_advance(struct drm_device *dev,
134 struct intel_ring_buffer *ring);
136 u32 intel_ring_get_seqno(struct drm_device *dev,
137 struct intel_ring_buffer *ring);
/* Per-engine constructors: render, BSD (video), and BLT rings. */
139 int intel_init_render_ring_buffer(struct drm_device *dev);
140 int intel_init_bsd_ring_buffer(struct drm_device *dev);
141 int intel_init_blt_ring_buffer(struct drm_device *dev);
143 u32 intel_ring_get_active_head(struct drm_device *dev,
144 struct intel_ring_buffer *ring);
/* Point the hardware at this ring's status page after init/reset. */
145 void intel_ring_setup_status_page(struct drm_device *dev,
146 struct intel_ring_buffer *ring);
148 #endif /* _INTEL_RINGBUFFER_H_ */