drivers/gpu/drm/i915/intel_ringbuffer.c
1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Zou Nan hai <nanhai.zou@intel.com>
26  *    Xiang Hai hao <haihao.xiang@intel.com>
27  *
28  */
29
30 #include <drm/drmP.h>
31 #include "i915_drv.h"
32 #include <drm/i915_drm.h>
33 #include "i915_trace.h"
34 #include "intel_drv.h"
35
36 /*
37  * 965+ support PIPE_CONTROL commands, which provide finer grained control
38  * over cache flushing.
39  */
40 struct pipe_control {
41         struct drm_i915_gem_object *obj;
42         volatile u32 *cpu_page;
43         u32 gtt_offset;
44 };
45
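/*
 * Space left in the ring between the hardware read pointer (HEAD) and our
 * software write pointer (tail).  I915_RING_FREE_SPACE is kept in reserve so
 * that tail never catches up to head on a full ring (head == tail means
 * empty to the hardware); a negative intermediate result simply means the
 * free range wraps past the end of the buffer.
 */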
46 static inline int ring_space(struct intel_ring_buffer *ring)
47 {
48         int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
49         if (space < 0)
50                 space += ring->size;
51         return space;
52 }
53
54 static int
55 gen2_render_ring_flush(struct intel_ring_buffer *ring,
56                        u32      invalidate_domains,
57                        u32      flush_domains)
58 {
59         u32 cmd;
60         int ret;
61
62         cmd = MI_FLUSH;
63         if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
64                 cmd |= MI_NO_WRITE_FLUSH;
65
66         if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
67                 cmd |= MI_READ_FLUSH;
68
69         ret = intel_ring_begin(ring, 2);
70         if (ret)
71                 return ret;
72
73         intel_ring_emit(ring, cmd);
74         intel_ring_emit(ring, MI_NOOP);
75         intel_ring_advance(ring);
76
77         return 0;
78 }
79
80 static int
81 gen4_render_ring_flush(struct intel_ring_buffer *ring,
82                        u32      invalidate_domains,
83                        u32      flush_domains)
84 {
85         struct drm_device *dev = ring->dev;
86         u32 cmd;
87         int ret;
88
89         /*
90          * read/write caches:
91          *
92          * I915_GEM_DOMAIN_RENDER is always invalidated, but is
93          * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
94          * also flushed at 2d versus 3d pipeline switches.
95          *
96          * read-only caches:
97          *
98          * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
99          * MI_READ_FLUSH is set, and is always flushed on 965.
100          *
101          * I915_GEM_DOMAIN_COMMAND may not exist?
102          *
103          * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
104          * invalidated when MI_EXE_FLUSH is set.
105          *
106          * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
107          * invalidated with every MI_FLUSH.
108          *
109          * TLBs:
110          *
111          * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
112          * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
113          * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
114          * are flushed at any MI_FLUSH.
115          */
116
117         cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
118         if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
119                 cmd &= ~MI_NO_WRITE_FLUSH;
120         if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
121                 cmd |= MI_EXE_FLUSH;
122
123         if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
124             (IS_G4X(dev) || IS_GEN5(dev)))
125                 cmd |= MI_INVALIDATE_ISP;
126
127         ret = intel_ring_begin(ring, 2);
128         if (ret)
129                 return ret;
130
131         intel_ring_emit(ring, cmd);
132         intel_ring_emit(ring, MI_NOOP);
133         intel_ring_advance(ring);
134
135         return 0;
136 }
137
138 /**
139  * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
140  * implementing two workarounds on gen6.  From section 1.4.7.1
141  * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
142  *
143  * [DevSNB-C+{W/A}] Before any depth stall flush (including those
144  * produced by non-pipelined state commands), software needs to first
145  * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
146  * 0.
147  *
148  * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
149  * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
150  *
151  * And the workaround for these two requires this workaround first:
152  *
153  * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
154  * BEFORE the pipe-control with a post-sync op and no write-cache
155  * flushes.
156  *
157  * And this last workaround is tricky because of the requirements on
158  * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
159  * volume 2 part 1:
160  *
161  *     "1 of the following must also be set:
162  *      - Render Target Cache Flush Enable ([12] of DW1)
163  *      - Depth Cache Flush Enable ([0] of DW1)
164  *      - Stall at Pixel Scoreboard ([1] of DW1)
165  *      - Depth Stall ([13] of DW1)
166  *      - Post-Sync Operation ([13] of DW1)
167  *      - Notify Enable ([8] of DW1)"
168  *
169  * The cache flushes require the workaround flush that triggered this
170  * one, so we can't use it.  Depth stall would trigger the same.
171  * Post-sync nonzero is what triggered this second workaround, so we
172  * can't use that one either.  Notify enable is IRQs, which aren't
173  * really our business.  That leaves only stall at scoreboard.
174  */
175 static int
176 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
177 {
178         struct pipe_control *pc = ring->private;
179         u32 scratch_addr = pc->gtt_offset + 128;
180         int ret;
181
183         ret = intel_ring_begin(ring, 6);
184         if (ret)
185                 return ret;
186
187         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
188         intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
189                         PIPE_CONTROL_STALL_AT_SCOREBOARD);
190         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
191         intel_ring_emit(ring, 0); /* low dword */
192         intel_ring_emit(ring, 0); /* high dword */
193         intel_ring_emit(ring, MI_NOOP);
194         intel_ring_advance(ring);
195
196         ret = intel_ring_begin(ring, 6);
197         if (ret)
198                 return ret;
199
200         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
201         intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
202         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
203         intel_ring_emit(ring, 0);
204         intel_ring_emit(ring, 0);
205         intel_ring_emit(ring, MI_NOOP);
206         intel_ring_advance(ring);
207
208         return 0;
209 }
210
211 static int
212 gen6_render_ring_flush(struct intel_ring_buffer *ring,
213                          u32 invalidate_domains, u32 flush_domains)
214 {
215         u32 flags = 0;
216         struct pipe_control *pc = ring->private;
217         u32 scratch_addr = pc->gtt_offset + 128;
218         int ret;
219
220         /* Force SNB workarounds for PIPE_CONTROL flushes */
221         ret = intel_emit_post_sync_nonzero_flush(ring);
222         if (ret)
223                 return ret;
224
225         /* Just flush everything.  Experiments have shown that reducing the
226          * number of bits based on the write domains has little performance
227          * impact.
228          */
229         if (flush_domains) {
230                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
231                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
232                 /*
233                  * Ensure that any following seqno writes only happen
234                  * when the render cache is indeed flushed.
235                  */
236                 flags |= PIPE_CONTROL_CS_STALL;
237         }
238         if (invalidate_domains) {
239                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
240                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
241                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
242                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
243                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
244                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
245                 /*
246                  * TLB invalidate requires a post-sync write.
247                  */
248                 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
249         }
250
251         ret = intel_ring_begin(ring, 4);
252         if (ret)
253                 return ret;
254
255         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
256         intel_ring_emit(ring, flags);
257         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
258         intel_ring_emit(ring, 0);
259         intel_ring_advance(ring);
260
261         return 0;
262 }
263
264 static int
265 gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
266 {
267         int ret;
268
269         ret = intel_ring_begin(ring, 4);
270         if (ret)
271                 return ret;
272
273         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
274         intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
275                               PIPE_CONTROL_STALL_AT_SCOREBOARD);
276         intel_ring_emit(ring, 0);
277         intel_ring_emit(ring, 0);
278         intel_ring_advance(ring);
279
280         return 0;
281 }
282
283 static int
284 gen7_render_ring_flush(struct intel_ring_buffer *ring,
285                        u32 invalidate_domains, u32 flush_domains)
286 {
287         u32 flags = 0;
288         struct pipe_control *pc = ring->private;
289         u32 scratch_addr = pc->gtt_offset + 128;
290         int ret;
291
292         /*
293          * Ensure that any following seqno writes only happen when the render
294          * cache is indeed flushed.
295          *
296          * Workaround: 4th PIPE_CONTROL command (except the ones with only
297          * read-cache invalidate bits set) must have the CS_STALL bit set. We
298          * don't try to be clever and just set it unconditionally.
299          */
300         flags |= PIPE_CONTROL_CS_STALL;
301
302         /* Just flush everything.  Experiments have shown that reducing the
303          * number of bits based on the write domains has little performance
304          * impact.
305          */
306         if (flush_domains) {
307                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
308                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
309         }
310         if (invalidate_domains) {
311                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
312                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
313                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
314                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
315                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
316                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
317                 /*
318                  * TLB invalidate requires a post-sync write.
319                  */
320                 flags |= PIPE_CONTROL_QW_WRITE;
321                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
322
323                 /* Workaround: we must issue a pipe_control with CS-stall bit
324                  * set before a pipe_control command that has the state cache
325                  * invalidate bit set. */
326                 gen7_render_ring_cs_stall_wa(ring);
327         }
328
329         ret = intel_ring_begin(ring, 4);
330         if (ret)
331                 return ret;
332
333         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
334         intel_ring_emit(ring, flags);
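        /* On gen7 the global-GTT bit for the post-sync write is carried in
         * the flags dword (PIPE_CONTROL_GLOBAL_GTT_IVB, set above when a
         * post-sync write is requested), so the scratch address is emitted
         * here without the GTT bit, unlike on gen6. */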
335         intel_ring_emit(ring, scratch_addr);
336         intel_ring_emit(ring, 0);
337         intel_ring_advance(ring);
338
339         return 0;
340 }
341
342 static void ring_write_tail(struct intel_ring_buffer *ring,
343                             u32 value)
344 {
345         drm_i915_private_t *dev_priv = ring->dev->dev_private;
346         I915_WRITE_TAIL(ring, value);
347 }
348
349 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
350 {
351         drm_i915_private_t *dev_priv = ring->dev->dev_private;
352         u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
353                         RING_ACTHD(ring->mmio_base) : ACTHD;
354
355         return I915_READ(acthd_reg);
356 }
357
358 static int init_ring_common(struct intel_ring_buffer *ring)
359 {
360         struct drm_device *dev = ring->dev;
361         drm_i915_private_t *dev_priv = dev->dev_private;
362         struct drm_i915_gem_object *obj = ring->obj;
363         int ret = 0;
364         u32 head;
365
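        /* Where the platform has force-wake, hold it so the GT cannot power
         * down while we reprogram the ring registers below. */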
366         if (HAS_FORCE_WAKE(dev))
367                 gen6_gt_force_wake_get(dev_priv);
368
369         /* Stop the ring if it's running. */
370         I915_WRITE_CTL(ring, 0);
371         I915_WRITE_HEAD(ring, 0);
372         ring->write_tail(ring, 0);
373
374         head = I915_READ_HEAD(ring) & HEAD_ADDR;
375
376         /* G45 ring initialization fails to reset head to zero */
377         if (head != 0) {
378                 DRM_DEBUG_KMS("%s head not reset to zero "
379                               "ctl %08x head %08x tail %08x start %08x\n",
380                               ring->name,
381                               I915_READ_CTL(ring),
382                               I915_READ_HEAD(ring),
383                               I915_READ_TAIL(ring),
384                               I915_READ_START(ring));
385
386                 I915_WRITE_HEAD(ring, 0);
387
388                 if (I915_READ_HEAD(ring) & HEAD_ADDR) {
389                         DRM_ERROR("failed to set %s head to zero "
390                                   "ctl %08x head %08x tail %08x start %08x\n",
391                                   ring->name,
392                                   I915_READ_CTL(ring),
393                                   I915_READ_HEAD(ring),
394                                   I915_READ_TAIL(ring),
395                                   I915_READ_START(ring));
396                 }
397         }
398
399         /* Initialize the ring. This must happen _after_ we've cleared the ring
400          * registers with the above sequence (the readback of the HEAD registers
401          * also enforces ordering), otherwise the hw might lose the new ring
402          * register values. */
403         I915_WRITE_START(ring, obj->gtt_offset);
404         I915_WRITE_CTL(ring,
405                         ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
406                         | RING_VALID);
407
408         /* If the head is still not zero, the ring is dead */
409         if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
410                      I915_READ_START(ring) == obj->gtt_offset &&
411                      (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
412                 DRM_ERROR("%s initialization failed "
413                                 "ctl %08x head %08x tail %08x start %08x\n",
414                                 ring->name,
415                                 I915_READ_CTL(ring),
416                                 I915_READ_HEAD(ring),
417                                 I915_READ_TAIL(ring),
418                                 I915_READ_START(ring));
419                 ret = -EIO;
420                 goto out;
421         }
422
423         if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
424                 i915_kernel_lost_context(ring->dev);
425         else {
426                 ring->head = I915_READ_HEAD(ring);
427                 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
428                 ring->space = ring_space(ring);
429                 ring->last_retired_head = -1;
430         }
431
432 out:
433         if (HAS_FORCE_WAKE(dev))
434                 gen6_gt_force_wake_put(dev_priv);
435
436         return ret;
437 }
438
439 static int
440 init_pipe_control(struct intel_ring_buffer *ring)
441 {
442         struct pipe_control *pc;
443         struct drm_i915_gem_object *obj;
444         int ret;
445
446         if (ring->private)
447                 return 0;
448
449         pc = kmalloc(sizeof(*pc), GFP_KERNEL);
450         if (!pc)
451                 return -ENOMEM;
452
453         obj = i915_gem_alloc_object(ring->dev, 4096);
454         if (obj == NULL) {
455                 DRM_ERROR("Failed to allocate seqno page\n");
456                 ret = -ENOMEM;
457                 goto err;
458         }
459
460         i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
461
462         ret = i915_gem_object_pin(obj, 4096, true, false);
463         if (ret)
464                 goto err_unref;
465
466         pc->gtt_offset = obj->gtt_offset;
467         pc->cpu_page =  kmap(sg_page(obj->pages->sgl));
468         if (pc->cpu_page == NULL)
469                 goto err_unpin;
470
471         DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
472                          ring->name, pc->gtt_offset);
473
474         pc->obj = obj;
475         ring->private = pc;
476         return 0;
477
478 err_unpin:
479         i915_gem_object_unpin(obj);
480 err_unref:
481         drm_gem_object_unreference(&obj->base);
482 err:
483         kfree(pc);
484         return ret;
485 }
486
487 static void
488 cleanup_pipe_control(struct intel_ring_buffer *ring)
489 {
490         struct pipe_control *pc = ring->private;
491         struct drm_i915_gem_object *obj;
492
493         obj = pc->obj;
494
495         kunmap(sg_page(obj->pages->sgl));
496         i915_gem_object_unpin(obj);
497         drm_gem_object_unreference(&obj->base);
498
499         kfree(pc);
500 }
501
502 static int init_render_ring(struct intel_ring_buffer *ring)
503 {
504         struct drm_device *dev = ring->dev;
505         struct drm_i915_private *dev_priv = dev->dev_private;
506         int ret = init_ring_common(ring);
507
508         if (INTEL_INFO(dev)->gen > 3)
509                 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
510
511         /* We need to disable the AsyncFlip performance optimisations in order
512          * to use MI_WAIT_FOR_EVENT within the CS. It should already be
513          * programmed to '1' on all products.
514          */
515         if (INTEL_INFO(dev)->gen >= 6)
516                 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
517
518         /* Required for the hardware to program scanline values for waiting */
519         if (INTEL_INFO(dev)->gen == 6)
520                 I915_WRITE(GFX_MODE,
521                            _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
522
523         if (IS_GEN7(dev))
524                 I915_WRITE(GFX_MODE_GEN7,
525                            _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
526                            _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
527
528         if (INTEL_INFO(dev)->gen >= 5) {
529                 ret = init_pipe_control(ring);
530                 if (ret)
531                         return ret;
532         }
533
534         if (IS_GEN6(dev)) {
535                 /* From the Sandybridge PRM, volume 1 part 3, page 24:
536                  * "If this bit is set, STCunit will have LRA as replacement
537                  *  policy. [...] This bit must be reset.  LRA replacement
538                  *  policy is not supported."
539                  */
540                 I915_WRITE(CACHE_MODE_0,
541                            _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
542
543                 /* This is not explicitly set for GEN6, so read the register.
544                  * see intel_ring_mi_set_context() for why we care.
545                  * TODO: consider explicitly setting the bit for GEN5
546                  */
547                 ring->itlb_before_ctx_switch =
548                         !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
549         }
550
551         if (INTEL_INFO(dev)->gen >= 6)
552                 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
553
554         if (HAS_L3_GPU_CACHE(dev))
555                 I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
556
557         return ret;
558 }
559
560 static void render_ring_cleanup(struct intel_ring_buffer *ring)
561 {
562         struct drm_device *dev = ring->dev;
563
564         if (!ring->private)
565                 return;
566
567         if (HAS_BROKEN_CS_TLB(dev))
568                 drm_gem_object_unreference(to_gem_object(ring->private));
569
570         if (INTEL_INFO(dev)->gen >= 5)
571                 cleanup_pipe_control(ring);
572
573         ring->private = NULL;
574 }
575
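/*
 * Write the outstanding seqno into another ring's semaphore mailbox register
 * (an MMIO write issued from the command streamer via MI_LOAD_REGISTER_IMM),
 * so that ring can later wait on it with MI_SEMAPHORE_MBOX in gen6_ring_sync.
 */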
576 static void
577 update_mboxes(struct intel_ring_buffer *ring,
578               u32 mmio_offset)
579 {
580         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
581         intel_ring_emit(ring, mmio_offset);
582         intel_ring_emit(ring, ring->outstanding_lazy_request);
583 }
584
585 /**
586  * gen6_add_request - Update the semaphore mailbox registers
587  *
588  * @ring: ring that is adding a request
589  * @seqno: return seqno stuck into the ring
590  *
591  * Update the mailbox registers in the *other* rings with the current seqno.
592  * This acts like a signal in the canonical semaphore.
593  */
594 static int
595 gen6_add_request(struct intel_ring_buffer *ring)
596 {
597         u32 mbox1_reg;
598         u32 mbox2_reg;
599         int ret;
600
601         ret = intel_ring_begin(ring, 10);
602         if (ret)
603                 return ret;
604
605         mbox1_reg = ring->signal_mbox[0];
606         mbox2_reg = ring->signal_mbox[1];
607
608         update_mboxes(ring, mbox1_reg);
609         update_mboxes(ring, mbox2_reg);
610         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
611         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
612         intel_ring_emit(ring, ring->outstanding_lazy_request);
613         intel_ring_emit(ring, MI_USER_INTERRUPT);
614         intel_ring_advance(ring);
615
616         return 0;
617 }
618
619 static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
620                                               u32 seqno)
621 {
622         struct drm_i915_private *dev_priv = dev->dev_private;
623         return dev_priv->last_seqno < seqno;
624 }
625
626 /**
627  * gen6_ring_sync - sync the waiter to the signaller on seqno
628  *
629  * @waiter: ring that is waiting
630  * @signaller: ring which has, or will signal
631  * @seqno: seqno which the waiter will block on
632  */
633 static int
634 gen6_ring_sync(struct intel_ring_buffer *waiter,
635                struct intel_ring_buffer *signaller,
636                u32 seqno)
637 {
638         int ret;
639         u32 dw1 = MI_SEMAPHORE_MBOX |
640                   MI_SEMAPHORE_COMPARE |
641                   MI_SEMAPHORE_REGISTER;
642
643         /* Throughout all of the GEM code, seqno passed implies our current
644          * seqno is >= the last seqno executed. However for hardware the
645          * comparison is strictly greater than.
646          */
647         seqno -= 1;
648
649         WARN_ON(signaller->semaphore_register[waiter->id] ==
650                 MI_SEMAPHORE_SYNC_INVALID);
651
652         ret = intel_ring_begin(waiter, 4);
653         if (ret)
654                 return ret;
655
656         /* If seqno wrap happened, omit the wait with no-ops */
657         if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
658                 intel_ring_emit(waiter,
659                                 dw1 |
660                                 signaller->semaphore_register[waiter->id]);
661                 intel_ring_emit(waiter, seqno);
662                 intel_ring_emit(waiter, 0);
663                 intel_ring_emit(waiter, MI_NOOP);
664         } else {
665                 intel_ring_emit(waiter, MI_NOOP);
666                 intel_ring_emit(waiter, MI_NOOP);
667                 intel_ring_emit(waiter, MI_NOOP);
668                 intel_ring_emit(waiter, MI_NOOP);
669         }
670         intel_ring_advance(waiter);
671
672         return 0;
673 }
674
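/*
 * Emit a depth-stalling PIPE_CONTROL that performs a qword post-sync write to
 * the given scratch address; pc_render_add_request() uses a series of these
 * to flush the pipeline between its seqno writes (see the comment there).
 */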
675 #define PIPE_CONTROL_FLUSH(ring__, addr__)                                      \
676 do {                                                                    \
677         intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |                \
678                  PIPE_CONTROL_DEPTH_STALL);                             \
679         intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
680         intel_ring_emit(ring__, 0);                                                     \
681         intel_ring_emit(ring__, 0);                                                     \
682 } while (0)
683
684 static int
685 pc_render_add_request(struct intel_ring_buffer *ring)
686 {
687         struct pipe_control *pc = ring->private;
688         u32 scratch_addr = pc->gtt_offset + 128;
689         int ret;
690
691         /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
692          * incoherent with writes to memory, i.e. completely fubar,
693          * so we need to use PIPE_NOTIFY instead.
694          *
695          * However, we also need to workaround the qword write
696          * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
697          * memory before requesting an interrupt.
698          */
699         ret = intel_ring_begin(ring, 32);
700         if (ret)
701                 return ret;
702
703         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
704                         PIPE_CONTROL_WRITE_FLUSH |
705                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
706         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
707         intel_ring_emit(ring, ring->outstanding_lazy_request);
708         intel_ring_emit(ring, 0);
709         PIPE_CONTROL_FLUSH(ring, scratch_addr);
710         scratch_addr += 128; /* write to separate cachelines */
711         PIPE_CONTROL_FLUSH(ring, scratch_addr);
712         scratch_addr += 128;
713         PIPE_CONTROL_FLUSH(ring, scratch_addr);
714         scratch_addr += 128;
715         PIPE_CONTROL_FLUSH(ring, scratch_addr);
716         scratch_addr += 128;
717         PIPE_CONTROL_FLUSH(ring, scratch_addr);
718         scratch_addr += 128;
719         PIPE_CONTROL_FLUSH(ring, scratch_addr);
720
721         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
722                         PIPE_CONTROL_WRITE_FLUSH |
723                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
724                         PIPE_CONTROL_NOTIFY);
725         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
726         intel_ring_emit(ring, ring->outstanding_lazy_request);
727         intel_ring_emit(ring, 0);
728         intel_ring_advance(ring);
729
730         return 0;
731 }
732
733 static u32
734 gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
735 {
736         /* Workaround to force correct ordering between irq and seqno writes on
737          * ivb (and maybe also on snb) by reading from a CS register (like
738          * ACTHD) before reading the status page. */
739         if (!lazy_coherency)
740                 intel_ring_get_active_head(ring);
741         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
742 }
743
744 static u32
745 ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
746 {
747         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
748 }
749
750 static void
751 ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
752 {
753         intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
754 }
755
756 static u32
757 pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
758 {
759         struct pipe_control *pc = ring->private;
760         return pc->cpu_page[0];
761 }
762
763 static void
764 pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
765 {
766         struct pipe_control *pc = ring->private;
767         pc->cpu_page[0] = seqno;
768 }
769
770 static bool
771 gen5_ring_get_irq(struct intel_ring_buffer *ring)
772 {
773         struct drm_device *dev = ring->dev;
774         drm_i915_private_t *dev_priv = dev->dev_private;
775         unsigned long flags;
776
777         if (!dev->irq_enabled)
778                 return false;
779
780         spin_lock_irqsave(&dev_priv->irq_lock, flags);
781         if (ring->irq_refcount++ == 0) {
782                 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
783                 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
784                 POSTING_READ(GTIMR);
785         }
786         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
787
788         return true;
789 }
790
791 static void
792 gen5_ring_put_irq(struct intel_ring_buffer *ring)
793 {
794         struct drm_device *dev = ring->dev;
795         drm_i915_private_t *dev_priv = dev->dev_private;
796         unsigned long flags;
797
798         spin_lock_irqsave(&dev_priv->irq_lock, flags);
799         if (--ring->irq_refcount == 0) {
800                 dev_priv->gt_irq_mask |= ring->irq_enable_mask;
801                 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
802                 POSTING_READ(GTIMR);
803         }
804         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
805 }
806
807 static bool
808 i9xx_ring_get_irq(struct intel_ring_buffer *ring)
809 {
810         struct drm_device *dev = ring->dev;
811         drm_i915_private_t *dev_priv = dev->dev_private;
812         unsigned long flags;
813
814         if (!dev->irq_enabled)
815                 return false;
816
817         spin_lock_irqsave(&dev_priv->irq_lock, flags);
818         if (ring->irq_refcount++ == 0) {
819                 dev_priv->irq_mask &= ~ring->irq_enable_mask;
820                 I915_WRITE(IMR, dev_priv->irq_mask);
821                 POSTING_READ(IMR);
822         }
823         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
824
825         return true;
826 }
827
828 static void
829 i9xx_ring_put_irq(struct intel_ring_buffer *ring)
830 {
831         struct drm_device *dev = ring->dev;
832         drm_i915_private_t *dev_priv = dev->dev_private;
833         unsigned long flags;
834
835         spin_lock_irqsave(&dev_priv->irq_lock, flags);
836         if (--ring->irq_refcount == 0) {
837                 dev_priv->irq_mask |= ring->irq_enable_mask;
838                 I915_WRITE(IMR, dev_priv->irq_mask);
839                 POSTING_READ(IMR);
840         }
841         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
842 }
843
844 static bool
845 i8xx_ring_get_irq(struct intel_ring_buffer *ring)
846 {
847         struct drm_device *dev = ring->dev;
848         drm_i915_private_t *dev_priv = dev->dev_private;
849         unsigned long flags;
850
851         if (!dev->irq_enabled)
852                 return false;
853
854         spin_lock_irqsave(&dev_priv->irq_lock, flags);
855         if (ring->irq_refcount++ == 0) {
856                 dev_priv->irq_mask &= ~ring->irq_enable_mask;
857                 I915_WRITE16(IMR, dev_priv->irq_mask);
858                 POSTING_READ16(IMR);
859         }
860         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
861
862         return true;
863 }
864
865 static void
866 i8xx_ring_put_irq(struct intel_ring_buffer *ring)
867 {
868         struct drm_device *dev = ring->dev;
869         drm_i915_private_t *dev_priv = dev->dev_private;
870         unsigned long flags;
871
872         spin_lock_irqsave(&dev_priv->irq_lock, flags);
873         if (--ring->irq_refcount == 0) {
874                 dev_priv->irq_mask |= ring->irq_enable_mask;
875                 I915_WRITE16(IMR, dev_priv->irq_mask);
876                 POSTING_READ16(IMR);
877         }
878         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
879 }
880
881 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
882 {
883         struct drm_device *dev = ring->dev;
884         drm_i915_private_t *dev_priv = ring->dev->dev_private;
885         u32 mmio = 0;
886
887         /* The ring status page addresses are no longer next to the rest of
888          * the ring registers as of gen7.
889          */
890         if (IS_GEN7(dev)) {
891                 switch (ring->id) {
892                 case RCS:
893                         mmio = RENDER_HWS_PGA_GEN7;
894                         break;
895                 case BCS:
896                         mmio = BLT_HWS_PGA_GEN7;
897                         break;
898                 case VCS:
899                         mmio = BSD_HWS_PGA_GEN7;
900                         break;
901                 }
902         } else if (IS_GEN6(ring->dev)) {
903                 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
904         } else {
905                 mmio = RING_HWS_PGA(ring->mmio_base);
906         }
907
908         I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
909         POSTING_READ(mmio);
910
911         /* Flush the TLB for this page */
912         if (INTEL_INFO(dev)->gen >= 6) {
913                 u32 reg = RING_INSTPM(ring->mmio_base);
914                 I915_WRITE(reg,
915                            _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
916                                               INSTPM_SYNC_FLUSH));
917                 if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
918                              1000))
919                         DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
920                                   ring->name);
921         }
922 }
923
924 static int
925 bsd_ring_flush(struct intel_ring_buffer *ring,
926                u32     invalidate_domains,
927                u32     flush_domains)
928 {
929         int ret;
930
931         ret = intel_ring_begin(ring, 2);
932         if (ret)
933                 return ret;
934
935         intel_ring_emit(ring, MI_FLUSH);
936         intel_ring_emit(ring, MI_NOOP);
937         intel_ring_advance(ring);
938         return 0;
939 }
940
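/*
 * Simple request emission: store the new seqno into the hardware status page
 * at I915_GEM_HWS_INDEX, then raise MI_USER_INTERRUPT to wake any waiters.
 */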
941 static int
942 i9xx_add_request(struct intel_ring_buffer *ring)
943 {
944         int ret;
945
946         ret = intel_ring_begin(ring, 4);
947         if (ret)
948                 return ret;
949
950         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
951         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
952         intel_ring_emit(ring, ring->outstanding_lazy_request);
953         intel_ring_emit(ring, MI_USER_INTERRUPT);
954         intel_ring_advance(ring);
955
956         return 0;
957 }
958
959 static bool
960 gen6_ring_get_irq(struct intel_ring_buffer *ring)
961 {
962         struct drm_device *dev = ring->dev;
963         drm_i915_private_t *dev_priv = dev->dev_private;
964         unsigned long flags;
965
966         if (!dev->irq_enabled)
967                 return false;
968
969         /* It looks like we need to prevent the gt from suspending while waiting
970          * for a notify irq, otherwise irqs seem to get lost on at least the
971          * blt/bsd rings on ivb. */
972         gen6_gt_force_wake_get(dev_priv);
973
974         spin_lock_irqsave(&dev_priv->irq_lock, flags);
975         if (ring->irq_refcount++ == 0) {
976                 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
977                         I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
978                                                 GEN6_RENDER_L3_PARITY_ERROR));
979                 else
980                         I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
981                 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
982                 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
983                 POSTING_READ(GTIMR);
984         }
985         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
986
987         return true;
988 }
989
990 static void
991 gen6_ring_put_irq(struct intel_ring_buffer *ring)
992 {
993         struct drm_device *dev = ring->dev;
994         drm_i915_private_t *dev_priv = dev->dev_private;
995         unsigned long flags;
996
997         spin_lock_irqsave(&dev_priv->irq_lock, flags);
998         if (--ring->irq_refcount == 0) {
999                 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
1000                         I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
1001                 else
1002                         I915_WRITE_IMR(ring, ~0);
1003                 dev_priv->gt_irq_mask |= ring->irq_enable_mask;
1004                 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1005                 POSTING_READ(GTIMR);
1006         }
1007         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1008
1009         gen6_gt_force_wake_put(dev_priv);
1010 }
1011
1012 static int
1013 i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
1014                          u32 offset, u32 length,
1015                          unsigned flags)
1016 {
1017         int ret;
1018
1019         ret = intel_ring_begin(ring, 2);
1020         if (ret)
1021                 return ret;
1022
1023         intel_ring_emit(ring,
1024                         MI_BATCH_BUFFER_START |
1025                         MI_BATCH_GTT |
1026                         (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
1027         intel_ring_emit(ring, offset);
1028         intel_ring_advance(ring);
1029
1030         return 0;
1031 }
1032
1033 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1034 #define I830_BATCH_LIMIT (256*1024)
1035 static int
1036 i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
1037                                 u32 offset, u32 len,
1038                                 unsigned flags)
1039 {
1040         int ret;
1041
1042         if (flags & I915_DISPATCH_PINNED) {
1043                 ret = intel_ring_begin(ring, 4);
1044                 if (ret)
1045                         return ret;
1046
1047                 intel_ring_emit(ring, MI_BATCH_BUFFER);
1048                 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1049                 intel_ring_emit(ring, offset + len - 8);
1050                 intel_ring_emit(ring, MI_NOOP);
1051                 intel_ring_advance(ring);
1052         } else {
1053                 struct drm_i915_gem_object *obj = ring->private;
1054                 u32 cs_offset = obj->gtt_offset;
1055
1056                 if (len > I830_BATCH_LIMIT)
1057                         return -ENOSPC;
1058
1059                 ret = intel_ring_begin(ring, 9+3);
1060                 if (ret)
1061                         return ret;
1062                 /* Blit the batch (which now has all relocs applied) to the stable batch
1063                  * scratch bo area (so that the CS never stumbles over its tlb
1064                  * invalidation bug) ... */
1065                 intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
1066                                 XY_SRC_COPY_BLT_WRITE_ALPHA |
1067                                 XY_SRC_COPY_BLT_WRITE_RGB);
1068                 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
1069                 intel_ring_emit(ring, 0);
1070                 intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
1071                 intel_ring_emit(ring, cs_offset);
1072                 intel_ring_emit(ring, 0);
1073                 intel_ring_emit(ring, 4096);
1074                 intel_ring_emit(ring, offset);
1075                 intel_ring_emit(ring, MI_FLUSH);
1076
1077                 /* ... and execute it. */
1078                 intel_ring_emit(ring, MI_BATCH_BUFFER);
1079                 intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1080                 intel_ring_emit(ring, cs_offset + len - 8);
1081                 intel_ring_advance(ring);
1082         }
1083
1084         return 0;
1085 }
1086
1087 static int
1088 i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
1089                          u32 offset, u32 len,
1090                          unsigned flags)
1091 {
1092         int ret;
1093
1094         ret = intel_ring_begin(ring, 2);
1095         if (ret)
1096                 return ret;
1097
1098         intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1099         intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1100         intel_ring_advance(ring);
1101
1102         return 0;
1103 }
1104
1105 static void cleanup_status_page(struct intel_ring_buffer *ring)
1106 {
1107         struct drm_i915_gem_object *obj;
1108
1109         obj = ring->status_page.obj;
1110         if (obj == NULL)
1111                 return;
1112
1113         kunmap(sg_page(obj->pages->sgl));
1114         i915_gem_object_unpin(obj);
1115         drm_gem_object_unreference(&obj->base);
1116         ring->status_page.obj = NULL;
1117 }
1118
1119 static int init_status_page(struct intel_ring_buffer *ring)
1120 {
1121         struct drm_device *dev = ring->dev;
1122         struct drm_i915_gem_object *obj;
1123         int ret;
1124
1125         obj = i915_gem_alloc_object(dev, 4096);
1126         if (obj == NULL) {
1127                 DRM_ERROR("Failed to allocate status page\n");
1128                 ret = -ENOMEM;
1129                 goto err;
1130         }
1131
1132         i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1133
1134         ret = i915_gem_object_pin(obj, 4096, true, false);
1135         if (ret != 0) {
1136                 goto err_unref;
1137         }
1138
1139         ring->status_page.gfx_addr = obj->gtt_offset;
1140         ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
1141         if (ring->status_page.page_addr == NULL) {
1142                 ret = -ENOMEM;
1143                 goto err_unpin;
1144         }
1145         ring->status_page.obj = obj;
1146         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1147
1148         intel_ring_setup_status_page(ring);
1149         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1150                         ring->name, ring->status_page.gfx_addr);
1151
1152         return 0;
1153
1154 err_unpin:
1155         i915_gem_object_unpin(obj);
1156 err_unref:
1157         drm_gem_object_unreference(&obj->base);
1158 err:
1159         return ret;
1160 }
1161
1162 static int init_phys_hws_pga(struct intel_ring_buffer *ring)
1163 {
1164         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1165         u32 addr;
1166
1167         if (!dev_priv->status_page_dmah) {
1168                 dev_priv->status_page_dmah =
1169                         drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
1170                 if (!dev_priv->status_page_dmah)
1171                         return -ENOMEM;
1172         }
1173
1174         addr = dev_priv->status_page_dmah->busaddr;
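        /* On gen4+, bits [35:32] of the bus address are carried in bits [7:4]
         * of the HWS_PGA value; fold them in so status pages above 4GiB can
         * still be addressed. */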
1175         if (INTEL_INFO(ring->dev)->gen >= 4)
1176                 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
1177         I915_WRITE(HWS_PGA, addr);
1178
1179         ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1180         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1181
1182         return 0;
1183 }
1184
1185 static int intel_init_ring_buffer(struct drm_device *dev,
1186                                   struct intel_ring_buffer *ring)
1187 {
1188         struct drm_i915_gem_object *obj;
1189         struct drm_i915_private *dev_priv = dev->dev_private;
1190         int ret;
1191
1192         ring->dev = dev;
1193         INIT_LIST_HEAD(&ring->active_list);
1194         INIT_LIST_HEAD(&ring->request_list);
1195         ring->size = 32 * PAGE_SIZE;
1196         memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
1197
1198         init_waitqueue_head(&ring->irq_queue);
1199
1200         if (I915_NEED_GFX_HWS(dev)) {
1201                 ret = init_status_page(ring);
1202                 if (ret)
1203                         return ret;
1204         } else {
1205                 BUG_ON(ring->id != RCS);
1206                 ret = init_phys_hws_pga(ring);
1207                 if (ret)
1208                         return ret;
1209         }
1210
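        /* On non-LLC platforms try to place the ring in stolen memory first;
         * fall back to a regular GEM object if that fails (and always use a
         * regular object on LLC platforms). */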
1211         obj = NULL;
1212         if (!HAS_LLC(dev))
1213                 obj = i915_gem_object_create_stolen(dev, ring->size);
1214         if (obj == NULL)
1215                 obj = i915_gem_alloc_object(dev, ring->size);
1216         if (obj == NULL) {
1217                 DRM_ERROR("Failed to allocate ringbuffer\n");
1218                 ret = -ENOMEM;
1219                 goto err_hws;
1220         }
1221
1222         ring->obj = obj;
1223
1224         ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
1225         if (ret)
1226                 goto err_unref;
1227
1228         ret = i915_gem_object_set_to_gtt_domain(obj, true);
1229         if (ret)
1230                 goto err_unpin;
1231
1232         ring->virtual_start =
1233                 ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
1234                            ring->size);
1235         if (ring->virtual_start == NULL) {
1236                 DRM_ERROR("Failed to map ringbuffer.\n");
1237                 ret = -EINVAL;
1238                 goto err_unpin;
1239         }
1240
1241         ret = ring->init(ring);
1242         if (ret)
1243                 goto err_unmap;
1244
1245         /* Workaround an erratum on the i830 which causes a hang if
1246          * the TAIL pointer points to within the last 2 cachelines
1247          * of the buffer.
1248          */
1249         ring->effective_size = ring->size;
1250         if (IS_I830(ring->dev) || IS_845G(ring->dev))
1251                 ring->effective_size -= 128;
1252
1253         return 0;
1254
1255 err_unmap:
1256         iounmap(ring->virtual_start);
1257 err_unpin:
1258         i915_gem_object_unpin(obj);
1259 err_unref:
1260         drm_gem_object_unreference(&obj->base);
1261         ring->obj = NULL;
1262 err_hws:
1263         cleanup_status_page(ring);
1264         return ret;
1265 }
1266
1267 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1268 {
1269         struct drm_i915_private *dev_priv;
1270         int ret;
1271
1272         if (ring->obj == NULL)
1273                 return;
1274
1275         /* Disable the ring buffer. The ring must be idle at this point */
1276         dev_priv = ring->dev->dev_private;
1277         ret = intel_ring_idle(ring);
1278         if (ret)
1279                 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
1280                           ring->name, ret);
1281
1282         I915_WRITE_CTL(ring, 0);
1283
1284         iounmap(ring->virtual_start);
1285
1286         i915_gem_object_unpin(ring->obj);
1287         drm_gem_object_unreference(&ring->obj->base);
1288         ring->obj = NULL;
1289
1290         if (ring->cleanup)
1291                 ring->cleanup(ring);
1292
1293         cleanup_status_page(ring);
1294 }
1295
1296 static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
1297 {
1298         int ret;
1299
1300         ret = i915_wait_seqno(ring, seqno);
1301         if (!ret)
1302                 i915_gem_retire_requests_ring(ring);
1303
1304         return ret;
1305 }
1306
1307 static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1308 {
1309         struct drm_i915_gem_request *request;
1310         u32 seqno = 0;
1311         int ret;
1312
1313         i915_gem_retire_requests_ring(ring);
1314
1315         if (ring->last_retired_head != -1) {
1316                 ring->head = ring->last_retired_head;
1317                 ring->last_retired_head = -1;
1318                 ring->space = ring_space(ring);
1319                 if (ring->space >= n)
1320                         return 0;
1321         }
1322
1323         list_for_each_entry(request, &ring->request_list, list) {
1324                 int space;
1325
1326                 if (request->tail == -1)
1327                         continue;
1328
1329                 space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
1330                 if (space < 0)
1331                         space += ring->size;
1332                 if (space >= n) {
1333                         seqno = request->seqno;
1334                         break;
1335                 }
1336
1337                 /* Consume this request in case we need more space than
1338                  * is available and so need to prevent a race between
1339                  * updating last_retired_head and direct reads of
1340                  * I915_RING_HEAD. It also provides a nice sanity check.
1341                  */
1342                 request->tail = -1;
1343         }
1344
1345         if (seqno == 0)
1346                 return -ENOSPC;
1347
1348         ret = intel_ring_wait_seqno(ring, seqno);
1349         if (ret)
1350                 return ret;
1351
1352         if (WARN_ON(ring->last_retired_head == -1))
1353                 return -ENOSPC;
1354
1355         ring->head = ring->last_retired_head;
1356         ring->last_retired_head = -1;
1357         ring->space = ring_space(ring);
1358         if (WARN_ON(ring->space < n))
1359                 return -ENOSPC;
1360
1361         return 0;
1362 }
1363
1364 static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
1365 {
1366         struct drm_device *dev = ring->dev;
1367         struct drm_i915_private *dev_priv = dev->dev_private;
1368         unsigned long end;
1369         int ret;
1370
1371         ret = intel_ring_wait_request(ring, n);
1372         if (ret != -ENOSPC)
1373                 return ret;
1374
1375         trace_i915_ring_wait_begin(ring);
1376         /* With GEM the hangcheck timer should kick us out of the loop,
1377          * leaving it early runs the risk of corrupting GEM state (due
1378          * to running on almost untested codepaths). But on resume
1379          * timers don't work yet, so prevent a complete hang in that
1380          * case by choosing an insanely large timeout. */
1381         end = jiffies + 60 * HZ;
1382
1383         do {
1384                 ring->head = I915_READ_HEAD(ring);
1385                 ring->space = ring_space(ring);
1386                 if (ring->space >= n) {
1387                         trace_i915_ring_wait_end(ring);
1388                         return 0;
1389                 }
1390
1391                 if (dev->primary->master) {
1392                         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1393                         if (master_priv->sarea_priv)
1394                                 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1395                 }
1396
1397                 msleep(1);
1398
1399                 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
1400                                            dev_priv->mm.interruptible);
1401                 if (ret)
1402                         return ret;
1403         } while (!time_after(jiffies, end));
1404         trace_i915_ring_wait_end(ring);
1405         return -EBUSY;
1406 }
1407
1408 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1409 {
1410         uint32_t __iomem *virt;
1411         int rem = ring->size - ring->tail;
1412
1413         if (ring->space < rem) {
1414                 int ret = ring_wait_for_space(ring, rem);
1415                 if (ret)
1416                         return ret;
1417         }
1418
1419         virt = ring->virtual_start + ring->tail;
1420         rem /= 4;
1421         while (rem--)
1422                 iowrite32(MI_NOOP, virt++);
1423
1424         ring->tail = 0;
1425         ring->space = ring_space(ring);
1426
1427         return 0;
1428 }
1429
1430 int intel_ring_idle(struct intel_ring_buffer *ring)
1431 {
1432         u32 seqno;
1433         int ret;
1434
1435         /* We need to add any requests required to flush the objects and ring */
1436         if (ring->outstanding_lazy_request) {
1437                 ret = i915_add_request(ring, NULL, NULL);
1438                 if (ret)
1439                         return ret;
1440         }
1441
1442         /* Wait upon the last request to be completed */
1443         if (list_empty(&ring->request_list))
1444                 return 0;
1445
1446         seqno = list_entry(ring->request_list.prev,
1447                            struct drm_i915_gem_request,
1448                            list)->seqno;
1449
1450         return i915_wait_seqno(ring, seqno);
1451 }
1452
1453 static int
1454 intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
1455 {
1456         if (ring->outstanding_lazy_request)
1457                 return 0;
1458
1459         return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
1460 }
1461
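/*
 * Make room for 'bytes' of new commands: pad and wrap to the start of the
 * ring if the request would run past effective_size, then wait for enough
 * previously submitted commands to be retired.
 */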
1462 static int __intel_ring_prepare(struct intel_ring_buffer *ring,
1463                                 int bytes)
1464 {
1465         int ret;
1466
1467         if (unlikely(ring->tail + bytes > ring->effective_size)) {
1468                 ret = intel_wrap_ring_buffer(ring);
1469                 if (unlikely(ret))
1470                         return ret;
1471         }
1472
1473         if (unlikely(ring->space < bytes)) {
1474                 ret = ring_wait_for_space(ring, bytes);
1475                 if (unlikely(ret))
1476                         return ret;
1477         }
1478
1479         return 0;
1480 }
1481
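/*
 * Reserve space for num_dwords commands and leave the ring ready for that
 * many intel_ring_emit() calls.  Typical usage in this file (sketch):
 *
 *      ret = intel_ring_begin(ring, 2);
 *      if (ret)
 *              return ret;
 *      intel_ring_emit(ring, MI_FLUSH);
 *      intel_ring_emit(ring, MI_NOOP);
 *      intel_ring_advance(ring);
 */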
1482 int intel_ring_begin(struct intel_ring_buffer *ring,
1483                      int num_dwords)
1484 {
1485         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1486         int ret;
1487
1488         ret = i915_gem_check_wedge(&dev_priv->gpu_error,
1489                                    dev_priv->mm.interruptible);
1490         if (ret)
1491                 return ret;
1492
1493         ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
1494         if (ret)
1495                 return ret;
1496
1497         /* Preallocate the olr before touching the ring */
1498         ret = intel_ring_alloc_seqno(ring);
1499         if (ret)
1500                 return ret;
1501
1502         ring->space -= num_dwords * sizeof(uint32_t);
1503         return 0;
1504 }
1505
1506 void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1507 {
1508         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1509
1510         BUG_ON(ring->outstanding_lazy_request);
1511
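        /* Also clear the semaphore mailbox registers, so that values left
         * over from before the seqno space was reset cannot confuse a later
         * semaphore wait. */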
1512         if (INTEL_INFO(ring->dev)->gen >= 6) {
1513                 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
1514                 I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
1515         }
1516
1517         ring->set_seqno(ring, seqno);
1518 }
1519
1520 void intel_ring_advance(struct intel_ring_buffer *ring)
1521 {
1522         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1523
1524         ring->tail &= ring->size - 1;
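        /* If this ring has been marked as stopped in gpu_error.stop_rings
         * (used to simulate hangs), keep the commands in the buffer but don't
         * let the hardware see the new tail. */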
1525         if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
1526                 return;
1527         ring->write_tail(ring, ring->tail);
1528 }
1529
1530
1531 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1532                                      u32 value)
1533 {
1534         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1535
1536         /* Every tail move must follow the sequence below */
1537
1538         /* Disable notification that the ring is IDLE. The GT
1539          * will then assume that it is busy and bring it out of rc6.
1540          */
1541         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1542                    _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1543
1544         /* Clear the context id. Here be magic! */
1545         I915_WRITE64(GEN6_BSD_RNCID, 0x0);
1546
1547         /* Wait for the ring not to be idle, i.e. for it to wake up. */
1548         if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1549                       GEN6_BSD_SLEEP_INDICATOR) == 0,
1550                      50))
1551                 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
1552
1553         /* Now that the ring is fully powered up, update the tail */
1554         I915_WRITE_TAIL(ring, value);
1555         POSTING_READ(RING_TAIL(ring->mmio_base));
1556
1557         /* Let the ring send IDLE messages to the GT again,
1558          * and so let it sleep to conserve power when idle.
1559          */
1560         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1561                    _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1562 }
1563
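/*
 * Flush for the gen6+ video (BSD) ring: a single MI_FLUSH_DW which, when
 * GPU domains are being invalidated, also requests TLB invalidation and a
 * post-sync write to the per-ring HWS scratch page (the only mode in which
 * the TLB bit is valid, per the Bspec note below).
 */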
1564 static int gen6_ring_flush(struct intel_ring_buffer *ring,
1565                            u32 invalidate, u32 flush)
1566 {
1567         uint32_t cmd;
1568         int ret;
1569
1570         ret = intel_ring_begin(ring, 4);
1571         if (ret)
1572                 return ret;
1573
1574         cmd = MI_FLUSH_DW;
1575         /*
1576          * Bspec vol 1c.5 - video engine command streamer:
1577          * "If ENABLED, all TLBs will be invalidated once the flush
1578          * operation is complete. This bit is only valid when the
1579          * Post-Sync Operation field is a value of 1h or 3h."
1580          */
1581         if (invalidate & I915_GEM_GPU_DOMAINS)
1582                 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
1583                         MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1584         intel_ring_emit(ring, cmd);
1585         intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1586         intel_ring_emit(ring, 0);
1587         intel_ring_emit(ring, MI_NOOP);
1588         intel_ring_advance(ring);
1589         return 0;
1590 }
1591
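/*
 * Batch dispatch on gen6+ is a two-dword MI_BATCH_BUFFER_START: the command
 * (with the non-secure bit set unless the caller asked for a secure
 * dispatch) followed by the batch offset.  The Haswell variant below
 * additionally selects the PPGTT address space via MI_BATCH_PPGTT_HSW.
 */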
1592 static int
1593 hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1594                               u32 offset, u32 len,
1595                               unsigned flags)
1596 {
1597         int ret;
1598
1599         ret = intel_ring_begin(ring, 2);
1600         if (ret)
1601                 return ret;
1602
1603         intel_ring_emit(ring,
1604                         MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
1605                         (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
1606         /* bits 0-7 encode the length on GEN6+ */
1607         intel_ring_emit(ring, offset);
1608         intel_ring_advance(ring);
1609
1610         return 0;
1611 }
1612
1613 static int
1614 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1615                               u32 offset, u32 len,
1616                               unsigned flags)
1617 {
1618         int ret;
1619
1620         ret = intel_ring_begin(ring, 2);
1621         if (ret)
1622                 return ret;
1623
1624         intel_ring_emit(ring,
1625                         MI_BATCH_BUFFER_START |
1626                         (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
1627         /* bits 0-7 encode the length on GEN6+ */
1628         intel_ring_emit(ring, offset);
1629         intel_ring_advance(ring);
1630
1631         return 0;
1632 }
1633
1634 /* Blitter support (SandyBridge+) */
1635
1636 static int blt_ring_flush(struct intel_ring_buffer *ring,
1637                           u32 invalidate, u32 flush)
1638 {
1639         uint32_t cmd;
1640         int ret;
1641
1642         ret = intel_ring_begin(ring, 4);
1643         if (ret)
1644                 return ret;
1645
1646         cmd = MI_FLUSH_DW;
1647         /*
1648          * Bspec vol 1c.3 - blitter engine command streamer:
1649          * "If ENABLED, all TLBs will be invalidated once the flush
1650          * operation is complete. This bit is only valid when the
1651          * Post-Sync Operation field is a value of 1h or 3h."
1652          */
1653         if (invalidate & I915_GEM_DOMAIN_RENDER)
1654                 cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
1655                         MI_FLUSH_DW_OP_STOREDW;
1656         intel_ring_emit(ring, cmd);
1657         intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1658         intel_ring_emit(ring, 0);
1659         intel_ring_emit(ring, MI_NOOP);
1660         intel_ring_advance(ring);
1661         return 0;
1662 }
1663
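/*
 * Fill in the per-generation vfuncs for the render ring.  On platforms with
 * the broken CS TLB, a pinned scratch batch is also allocated here for the
 * workaround applied at dispatch time.
 */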
1664 int intel_init_render_ring_buffer(struct drm_device *dev)
1665 {
1666         drm_i915_private_t *dev_priv = dev->dev_private;
1667         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1668
1669         ring->name = "render ring";
1670         ring->id = RCS;
1671         ring->mmio_base = RENDER_RING_BASE;
1672
1673         if (INTEL_INFO(dev)->gen >= 6) {
1674                 ring->add_request = gen6_add_request;
1675                 ring->flush = gen7_render_ring_flush;
1676                 if (INTEL_INFO(dev)->gen == 6)
1677                         ring->flush = gen6_render_ring_flush;
1678                 ring->irq_get = gen6_ring_get_irq;
1679                 ring->irq_put = gen6_ring_put_irq;
1680                 ring->irq_enable_mask = GT_USER_INTERRUPT;
1681                 ring->get_seqno = gen6_ring_get_seqno;
1682                 ring->set_seqno = ring_set_seqno;
1683                 ring->sync_to = gen6_ring_sync;
1684                 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
1685                 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
1686                 ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
1687                 ring->signal_mbox[0] = GEN6_VRSYNC;
1688                 ring->signal_mbox[1] = GEN6_BRSYNC;
1689         } else if (IS_GEN5(dev)) {
1690                 ring->add_request = pc_render_add_request;
1691                 ring->flush = gen4_render_ring_flush;
1692                 ring->get_seqno = pc_render_get_seqno;
1693                 ring->set_seqno = pc_render_set_seqno;
1694                 ring->irq_get = gen5_ring_get_irq;
1695                 ring->irq_put = gen5_ring_put_irq;
1696                 ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
1697         } else {
1698                 ring->add_request = i9xx_add_request;
1699                 if (INTEL_INFO(dev)->gen < 4)
1700                         ring->flush = gen2_render_ring_flush;
1701                 else
1702                         ring->flush = gen4_render_ring_flush;
1703                 ring->get_seqno = ring_get_seqno;
1704                 ring->set_seqno = ring_set_seqno;
1705                 if (IS_GEN2(dev)) {
1706                         ring->irq_get = i8xx_ring_get_irq;
1707                         ring->irq_put = i8xx_ring_put_irq;
1708                 } else {
1709                         ring->irq_get = i9xx_ring_get_irq;
1710                         ring->irq_put = i9xx_ring_put_irq;
1711                 }
1712                 ring->irq_enable_mask = I915_USER_INTERRUPT;
1713         }
1714         ring->write_tail = ring_write_tail;
1715         if (IS_HASWELL(dev))
1716                 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
1717         else if (INTEL_INFO(dev)->gen >= 6)
1718                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1719         else if (INTEL_INFO(dev)->gen >= 4)
1720                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1721         else if (IS_I830(dev) || IS_845G(dev))
1722                 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
1723         else
1724                 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
1725         ring->init = init_render_ring;
1726         ring->cleanup = render_ring_cleanup;
1727
1728         /* Workaround batchbuffer to combat the CS TLB bug. */
1729         if (HAS_BROKEN_CS_TLB(dev)) {
1730                 struct drm_i915_gem_object *obj;
1731                 int ret;
1732
1733                 obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
1734                 if (obj == NULL) {
1735                         DRM_ERROR("Failed to allocate batch bo\n");
1736                         return -ENOMEM;
1737                 }
1738
1739                 ret = i915_gem_object_pin(obj, 0, true, false);
1740                 if (ret != 0) {
1741                         drm_gem_object_unreference(&obj->base);
1742                         DRM_ERROR("Failed to pin batch bo\n");
1743                         return ret;
1744                 }
1745
1746                 ring->private = obj;
1747         }
1748
1749         return intel_init_ring_buffer(dev, ring);
1750 }
1751
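/*
 * Legacy (non-KMS/DRI) render ring setup: rather than allocating a GEM
 * object, the caller-provided ring at @start is simply ioremapped, and a
 * physical hardware status page is set up where the chipset requires one.
 */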
1752 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1753 {
1754         drm_i915_private_t *dev_priv = dev->dev_private;
1755         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1756         int ret;
1757
1758         ring->name = "render ring";
1759         ring->id = RCS;
1760         ring->mmio_base = RENDER_RING_BASE;
1761
1762         if (INTEL_INFO(dev)->gen >= 6) {
1763                 /* non-kms not supported on gen6+ */
1764                 return -ENODEV;
1765         }
1766
1767         /* Note: gem is not supported on gen5/ilk without kms (the corresponding
1768          * gem_init ioctl returns -ENODEV). Hence we do not need to set up
1769          * the special gen5 functions. */
1770         ring->add_request = i9xx_add_request;
1771         if (INTEL_INFO(dev)->gen < 4)
1772                 ring->flush = gen2_render_ring_flush;
1773         else
1774                 ring->flush = gen4_render_ring_flush;
1775         ring->get_seqno = ring_get_seqno;
1776         ring->set_seqno = ring_set_seqno;
1777         if (IS_GEN2(dev)) {
1778                 ring->irq_get = i8xx_ring_get_irq;
1779                 ring->irq_put = i8xx_ring_put_irq;
1780         } else {
1781                 ring->irq_get = i9xx_ring_get_irq;
1782                 ring->irq_put = i9xx_ring_put_irq;
1783         }
1784         ring->irq_enable_mask = I915_USER_INTERRUPT;
1785         ring->write_tail = ring_write_tail;
1786         if (INTEL_INFO(dev)->gen >= 4)
1787                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1788         else if (IS_I830(dev) || IS_845G(dev))
1789                 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
1790         else
1791                 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
1792         ring->init = init_render_ring;
1793         ring->cleanup = render_ring_cleanup;
1794
1795         ring->dev = dev;
1796         INIT_LIST_HEAD(&ring->active_list);
1797         INIT_LIST_HEAD(&ring->request_list);
1798
1799         ring->size = size;
1800         ring->effective_size = ring->size;
1801         if (IS_I830(ring->dev) || IS_845G(ring->dev))
1802                 ring->effective_size -= 128;
1803
1804         ring->virtual_start = ioremap_wc(start, size);
1805         if (ring->virtual_start == NULL) {
1806                 DRM_ERROR("cannot ioremap virtual address for"
1807                           " ring buffer\n");
1808                 return -ENOMEM;
1809         }
1810
1811         if (!I915_NEED_GFX_HWS(dev)) {
1812                 ret = init_phys_hws_pga(ring);
1813                 if (ret)
1814                         return ret;
1815         }
1816
1817         return 0;
1818 }
1819
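/*
 * The BSD (video) ring comes in two flavours: the gen6+ variant at
 * GEN6_BSD_RING_BASE with semaphores and the tail-write workaround, and the
 * older pre-gen6 variant at BSD_RING_BASE using the i9xx paths.
 */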
1820 int intel_init_bsd_ring_buffer(struct drm_device *dev)
1821 {
1822         drm_i915_private_t *dev_priv = dev->dev_private;
1823         struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
1824
1825         ring->name = "bsd ring";
1826         ring->id = VCS;
1827
1828         ring->write_tail = ring_write_tail;
1829         if (IS_GEN6(dev) || IS_GEN7(dev)) {
1830                 ring->mmio_base = GEN6_BSD_RING_BASE;
1831                 /* gen6 bsd needs a special workaround for tail updates */
1832                 if (IS_GEN6(dev))
1833                         ring->write_tail = gen6_bsd_ring_write_tail;
1834                 ring->flush = gen6_ring_flush;
1835                 ring->add_request = gen6_add_request;
1836                 ring->get_seqno = gen6_ring_get_seqno;
1837                 ring->set_seqno = ring_set_seqno;
1838                 ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
1839                 ring->irq_get = gen6_ring_get_irq;
1840                 ring->irq_put = gen6_ring_put_irq;
1841                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1842                 ring->sync_to = gen6_ring_sync;
1843                 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
1844                 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
1845                 ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
1846                 ring->signal_mbox[0] = GEN6_RVSYNC;
1847                 ring->signal_mbox[1] = GEN6_BVSYNC;
1848         } else {
1849                 ring->mmio_base = BSD_RING_BASE;
1850                 ring->flush = bsd_ring_flush;
1851                 ring->add_request = i9xx_add_request;
1852                 ring->get_seqno = ring_get_seqno;
1853                 ring->set_seqno = ring_set_seqno;
1854                 if (IS_GEN5(dev)) {
1855                         ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
1856                         ring->irq_get = gen5_ring_get_irq;
1857                         ring->irq_put = gen5_ring_put_irq;
1858                 } else {
1859                         ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
1860                         ring->irq_get = i9xx_ring_get_irq;
1861                         ring->irq_put = i9xx_ring_put_irq;
1862                 }
1863                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1864         }
1865         ring->init = init_ring_common;
1866
1867         return intel_init_ring_buffer(dev, ring);
1868 }
1869
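/*
 * The blitter ring only exists on gen6+ (see the SandyBridge+ note above),
 * so the gen6 vfuncs are installed unconditionally here.
 */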
1870 int intel_init_blt_ring_buffer(struct drm_device *dev)
1871 {
1872         drm_i915_private_t *dev_priv = dev->dev_private;
1873         struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
1874
1875         ring->name = "blitter ring";
1876         ring->id = BCS;
1877
1878         ring->mmio_base = BLT_RING_BASE;
1879         ring->write_tail = ring_write_tail;
1880         ring->flush = blt_ring_flush;
1881         ring->add_request = gen6_add_request;
1882         ring->get_seqno = gen6_ring_get_seqno;
1883         ring->set_seqno = ring_set_seqno;
1884         ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
1885         ring->irq_get = gen6_ring_get_irq;
1886         ring->irq_put = gen6_ring_put_irq;
1887         ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1888         ring->sync_to = gen6_ring_sync;
1889         ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
1890         ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
1891         ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
1892         ring->signal_mbox[0] = GEN6_RBSYNC;
1893         ring->signal_mbox[1] = GEN6_VBSYNC;
1894         ring->init = init_ring_common;
1895
1896         return intel_init_ring_buffer(dev, ring);
1897 }
1898
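/*
 * The two helpers below manage the gpu_caches_dirty flag:
 * intel_ring_flush_all_caches() flushes outstanding GPU writes only when
 * something is actually dirty, while intel_ring_invalidate_all_caches()
 * always invalidates the read caches before new commands, adding a write
 * flush when needed.
 */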
1899 int
1900 intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
1901 {
1902         int ret;
1903
1904         if (!ring->gpu_caches_dirty)
1905                 return 0;
1906
1907         ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
1908         if (ret)
1909                 return ret;
1910
1911         trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
1912
1913         ring->gpu_caches_dirty = false;
1914         return 0;
1915 }
1916
1917 int
1918 intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
1919 {
1920         uint32_t flush_domains;
1921         int ret;
1922
1923         flush_domains = 0;
1924         if (ring->gpu_caches_dirty)
1925                 flush_domains = I915_GEM_GPU_DOMAINS;
1926
1927         ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
1928         if (ret)
1929                 return ret;
1930
1931         trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
1932
1933         ring->gpu_caches_dirty = false;
1934         return 0;
1935 }